Diffstat (limited to 'pyload/plugins')
-rw-r--r--pyload/plugins/Account.py281
-rw-r--r--pyload/plugins/AccountManager.py173
-rw-r--r--pyload/plugins/Container.py61
-rw-r--r--pyload/plugins/Crypter.py64
-rw-r--r--pyload/plugins/Hook.py149
-rw-r--r--pyload/plugins/Hoster.py20
-rw-r--r--pyload/plugins/OCR.py299
-rw-r--r--pyload/plugins/Plugin.py629
-rw-r--r--pyload/plugins/PluginManager.py356
-rw-r--r--pyload/plugins/README.md16
-rw-r--r--pyload/plugins/__init__.py0
-rw-r--r--pyload/plugins/accounts/AlldebridCom.py58
-rw-r--r--pyload/plugins/accounts/BayfilesCom.py36
-rw-r--r--pyload/plugins/accounts/BitshareCom.py31
-rw-r--r--pyload/plugins/accounts/CramitIn.py15
-rw-r--r--pyload/plugins/accounts/CyberlockerCh.py35
-rw-r--r--pyload/plugins/accounts/CzshareCom.py41
-rw-r--r--pyload/plugins/accounts/DebridItaliaCom.py36
-rw-r--r--pyload/plugins/accounts/DepositfilesCom.py32
-rw-r--r--pyload/plugins/accounts/EasybytezCom.py61
-rw-r--r--pyload/plugins/accounts/EgoFilesCom.py44
-rw-r--r--pyload/plugins/accounts/EuroshareEu.py41
-rw-r--r--pyload/plugins/accounts/FastixRu.py36
-rw-r--r--pyload/plugins/accounts/FastshareCz.py41
-rw-r--r--pyload/plugins/accounts/File4safeCom.py18
-rw-r--r--pyload/plugins/accounts/FilecloudIo.py57
-rw-r--r--pyload/plugins/accounts/FilefactoryCom.py46
-rw-r--r--pyload/plugins/accounts/FilejungleCom.py47
-rw-r--r--pyload/plugins/accounts/FilerNet.py49
-rw-r--r--pyload/plugins/accounts/FilerioCom.py15
-rw-r--r--pyload/plugins/accounts/FilesMailRu.py27
-rw-r--r--pyload/plugins/accounts/FileserveCom.py43
-rw-r--r--pyload/plugins/accounts/FourSharedCom.py34
-rw-r--r--pyload/plugins/accounts/FreakshareCom.py39
-rw-r--r--pyload/plugins/accounts/FreeWayMe.py52
-rw-r--r--pyload/plugins/accounts/FshareVn.py59
-rw-r--r--pyload/plugins/accounts/Ftp.py16
-rw-r--r--pyload/plugins/accounts/HellshareCz.py74
-rw-r--r--pyload/plugins/accounts/HotfileCom.py74
-rw-r--r--pyload/plugins/accounts/Http.py16
-rw-r--r--pyload/plugins/accounts/LetitbitNet.py33
-rw-r--r--pyload/plugins/accounts/LinksnappyCom.py49
-rw-r--r--pyload/plugins/accounts/MegaDebridEu.py37
-rw-r--r--pyload/plugins/accounts/MegasharesCom.py46
-rw-r--r--pyload/plugins/accounts/MovReelCom.py21
-rw-r--r--pyload/plugins/accounts/MultiDebridCom.py34
-rw-r--r--pyload/plugins/accounts/MultishareCz.py44
-rw-r--r--pyload/plugins/accounts/NetloadIn.py38
-rw-r--r--pyload/plugins/accounts/OboomCom.py53
-rw-r--r--pyload/plugins/accounts/OneFichierCom.py48
-rw-r--r--pyload/plugins/accounts/OverLoadMe.py35
-rw-r--r--pyload/plugins/accounts/Premium4Me.py29
-rw-r--r--pyload/plugins/accounts/PremiumizeMe.py46
-rw-r--r--pyload/plugins/accounts/QuickshareCz.py39
-rw-r--r--pyload/plugins/accounts/RPNetBiz.py49
-rw-r--r--pyload/plugins/accounts/RapidgatorNet.py56
-rw-r--r--pyload/plugins/accounts/RapidshareCom.py54
-rw-r--r--pyload/plugins/accounts/RarefileNet.py15
-rw-r--r--pyload/plugins/accounts/RealdebridCom.py35
-rw-r--r--pyload/plugins/accounts/RehostTo.py37
-rw-r--r--pyload/plugins/accounts/RyushareCom.py23
-rw-r--r--pyload/plugins/accounts/ShareRapidCom.py52
-rw-r--r--pyload/plugins/accounts/ShareonlineBiz.py42
-rw-r--r--pyload/plugins/accounts/SimplyPremiumCom.py45
-rw-r--r--pyload/plugins/accounts/SimplydebridCom.py33
-rw-r--r--pyload/plugins/accounts/StahnuTo.py34
-rw-r--r--pyload/plugins/accounts/TurbobitNet.py41
-rw-r--r--pyload/plugins/accounts/UlozTo.py45
-rw-r--r--pyload/plugins/accounts/UnrestrictLi.py43
-rw-r--r--pyload/plugins/accounts/UploadedTo.py53
-rw-r--r--pyload/plugins/accounts/UploadheroCom.py40
-rw-r--r--pyload/plugins/accounts/UploadingCom.py40
-rw-r--r--pyload/plugins/accounts/UptoboxCom.py17
-rw-r--r--pyload/plugins/accounts/YibaishiwuCom.py38
-rw-r--r--pyload/plugins/accounts/ZeveraCom.py54
-rw-r--r--pyload/plugins/accounts/__init__.py0
-rw-r--r--pyload/plugins/container/CCF.py43
-rw-r--r--pyload/plugins/container/DLC_25.pycbin0 -> 8340 bytes
-rw-r--r--pyload/plugins/container/DLC_26.pycbin0 -> 8313 bytes
-rw-r--r--pyload/plugins/container/DLC_27.pycbin0 -> 8237 bytes
-rw-r--r--pyload/plugins/container/LinkList.py73
-rw-r--r--pyload/plugins/container/RSDF.py51
-rw-r--r--pyload/plugins/container/__init__.py0
-rw-r--r--pyload/plugins/crypter/BitshareComFolder.py18
-rw-r--r--pyload/plugins/crypter/C1neonCom.py15
-rw-r--r--pyload/plugins/crypter/ChipDe.py27
-rw-r--r--pyload/plugins/crypter/CrockoComFolder.py17
-rw-r--r--pyload/plugins/crypter/CryptItCom.py15
-rw-r--r--pyload/plugins/crypter/CzshareComFolder.py31
-rw-r--r--pyload/plugins/crypter/DDLMusicOrg.py48
-rw-r--r--pyload/plugins/crypter/DailymotionBatch.py98
-rw-r--r--pyload/plugins/crypter/DataHuFolder.py43
-rw-r--r--pyload/plugins/crypter/DdlstorageComFolder.py18
-rw-r--r--pyload/plugins/crypter/DepositfilesComFolder.py17
-rw-r--r--pyload/plugins/crypter/Dereferer.py24
-rw-r--r--pyload/plugins/crypter/DlProtectCom.py62
-rw-r--r--pyload/plugins/crypter/DontKnowMe.py26
-rw-r--r--pyload/plugins/crypter/DuckCryptInfo.py59
-rw-r--r--pyload/plugins/crypter/DuploadOrgFolder.py17
-rw-r--r--pyload/plugins/crypter/EasybytezComFolder.py20
-rw-r--r--pyload/plugins/crypter/EmbeduploadCom.py55
-rw-r--r--pyload/plugins/crypter/FilebeerInfoFolder.py15
-rw-r--r--pyload/plugins/crypter/FilecloudIoFolder.py18
-rw-r--r--pyload/plugins/crypter/FilefactoryComFolder.py25
-rw-r--r--pyload/plugins/crypter/FilerNetFolder.py22
-rw-r--r--pyload/plugins/crypter/FileserveComFolder.py37
-rw-r--r--pyload/plugins/crypter/FilestubeCom.py18
-rw-r--r--pyload/plugins/crypter/FiletramCom.py18
-rw-r--r--pyload/plugins/crypter/FiredriveComFolder.py28
-rw-r--r--pyload/plugins/crypter/FourChanOrg.py25
-rw-r--r--pyload/plugins/crypter/FreakhareComFolder.py35
-rw-r--r--pyload/plugins/crypter/FreetexthostCom.py25
-rw-r--r--pyload/plugins/crypter/FshareVnFolder.py17
-rw-r--r--pyload/plugins/crypter/GooGl.py29
-rw-r--r--pyload/plugins/crypter/HoerbuchIn.py57
-rw-r--r--pyload/plugins/crypter/HotfileFolderCom.py30
-rw-r--r--pyload/plugins/crypter/ILoadTo.py15
-rw-r--r--pyload/plugins/crypter/ImgurComAlbum.py24
-rw-r--r--pyload/plugins/crypter/LetitbitNetFolder.py32
-rw-r--r--pyload/plugins/crypter/LinkSaveIn.py225
-rw-r--r--pyload/plugins/crypter/LinkdecrypterCom.py91
-rw-r--r--pyload/plugins/crypter/LixIn.py59
-rw-r--r--pyload/plugins/crypter/LofCc.py15
-rw-r--r--pyload/plugins/crypter/MBLinkInfo.py15
-rw-r--r--pyload/plugins/crypter/MediafireComFolder.py56
-rw-r--r--pyload/plugins/crypter/Movie2kTo.py15
-rw-r--r--pyload/plugins/crypter/MultiUpOrg.py35
-rw-r--r--pyload/plugins/crypter/MultiloadCz.py42
-rw-r--r--pyload/plugins/crypter/MultiuploadCom.py64
-rw-r--r--pyload/plugins/crypter/NCryptIn.py303
-rw-r--r--pyload/plugins/crypter/NetfolderIn.py73
-rw-r--r--pyload/plugins/crypter/NosvideoCom.py18
-rw-r--r--pyload/plugins/crypter/OneKhDe.py38
-rw-r--r--pyload/plugins/crypter/OronComFolder.py15
-rw-r--r--pyload/plugins/crypter/PastebinCom.py18
-rw-r--r--pyload/plugins/crypter/QuickshareCzFolder.py31
-rw-r--r--pyload/plugins/crypter/RSLayerCom.py15
-rw-r--r--pyload/plugins/crypter/RelinkUs.py263
-rw-r--r--pyload/plugins/crypter/SafelinkingNet.py82
-rw-r--r--pyload/plugins/crypter/SecuredIn.py15
-rw-r--r--pyload/plugins/crypter/SerienjunkiesOrg.py324
-rw-r--r--pyload/plugins/crypter/ShareLinksBiz.py269
-rw-r--r--pyload/plugins/crypter/ShareRapidComFolder.py17
-rw-r--r--pyload/plugins/crypter/SpeedLoadOrgFolder.py15
-rw-r--r--pyload/plugins/crypter/StealthTo.py15
-rw-r--r--pyload/plugins/crypter/TnyCz.py24
-rw-r--r--pyload/plugins/crypter/TrailerzoneInfo.py15
-rw-r--r--pyload/plugins/crypter/TurbobitNetFolder.py39
-rw-r--r--pyload/plugins/crypter/TusfilesNetFolder.py40
-rw-r--r--pyload/plugins/crypter/UlozToFolder.py45
-rw-r--r--pyload/plugins/crypter/UploadableChFolder.py21
-rw-r--r--pyload/plugins/crypter/UploadedToFolder.py38
-rw-r--r--pyload/plugins/crypter/WiiReloadedOrg.py15
-rw-r--r--pyload/plugins/crypter/XupPl.py23
-rw-r--r--pyload/plugins/crypter/YoutubeBatch.py138
-rw-r--r--pyload/plugins/crypter/__init__.py0
-rw-r--r--pyload/plugins/hooks/AlldebridCom.py28
-rw-r--r--pyload/plugins/hooks/BypassCaptcha.py127
-rw-r--r--pyload/plugins/hooks/Captcha9kw.py156
-rw-r--r--pyload/plugins/hooks/CaptchaBrotherhood.py157
-rw-r--r--pyload/plugins/hooks/Checksum.py175
-rw-r--r--pyload/plugins/hooks/ClickAndLoad.py76
-rw-r--r--pyload/plugins/hooks/DeathByCaptcha.py202
-rw-r--r--pyload/plugins/hooks/DebridItaliaCom.py29
-rw-r--r--pyload/plugins/hooks/DeleteFinished.py69
-rw-r--r--pyload/plugins/hooks/DownloadScheduler.py75
-rw-r--r--pyload/plugins/hooks/EasybytezCom.py37
-rw-r--r--pyload/plugins/hooks/Ev0InFetcher.py81
-rw-r--r--pyload/plugins/hooks/ExpertDecoders.py94
-rw-r--r--pyload/plugins/hooks/ExternalScripts.py104
-rw-r--r--pyload/plugins/hooks/ExtractArchive.py320
-rw-r--r--pyload/plugins/hooks/FastixRu.py28
-rw-r--r--pyload/plugins/hooks/FreeWayMe.py26
-rw-r--r--pyload/plugins/hooks/HotFolder.py65
-rw-r--r--pyload/plugins/hooks/IRCInterface.py404
-rw-r--r--pyload/plugins/hooks/ImageTyperz.py143
-rw-r--r--pyload/plugins/hooks/LinkdecrypterCom.py55
-rw-r--r--pyload/plugins/hooks/LinksnappyCom.py28
-rw-r--r--pyload/plugins/hooks/MegaDebridEu.py31
-rw-r--r--pyload/plugins/hooks/MergeFiles.py76
-rw-r--r--pyload/plugins/hooks/MultiDebridCom.py29
-rw-r--r--pyload/plugins/hooks/MultiHome.py75
-rw-r--r--pyload/plugins/hooks/MultishareCz.py27
-rw-r--r--pyload/plugins/hooks/OverLoadMe.py31
-rw-r--r--pyload/plugins/hooks/Premium4Me.py34
-rw-r--r--pyload/plugins/hooks/PremiumizeMe.py54
-rw-r--r--pyload/plugins/hooks/RPNetBiz.py52
-rw-r--r--pyload/plugins/hooks/RealdebridCom.py28
-rw-r--r--pyload/plugins/hooks/RehostTo.py40
-rw-r--r--pyload/plugins/hooks/RestartFailed.py42
-rw-r--r--pyload/plugins/hooks/SimplyPremiumCom.py30
-rw-r--r--pyload/plugins/hooks/SimplydebridCom.py23
-rw-r--r--pyload/plugins/hooks/UnSkipOnFail.py85
-rw-r--r--pyload/plugins/hooks/UnrestrictLi.py31
-rw-r--r--pyload/plugins/hooks/UpdateManager.py281
-rw-r--r--pyload/plugins/hooks/WindowsPhoneToastNotify.py59
-rw-r--r--pyload/plugins/hooks/XFileSharingPro.py78
-rw-r--r--pyload/plugins/hooks/XMPPInterface.py233
-rw-r--r--pyload/plugins/hooks/ZeveraCom.py23
-rw-r--r--pyload/plugins/hooks/__init__.py0
-rw-r--r--pyload/plugins/hoster/AlldebridCom.py87
-rw-r--r--pyload/plugins/hoster/BasePlugin.py116
-rw-r--r--pyload/plugins/hoster/BayfilesCom.py84
-rw-r--r--pyload/plugins/hoster/BezvadataCz.py87
-rw-r--r--pyload/plugins/hoster/BillionuploadsCom.py23
-rw-r--r--pyload/plugins/hoster/BitshareCom.py151
-rw-r--r--pyload/plugins/hoster/BoltsharingCom.py18
-rw-r--r--pyload/plugins/hoster/CatShareNet.py44
-rw-r--r--pyload/plugins/hoster/CloudzerNet.py18
-rw-r--r--pyload/plugins/hoster/CramitIn.py27
-rw-r--r--pyload/plugins/hoster/CrockoCom.py75
-rw-r--r--pyload/plugins/hoster/CyberlockerCh.py18
-rw-r--r--pyload/plugins/hoster/CzshareCom.py148
-rw-r--r--pyload/plugins/hoster/DailymotionCom.py111
-rw-r--r--pyload/plugins/hoster/DataHu.py41
-rw-r--r--pyload/plugins/hoster/DataportCz.py56
-rw-r--r--pyload/plugins/hoster/DateiTo.py83
-rw-r--r--pyload/plugins/hoster/DdlstorageCom.py18
-rw-r--r--pyload/plugins/hoster/DebridItaliaCom.py49
-rw-r--r--pyload/plugins/hoster/DepositfilesCom.py129
-rw-r--r--pyload/plugins/hoster/DlFreeFr.py205
-rw-r--r--pyload/plugins/hoster/DuploadOrg.py22
-rw-r--r--pyload/plugins/hoster/EasybytezCom.py31
-rw-r--r--pyload/plugins/hoster/EdiskCz.py54
-rw-r--r--pyload/plugins/hoster/EgoFilesCom.py89
-rw-r--r--pyload/plugins/hoster/EpicShareNet.py26
-rw-r--r--pyload/plugins/hoster/EuroshareEu.py64
-rw-r--r--pyload/plugins/hoster/ExtabitCom.py77
-rw-r--r--pyload/plugins/hoster/FastixRu.py71
-rw-r--r--pyload/plugins/hoster/FastshareCz.py88
-rw-r--r--pyload/plugins/hoster/File4safeCom.py40
-rw-r--r--pyload/plugins/hoster/FileApeCom.py18
-rw-r--r--pyload/plugins/hoster/FileParadoxIn.py25
-rw-r--r--pyload/plugins/hoster/FileStoreTo.py34
-rw-r--r--pyload/plugins/hoster/FilebeerInfo.py18
-rw-r--r--pyload/plugins/hoster/FilecloudIo.py115
-rw-r--r--pyload/plugins/hoster/FilefactoryCom.py106
-rw-r--r--pyload/plugins/hoster/FilejungleCom.py28
-rw-r--r--pyload/plugins/hoster/FileomCom.py39
-rw-r--r--pyload/plugins/hoster/FilepostCom.py129
-rw-r--r--pyload/plugins/hoster/FilerNet.py109
-rw-r--r--pyload/plugins/hoster/FilerioCom.py27
-rw-r--r--pyload/plugins/hoster/FilesMailRu.py101
-rw-r--r--pyload/plugins/hoster/FileserveCom.py209
-rw-r--r--pyload/plugins/hoster/FileshareInUa.py83
-rw-r--r--pyload/plugins/hoster/FilezyNet.py42
-rw-r--r--pyload/plugins/hoster/FiredriveCom.py51
-rw-r--r--pyload/plugins/hoster/FlyFilesNet.py46
-rw-r--r--pyload/plugins/hoster/FourSharedCom.py59
-rw-r--r--pyload/plugins/hoster/FreakshareCom.py173
-rw-r--r--pyload/plugins/hoster/FreeWayMe.py35
-rw-r--r--pyload/plugins/hoster/FreevideoCz.py18
-rw-r--r--pyload/plugins/hoster/FshareVn.py120
-rw-r--r--pyload/plugins/hoster/Ftp.py74
-rw-r--r--pyload/plugins/hoster/GamefrontCom.py84
-rw-r--r--pyload/plugins/hoster/GigapetaCom.py64
-rw-r--r--pyload/plugins/hoster/GooIm.py36
-rw-r--r--pyload/plugins/hoster/HellshareCz.py47
-rw-r--r--pyload/plugins/hoster/HellspyCz.py18
-rw-r--r--pyload/plugins/hoster/HotfileCom.py18
-rw-r--r--pyload/plugins/hoster/HugefilesNet.py25
-rw-r--r--pyload/plugins/hoster/HundredEightyUploadCom.py26
-rw-r--r--pyload/plugins/hoster/IFileWs.py23
-rw-r--r--pyload/plugins/hoster/IcyFilesCom.py18
-rw-r--r--pyload/plugins/hoster/IfileIt.py62
-rw-r--r--pyload/plugins/hoster/IfolderRu.py75
-rw-r--r--pyload/plugins/hoster/JumbofilesCom.py36
-rw-r--r--pyload/plugins/hoster/Keep2shareCC.py110
-rw-r--r--pyload/plugins/hoster/LemUploadsCom.py26
-rw-r--r--pyload/plugins/hoster/LetitbitNet.py160
-rw-r--r--pyload/plugins/hoster/LinksnappyCom.py72
-rw-r--r--pyload/plugins/hoster/LoadTo.py69
-rw-r--r--pyload/plugins/hoster/LomafileCom.py61
-rw-r--r--pyload/plugins/hoster/LuckyShareNet.py75
-rw-r--r--pyload/plugins/hoster/MediafireCom.py125
-rw-r--r--pyload/plugins/hoster/MegaDebridEu.py89
-rw-r--r--pyload/plugins/hoster/MegaFilesSe.py23
-rw-r--r--pyload/plugins/hoster/MegaNz.py132
-rw-r--r--pyload/plugins/hoster/MegacrypterCom.py53
-rw-r--r--pyload/plugins/hoster/MegareleaseOrg.py22
-rw-r--r--pyload/plugins/hoster/MegasharesCom.py105
-rw-r--r--pyload/plugins/hoster/MovReelCom.py24
-rw-r--r--pyload/plugins/hoster/MultiDebridCom.py45
-rw-r--r--pyload/plugins/hoster/MultishareCz.py72
-rw-r--r--pyload/plugins/hoster/MyvideoDe.py45
-rw-r--r--pyload/plugins/hoster/NarodRu.py60
-rw-r--r--pyload/plugins/hoster/NetloadIn.py258
-rw-r--r--pyload/plugins/hoster/NosuploadCom.py42
-rw-r--r--pyload/plugins/hoster/NovafileCom.py33
-rw-r--r--pyload/plugins/hoster/NowDownloadEu.py60
-rw-r--r--pyload/plugins/hoster/OboomCom.py132
-rw-r--r--pyload/plugins/hoster/OneFichierCom.py90
-rw-r--r--pyload/plugins/hoster/OverLoadMe.py82
-rw-r--r--pyload/plugins/hoster/PandaPlanet.py28
-rw-r--r--pyload/plugins/hoster/PornhostCom.py76
-rw-r--r--pyload/plugins/hoster/PornhubCom.py85
-rw-r--r--pyload/plugins/hoster/PotloadCom.py22
-rw-r--r--pyload/plugins/hoster/Premium4Me.py72
-rw-r--r--pyload/plugins/hoster/PremiumizeMe.py55
-rw-r--r--pyload/plugins/hoster/PromptfileCom.py45
-rw-r--r--pyload/plugins/hoster/QuickshareCz.py92
-rw-r--r--pyload/plugins/hoster/RPNetBiz.py80
-rw-r--r--pyload/plugins/hoster/RapidgatorNet.py191
-rw-r--r--pyload/plugins/hoster/RapidshareCom.py223
-rw-r--r--pyload/plugins/hoster/RarefileNet.py39
-rw-r--r--pyload/plugins/hoster/RealdebridCom.py91
-rw-r--r--pyload/plugins/hoster/RedtubeCom.py58
-rw-r--r--pyload/plugins/hoster/RehostTo.py41
-rw-r--r--pyload/plugins/hoster/RemixshareCom.py59
-rw-r--r--pyload/plugins/hoster/RgHostNet.py32
-rw-r--r--pyload/plugins/hoster/RyushareCom.py85
-rw-r--r--pyload/plugins/hoster/SecureUploadEu.py23
-rw-r--r--pyload/plugins/hoster/SendmywayCom.py23
-rw-r--r--pyload/plugins/hoster/SendspaceCom.py60
-rw-r--r--pyload/plugins/hoster/Share4webCom.py21
-rw-r--r--pyload/plugins/hoster/Share76Com.py18
-rw-r--r--pyload/plugins/hoster/ShareFilesCo.py18
-rw-r--r--pyload/plugins/hoster/ShareRapidCom.py66
-rw-r--r--pyload/plugins/hoster/SharebeesCom.py18
-rw-r--r--pyload/plugins/hoster/ShareonlineBiz.py199
-rw-r--r--pyload/plugins/hoster/ShareplaceCom.py84
-rw-r--r--pyload/plugins/hoster/ShragleCom.py18
-rw-r--r--pyload/plugins/hoster/SimplyPremiumCom.py81
-rw-r--r--pyload/plugins/hoster/SimplydebridCom.py62
-rw-r--r--pyload/plugins/hoster/SockshareCom.py88
-rw-r--r--pyload/plugins/hoster/SoundcloudCom.py57
-rw-r--r--pyload/plugins/hoster/SpeedLoadOrg.py18
-rw-r--r--pyload/plugins/hoster/SpeedfileCz.py18
-rw-r--r--pyload/plugins/hoster/SpeedyshareCom.py43
-rw-r--r--pyload/plugins/hoster/StreamCz.py70
-rw-r--r--pyload/plugins/hoster/StreamcloudEu.py124
-rw-r--r--pyload/plugins/hoster/TurbobitNet.py167
-rw-r--r--pyload/plugins/hoster/TurbouploadCom.py18
-rw-r--r--pyload/plugins/hoster/TusfilesNet.py31
-rw-r--r--pyload/plugins/hoster/TwoSharedCom.py39
-rw-r--r--pyload/plugins/hoster/UlozTo.py158
-rw-r--r--pyload/plugins/hoster/UloziskoSk.py70
-rw-r--r--pyload/plugins/hoster/UnibytesCom.py71
-rw-r--r--pyload/plugins/hoster/UnrestrictLi.py89
-rw-r--r--pyload/plugins/hoster/UploadStationCom.py18
-rw-r--r--pyload/plugins/hoster/UploadedTo.py240
-rw-r--r--pyload/plugins/hoster/UploadheroCom.py77
-rw-r--r--pyload/plugins/hoster/UploadingCom.py99
-rw-r--r--pyload/plugins/hoster/UpstoreNet.py75
-rw-r--r--pyload/plugins/hoster/UptoboxCom.py69
-rw-r--r--pyload/plugins/hoster/VeehdCom.py79
-rw-r--r--pyload/plugins/hoster/VeohCom.py51
-rw-r--r--pyload/plugins/hoster/VidPlayNet.py27
-rw-r--r--pyload/plugins/hoster/VimeoCom.py72
-rw-r--r--pyload/plugins/hoster/Vipleech4uCom.py18
-rw-r--r--pyload/plugins/hoster/WarserverCz.py18
-rw-r--r--pyload/plugins/hoster/WebshareCz.py60
-rw-r--r--pyload/plugins/hoster/WrzucTo.py51
-rw-r--r--pyload/plugins/hoster/WuploadCom.py18
-rw-r--r--pyload/plugins/hoster/X7To.py18
-rw-r--r--pyload/plugins/hoster/XFileSharingPro.py324
-rw-r--r--pyload/plugins/hoster/XHamsterCom.py123
-rw-r--r--pyload/plugins/hoster/XVideosCom.py28
-rw-r--r--pyload/plugins/hoster/Xdcc.py205
-rw-r--r--pyload/plugins/hoster/YibaishiwuCom.py54
-rw-r--r--pyload/plugins/hoster/YoupornCom.py56
-rw-r--r--pyload/plugins/hoster/YourfilesTo.py81
-rw-r--r--pyload/plugins/hoster/YoutubeCom.py180
-rw-r--r--pyload/plugins/hoster/ZDF.py56
-rw-r--r--pyload/plugins/hoster/ZeveraCom.py108
-rw-r--r--pyload/plugins/hoster/ZippyshareCom.py74
-rw-r--r--pyload/plugins/hoster/__init__.py0
-rw-r--r--pyload/plugins/internal/AbstractExtractor.py101
-rw-r--r--pyload/plugins/internal/CaptchaService.py96
-rw-r--r--pyload/plugins/internal/DeadCrypter.py19
-rw-r--r--pyload/plugins/internal/DeadHoster.py27
-rw-r--r--pyload/plugins/internal/MultiHoster.py192
-rw-r--r--pyload/plugins/internal/SimpleCrypter.py118
-rw-r--r--pyload/plugins/internal/SimpleHoster.py292
-rw-r--r--pyload/plugins/internal/UnRar.py212
-rw-r--r--pyload/plugins/internal/UnZip.py38
-rw-r--r--pyload/plugins/internal/XFSPAccount.py69
-rw-r--r--pyload/plugins/internal/__init__.py0
-rw-r--r--pyload/plugins/ocr/GigasizeCom.py23
-rw-r--r--pyload/plugins/ocr/LinksaveIn.py149
-rw-r--r--pyload/plugins/ocr/NetloadIn.py27
-rw-r--r--pyload/plugins/ocr/ShareonlineBiz.py38
-rw-r--r--pyload/plugins/ocr/__init__.py0
383 files changed, 25445 insertions, 0 deletions
diff --git a/pyload/plugins/Account.py b/pyload/plugins/Account.py
new file mode 100644
index 000000000..12ea494a0
--- /dev/null
+++ b/pyload/plugins/Account.py
@@ -0,0 +1,281 @@
+# -*- coding: utf-8 -*-
+
+from random import choice
+from time import time
+from traceback import print_exc
+from threading import RLock
+
+from pyload.plugins.Plugin import Base
+from pyload.utils import compare_time, parseFileSize, lock
+
+
+class WrongPassword(Exception):
+ pass
+
+
+class Account(Base):
+ """
+ Base class for every Account plugin.
+ Just overwrite `login` and the cookies will be stored, so the account becomes accessible in the\
+ associated hoster plugin. The plugin should also provide `loadAccountInfo`.
+ """
+ __name__ = "Account"
+ __type__ = "account"
+ __version__ = "0.3"
+
+ __description__ = """Base account plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+ #: after that time (in minutes) pyload will relogin the account
+ login_timeout = 10 * 60
+ #: after that time (in minutes) account data will be reloaded
+ info_threshold = 10 * 60
+
+
+ def __init__(self, manager, accounts):
+ Base.__init__(self, manager.core)
+
+ self.manager = manager
+ self.accounts = {}
+ self.infos = {} # cache for account information
+ self.lock = RLock()
+
+ self.timestamps = {}
+ self.setAccounts(accounts)
+ self.init()
+
+ def init(self):
+ pass
+
+ def login(self, user, data, req):
+ """login into account, the cookies will be saved so user can be recognized
+
+ :param user: loginname
+ :param data: data dictionary
+ :param req: `Request` instance
+ """
+ pass
+
+ @lock
+ def _login(self, user, data):
+ # set timestamp for login
+ self.timestamps[user] = time()
+
+ req = self.getAccountRequest(user)
+ try:
+ self.login(user, data, req)
+ except WrongPassword:
+ self.logWarning(
+ _("Could not login with account %(user)s | %(msg)s") % {"user": user
+ , "msg": _("Wrong Password")})
+ success = data['valid'] = False
+ except Exception, e:
+ self.logWarning(
+ _("Could not login with account %(user)s | %(msg)s") % {"user": user
+ , "msg": e})
+ success = data['valid'] = False
+ if self.core.debug:
+ print_exc()
+ else:
+ success = True
+ finally:
+ if req:
+ req.close()
+ return success
+
+ def relogin(self, user):
+ req = self.getAccountRequest(user)
+ if req:
+ req.cj.clear()
+ req.close()
+ if user in self.infos:
+ del self.infos[user] #delete old information
+
+ return self._login(user, self.accounts[user])
+
+ def setAccounts(self, accounts):
+ self.accounts = accounts
+ for user, data in self.accounts.iteritems():
+ self._login(user, data)
+ self.infos[user] = {}
+
+ def updateAccounts(self, user, password=None, options={}):
+ """ updates account and return true if anything changed """
+
+ if user in self.accounts:
+ self.accounts[user]['valid'] = True #do not remove or accounts will not login
+ if password:
+ self.accounts[user]['password'] = password
+ self.relogin(user)
+ return True
+ if options:
+ before = self.accounts[user]['options']
+ self.accounts[user]['options'].update(options)
+ return self.accounts[user]['options'] != before
+ else:
+ self.accounts[user] = {"password": password, "options": options, "valid": True}
+ self._login(user, self.accounts[user])
+ return True
+
+ def removeAccount(self, user):
+ if user in self.accounts:
+ del self.accounts[user]
+ if user in self.infos:
+ del self.infos[user]
+ if user in self.timestamps:
+ del self.timestamps[user]
+
+ @lock
+ def getAccountInfo(self, name, force=False):
+ """retrieve account infos for an user, do **not** overwrite this method!\\
+ just use it to retrieve infos in hoster plugins. see `loadAccountInfo`
+
+ :param name: username
+ :param force: reloads cached account information
+ :return: dictionary with information
+ """
+ data = Account.loadAccountInfo(self, name)
+
+ if force or name not in self.infos:
+ self.logDebug("Get Account Info for %s" % name)
+ req = self.getAccountRequest(name)
+
+ try:
+ infos = self.loadAccountInfo(name, req)
+ if not type(infos) == dict:
+ raise Exception("Wrong return format")
+ except Exception, e:
+ infos = {"error": str(e)}
+
+ if req: req.close()
+
+ self.logDebug("Account Info: %s" % str(infos))
+
+ infos['timestamp'] = time()
+ self.infos[name] = infos
+ elif "timestamp" in self.infos[name] and self.infos[name][
+ "timestamp"] + self.info_threshold * 60 < time():
+ self.logDebug("Reached timeout for account data")
+ self.scheduleRefresh(name)
+
+ data.update(self.infos[name])
+ return data
+
+ def isPremium(self, user):
+ info = self.getAccountInfo(user)
+ return info['premium']
+
+ def loadAccountInfo(self, name, req=None):
+ """this should be overwritten in account plugin,\
+ and retrieving account information for user
+
+ :param name:
+ :param req: `Request` instance
+ :return:
+ """
+ return {
+ "validuntil": None, # -1 for unlimited
+ "login": name,
+ #"password": self.accounts[name]['password'], #@XXX: security
+ "options": self.accounts[name]['options'],
+ "valid": self.accounts[name]['valid'],
+ "trafficleft": None, # in kb, -1 for unlimited
+ "maxtraffic": None,
+ "premium": True, #useful for free accounts
+ "timestamp": 0, #time this info was retrieved
+ "type": self.__name__,
+ }
+
+ def getAllAccounts(self, force=False):
+ return [self.getAccountInfo(user, force) for user, data in self.accounts.iteritems()]
+
+ def getAccountRequest(self, user=None):
+ if not user:
+ user, data = self.selectAccount()
+ if not user:
+ return None
+
+ req = self.core.requestFactory.getRequest(self.__name__, user)
+ return req
+
+ def getAccountCookies(self, user=None):
+ if not user:
+ user, data = self.selectAccount()
+ if not user:
+ return None
+
+ cj = self.core.requestFactory.getCookieJar(self.__name__, user)
+ return cj
+
+ def getAccountData(self, user):
+ return self.accounts[user]
+
+ def selectAccount(self):
+ """ returns an valid account name and data"""
+ usable = []
+ for user, data in self.accounts.iteritems():
+ if not data['valid']: continue
+
+ if "time" in data['options'] and data['options']['time']:
+ time_data = ""
+ try:
+ time_data = data['options']['time'][0]
+ start, end = time_data.split("-")
+ if not compare_time(start.split(":"), end.split(":")):
+ continue
+ except:
+ self.logWarning(_("Your Time %s has wrong format, use: 1:22-3:44") % time_data)
+
+ if user in self.infos:
+ if "validuntil" in self.infos[user]:
+ if self.infos[user]['validuntil'] > 0 and time() > self.infos[user]['validuntil']:
+ continue
+ if "trafficleft" in self.infos[user]:
+ if self.infos[user]['trafficleft'] == 0:
+ continue
+
+ usable.append((user, data))
+
+ if not usable: return None, None
+ return choice(usable)
+
+ def canUse(self):
+ return False if self.selectAccount() == (None, None) else True
+
+ def parseTraffic(self, string): #returns kbyte
+ return parseFileSize(string) / 1024
+
+ def wrongPassword(self):
+ raise WrongPassword
+
+ def empty(self, user):
+ if user in self.infos:
+ self.logWarning(_("Account %s has not enough traffic, checking again in 30min") % user)
+
+ self.infos[user].update({"trafficleft": 0})
+ self.scheduleRefresh(user, 30 * 60)
+
+ def expired(self, user):
+ if user in self.infos:
+ self.logWarning(_("Account %s is expired, checking again in 1h") % user)
+
+ self.infos[user].update({"validuntil": time() - 1})
+ self.scheduleRefresh(user, 60 * 60)
+
+ def scheduleRefresh(self, user, time=0, force=True):
+ """ add task to refresh account info to sheduler """
+ self.logDebug("Scheduled Account refresh for %s in %s seconds." % (user, time))
+ self.core.scheduler.addJob(time, self.getAccountInfo, [user, force])
+
+ @lock
+ def checkLogin(self, user):
+ """ checks if user is still logged in """
+ if user in self.timestamps:
+ if self.login_timeout > 0 and self.timestamps[user] + self.login_timeout * 60 < time():
+ self.logDebug("Reached login timeout for %s" % user)
+ return self.relogin(user)
+ else:
+ return True
+ else:
+ return False
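
The class above is the whole contract for account plugins: overwrite `login` (calling `self.wrongPassword()` on bad credentials) and provide `loadAccountInfo` returning the dictionary shown in the default implementation. A minimal sketch, not part of the diff — the host name, URLs and form fields are invented:

from time import time

from pyload.plugins.Account import Account


class ExampleHostCom(Account):
    __name__ = "ExampleHostCom"   # hypothetical hoster, for illustration only
    __type__ = "account"
    __version__ = "0.1"

    __description__ = """Example-host.com account plugin (sketch)"""

    def login(self, user, data, req):
        html = req.load("http://www.example-host.com/login",
                        post={"user": user, "password": data['password']})
        if "Login failed" in html:
            self.wrongPassword()   # raises WrongPassword, the account gets marked invalid

    def loadAccountInfo(self, user, req):
        html = req.load("http://www.example-host.com/account")
        # a real plugin would parse validuntil/trafficleft out of `html` here
        return {"validuntil": time() + 30 * 24 * 3600,
                "trafficleft": -1,
                "premium": True}
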
diff --git a/pyload/plugins/AccountManager.py b/pyload/plugins/AccountManager.py
new file mode 100644
index 000000000..67909072f
--- /dev/null
+++ b/pyload/plugins/AccountManager.py
@@ -0,0 +1,173 @@
+# -*- coding: utf-8 -*-
+
+from os.path import exists
+from shutil import copy
+
+from threading import Lock
+
+from pyload.PullEvents import AccountUpdateEvent
+from pyload.utils import chmod, lock
+
+ACC_VERSION = 1
+
+
+class AccountManager:
+ """manages all accounts"""
+
+ #--------------------------------------------------------------------------
+ def __init__(self, core):
+ """Constructor"""
+
+ self.core = core
+ self.lock = Lock()
+
+ self.initPlugins()
+ self.saveAccounts() # save to add categories to conf
+
+ def initPlugins(self):
+ self.accounts = {} # key = ( plugin )
+ self.plugins = {}
+
+ self.initAccountPlugins()
+ self.loadAccounts()
+
+ def getAccountPlugin(self, plugin):
+ """get account instance for plugin or None if anonymous"""
+ if plugin in self.accounts:
+ if plugin not in self.plugins:
+ try:
+ self.plugins[plugin] = self.core.pluginManager.loadClass("accounts", plugin)(self, self.accounts[plugin])
+ except TypeError: # The account class no longer exists (blacklisted plugin). Skipping the account to avoid crash
+ return None
+
+ return self.plugins[plugin]
+ else:
+ return None
+
+ def getAccountPlugins(self):
+ """ get all account instances"""
+
+ plugins = []
+ for plugin in self.accounts.keys():
+ plugins.append(self.getAccountPlugin(plugin))
+
+ return plugins
+
+ #--------------------------------------------------------------------------
+ def loadAccounts(self):
+ """loads all accounts available"""
+
+ if not exists("accounts.conf"):
+ f = open("accounts.conf", "wb")
+ f.write("version: " + str(ACC_VERSION))
+ f.close()
+
+ f = open("accounts.conf", "rb")
+ content = f.readlines()
+ version = content[0].split(":")[1].strip() if content else ""
+ f.close()
+
+ if not version or int(version) < ACC_VERSION:
+ copy("accounts.conf", "accounts.backup")
+ f = open("accounts.conf", "wb")
+ f.write("version: " + str(ACC_VERSION))
+ f.close()
+ self.core.log.warning(_("Account settings deleted, due to new config format."))
+ return
+
+ plugin = ""
+ name = ""
+
+ for line in content[1:]:
+ line = line.strip()
+
+ if not line: continue
+ if line.startswith("#"): continue
+ if line.startswith("version"): continue
+
+ if line.endswith(":") and line.count(":") == 1:
+ plugin = line[:-1]
+ self.accounts[plugin] = {}
+
+ elif line.startswith("@"):
+ try:
+ option = line[1:].split()
+ self.accounts[plugin][name]['options'][option[0]] = [] if len(option) < 2 else ([option[1]] if len(option) < 3 else option[1:])
+ except:
+ pass
+
+ elif ":" in line:
+ name, sep, pw = line.partition(":")
+ self.accounts[plugin][name] = {"password": pw, "options": {}, "valid": True}
+
+ #--------------------------------------------------------------------------
+ def saveAccounts(self):
+ """save all account information"""
+
+ f = open("accounts.conf", "wb")
+ f.write("version: " + str(ACC_VERSION) + "\n")
+
+ for plugin, accounts in self.accounts.iteritems():
+ f.write("\n")
+ f.write(plugin+":\n")
+
+ for name,data in accounts.iteritems():
+ f.write("\n\t%s:%s\n" % (name,data['password']) )
+ if data['options']:
+ for option, values in data['options'].iteritems():
+ f.write("\t@%s %s\n" % (option, " ".join(values)))
+
+ f.close()
+ chmod(f.name, 0600)
+
+ #--------------------------------------------------------------------------
+ def initAccountPlugins(self):
+ """init names"""
+ for name in self.core.pluginManager.getAccountPlugins():
+ self.accounts[name] = {}
+
+ @lock
+ def updateAccount(self, plugin , user, password=None, options={}):
+ """add or update account"""
+ if plugin in self.accounts:
+ p = self.getAccountPlugin(plugin)
+ updated = p.updateAccounts(user, password, options)
+ #since accounts is a reference held by the plugin, self.accounts doesn't need to be updated here
+
+ self.saveAccounts()
+ if updated: p.scheduleRefresh(user, force=False)
+
+ @lock
+ def removeAccount(self, plugin, user):
+ """remove account"""
+
+ if plugin in self.accounts:
+ p = self.getAccountPlugin(plugin)
+ p.removeAccount(user)
+
+ self.saveAccounts()
+
+ @lock
+ def getAccountInfos(self, force=True, refresh=False):
+ data = {}
+
+ if refresh:
+ self.core.scheduler.addJob(0, self.core.accountManager.getAccountInfos)
+ force = False
+
+ for p in self.accounts.keys():
+ if self.accounts[p]:
+ p = self.getAccountPlugin(p)
+ if p:
+ data[p.__name__] = p.getAllAccounts(force)
+ else: # When an account has been skipped, p is None
+ data[p] = []
+ else:
+ data[p] = []
+ e = AccountUpdateEvent()
+ self.core.pullManager.addEvent(e)
+ return data
+
+ def sendChange(self):
+ e = AccountUpdateEvent()
+ self.core.pullManager.addEvent(e)
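
For reference, the `accounts.conf` file that `loadAccounts` parses and `saveAccounts` writes looks like the sketch below: a version line, one `PluginName:` section per account plugin, an indented `user:password` line per account (written with a leading tab by `saveAccounts`; the parser strips whitespace), and optional `@option value` lines. The user, password and time window here are invented:

version: 1

ShareonlineBiz:

    someuser:s3cretpw
    @time 8:00-20:00
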
diff --git a/pyload/plugins/Container.py b/pyload/plugins/Container.py
new file mode 100644
index 000000000..747232c18
--- /dev/null
+++ b/pyload/plugins/Container.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from os import remove
+from os.path import basename, exists
+
+from pyload.plugins.Crypter import Crypter
+from pyload.utils import safe_join
+
+
+class Container(Crypter):
+ __name__ = "Container"
+ __type__ = "container"
+ __version__ = "0.1"
+
+ __pattern__ = None
+
+ __description__ = """Base container decrypter plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+
+ def preprocessing(self, thread):
+ """prepare"""
+
+ self.setup()
+ self.thread = thread
+
+ self.loadToDisk()
+
+ self.decrypt(self.pyfile)
+ self.deleteTmp()
+
+ self.createPackages()
+
+
+ def loadToDisk(self):
+ """loads container to disk if its stored remotely and overwrite url,
+ or check existent on several places at disk"""
+
+ if self.pyfile.url.startswith("http"):
+ self.pyfile.name = re.findall("([^\/=]+)", self.pyfile.url)[-1]
+ content = self.load(self.pyfile.url)
+ self.pyfile.url = safe_join(self.config['general']['download_folder'], self.pyfile.name)
+ f = open(self.pyfile.url, "wb" )
+ f.write(content)
+ f.close()
+
+ else:
+ self.pyfile.name = basename(self.pyfile.url)
+ if not exists(self.pyfile.url):
+ if exists(safe_join(pypath, self.pyfile.url)):
+ self.pyfile.url = safe_join(pypath, self.pyfile.url)
+ else:
+ self.fail(_("File not exists."))
+
+
+ def deleteTmp(self):
+ if self.pyfile.name.startswith("tmp_"):
+ remove(self.pyfile.url)
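
A usage note, not part of the diff: a container plugin only needs to override `decrypt`; by the time it runs, `loadToDisk` has ensured that `pyfile.url` points to a local file. A minimal sketch of a plain-text link container (plugin name and pattern are made up):

from __future__ import with_statement

from pyload.plugins.Container import Container


class TxtList(Container):
    __name__ = "TxtList"          # hypothetical container plugin, for illustration only
    __version__ = "0.1"

    __pattern__ = r'.+\.txt$'

    __description__ = """Plain-text link container (sketch)"""

    def decrypt(self, pyfile):
        with open(pyfile.url) as f:
            # one link per line; pyLoad generates package names from self.urls
            self.urls = [line.strip() for line in f if line.strip()]
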
diff --git a/pyload/plugins/Crypter.py b/pyload/plugins/Crypter.py
new file mode 100644
index 000000000..ed72c57c1
--- /dev/null
+++ b/pyload/plugins/Crypter.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Plugin import Plugin
+
+
+class Crypter(Plugin):
+ __name__ = "Crypter"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = None
+
+ __description__ = """Base decrypter plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+
+ def __init__(self, pyfile):
+ Plugin.__init__(self, pyfile)
+
+ #: Put all packages here. It's a list of tuples like: ( name, [list of links], folder )
+ self.packages = []
+
+ #: List of urls, pyLoad will generate packagenames
+ self.urls = []
+
+ self.multiDL = True
+ self.limitDL = 0
+
+
+ def preprocessing(self, thread):
+ """prepare"""
+ self.setup()
+ self.thread = thread
+
+ self.decrypt(self.pyfile)
+
+ self.createPackages()
+
+
+ def decrypt(self, pyfile):
+ raise NotImplementedError
+
+ def createPackages(self):
+ """ create new packages from self.packages """
+ for pack in self.packages:
+
+ name, links, folder = pack
+
+ self.logDebug("Parsed package %(name)s with %(len)d links" % {"name": name, "len": len(links)})
+
+ links = [x.decode("utf-8") for x in links]
+
+ pid = self.api.addPackage(name, links, self.pyfile.package().queue)
+
+ if name != folder is not None:
+ self.api.setPackageData(pid, {"folder": folder}) #: Due to not break API addPackage method right now
+ self.logDebug("Set package %(name)s folder to %(folder)s" % {"name": name, "folder": folder})
+
+ if self.pyfile.package().password:
+ self.api.setPackageData(pid, {"password": self.pyfile.package().password})
+
+ if self.urls:
+ self.api.generateAndAddPackages(self.urls)
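
A minimal sketch, not part of the diff, of how a decrypter fills the two containers documented above — `self.packages` with (name, links, folder) tuples, or `self.urls` for plain links; the host name and regular expressions are invented:

import re

from pyload.plugins.Crypter import Crypter


class ExampleFolderCom(Crypter):
    __name__ = "ExampleFolderCom"   # hypothetical folder decrypter, for illustration only
    __version__ = "0.1"

    __pattern__ = r'http://(?:www\.)?example-folder\.com/folder/\w+'

    __description__ = """Example-folder.com folder decrypter plugin (sketch)"""

    def decrypt(self, pyfile):
        html = self.load(pyfile.url)
        links = re.findall(r'href="(http://example-folder\.com/file/\w+)"', html)
        if not links:
            self.fail("Could not find any links")

        name = folder = "example folder"   # a real plugin would parse this from the page title
        self.packages.append((name, links, folder))
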
diff --git a/pyload/plugins/Hook.py b/pyload/plugins/Hook.py
new file mode 100644
index 000000000..b9ffbc647
--- /dev/null
+++ b/pyload/plugins/Hook.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+
+from traceback import print_exc
+
+from pyload.plugins.Plugin import Base
+
+
+class Expose(object):
+ """ used for decoration to declare rpc services """
+
+ def __new__(cls, f, *args, **kwargs):
+ hookManager.addRPC(f.__module__, f.func_name, f.func_doc)
+ return f
+
+
+def threaded(f):
+
+ def run(*args,**kwargs):
+ hookManager.startThread(f, *args, **kwargs)
+ return run
+
+
+class Hook(Base):
+ """
+ Base class for hook plugins.
+ """
+ __name__ = "Hook"
+ __type__ = "hook"
+ __version__ = "0.2"
+
+ __config__ = [("name", "type", "desc", "default")]
+
+ __description__ = """Interface for hook"""
+ __author_name__ = ("mkaay", "RaNaN")
+ __author_mail__ = ("mkaay@mkaay.de", "RaNaN@pyload.org")
+
+ #: automatically registers event listeners for functions; the attribute will be deleted, don't use it yourself
+ event_map = None
+
+ # Alternative to event_map
+ #: List of events the plugin can handle; name the functions exactly like the event name.
+ event_list = None # don't make duplicate entries in event_map
+
+ #: periodic call interval in seconds
+ interval = 60
+
+
+ def __init__(self, core, manager):
+ Base.__init__(self, core)
+
+ #: Provide information in dict here, usable by API `getInfo`
+ self.info = None
+
+ #: Callback of periodical job task, used by hookmanager
+ self.cb = None
+
+ #: `HookManager`
+ self.manager = manager
+
+ #register events
+ if self.event_map:
+ for event, funcs in self.event_map.iteritems():
+ if type(funcs) in (list, tuple):
+ for f in funcs:
+ self.manager.addEvent(event, getattr(self,f))
+ else:
+ self.manager.addEvent(event, getattr(self,funcs))
+
+ #delete for various reasons
+ self.event_map = None
+
+ if self.event_list:
+ for f in self.event_list:
+ self.manager.addEvent(f, getattr(self,f))
+
+ self.event_list = None
+
+ self.setup()
+ self.initPeriodical()
+
+
+ def initPeriodical(self):
+ if self.interval >=1:
+ self.cb = self.core.scheduler.addJob(0, self._periodical, threaded=False)
+
+ def _periodical(self):
+ try:
+ if self.isActivated(): self.periodical()
+ except Exception, e:
+ self.logError(_("Error executing hooks: %s") % str(e))
+ if self.core.debug:
+ print_exc()
+
+ self.cb = self.core.scheduler.addJob(self.interval, self._periodical, threaded=False)
+
+
+ def __repr__(self):
+ return "<Hook %s>" % self.__name__
+
+ def setup(self):
+ """ more init stuff if needed """
+ pass
+
+ def unload(self):
+ """ called when hook was deactivated """
+ pass
+
+ def isActivated(self):
+ """ checks if hook is activated"""
+ return self.config.getPlugin(self.__name__, "activated")
+
+
+ #event methods - overwrite these if needed
+ def coreReady(self):
+ pass
+
+ def coreExiting(self):
+ pass
+
+ def downloadPreparing(self, pyfile):
+ pass
+
+ def downloadFinished(self, pyfile):
+ pass
+
+ def downloadFailed(self, pyfile):
+ pass
+
+ def packageFinished(self, pypack):
+ pass
+
+ def beforeReconnecting(self, ip):
+ pass
+
+ def afterReconnecting(self, ip):
+ pass
+
+ def periodical(self):
+ pass
+
+ def newCaptchaTask(self, task):
+ """ new captcha task for the plugin, it MUST set the handler and timeout or will be ignored """
+ pass
+
+ def captchaCorrect(self, task):
+ pass
+
+ def captchaInvalid(self, task):
+ pass
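
A minimal sketch, not part of the diff, of a hook that ties the pieces above together: an `activated` switch for `isActivated`, an `interval` for the periodic job, and one of the listed event methods overridden. The plugin name and log texts are invented:

from pyload.plugins.Hook import Hook


class ExampleNotifier(Hook):
    __name__ = "ExampleNotifier"    # hypothetical hook, for illustration only
    __version__ = "0.1"
    __config__ = [("activated", "bool", "Activated", "False")]

    __description__ = """Log finished downloads (sketch)"""

    interval = 5 * 60               # _periodical() reschedules itself every 5 minutes

    def periodical(self):
        self.logDebug("Periodic check")

    def downloadFinished(self, pyfile):
        self.logInfo("Finished: %s" % pyfile.name)
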
diff --git a/pyload/plugins/Hoster.py b/pyload/plugins/Hoster.py
new file mode 100644
index 000000000..23369deec
--- /dev/null
+++ b/pyload/plugins/Hoster.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Plugin import Plugin
+
+
+def getInfo(self):
+ #result = [ .. (name, size, status, url) .. ]
+ return
+
+
+class Hoster(Plugin):
+ __name__ = "Hoster"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = None
+
+ __description__ = """Base hoster plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
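
The base class itself stays almost empty — the real work happens in `process`, which the `Plugin` base class further down in this diff declares and every hoster has to overwrite, together with helpers such as `load`, `wait`, `offline` and `download`. A minimal sketch, not part of the diff; the host, URLs and regular expressions are invented:

import re

from pyload.plugins.Hoster import Hoster


class ExampleShareCom(Hoster):
    __name__ = "ExampleShareCom"    # hypothetical hoster, for illustration only
    __version__ = "0.1"

    __pattern__ = r'http://(?:www\.)?example-share\.com/file/\w+'

    __description__ = """Example-share.com hoster plugin (sketch)"""

    def process(self, pyfile):
        html = self.load(pyfile.url, decode=True)
        if "File not found" in html:
            self.offline()

        m = re.search(r'<h1>(.+?)</h1>', html)
        if m:
            pyfile.name = m.group(1)

        self.wait(30)                                      # free-user wait time
        self.download(pyfile.url + "/download", disposition=True)
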
diff --git a/pyload/plugins/OCR.py b/pyload/plugins/OCR.py
new file mode 100644
index 000000000..0991184f3
--- /dev/null
+++ b/pyload/plugins/OCR.py
@@ -0,0 +1,299 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+import os
+import logging
+import subprocess
+
+from os.path import abspath, join
+from PIL import Image
+from PIL import TiffImagePlugin
+from PIL import PngImagePlugin
+from PIL import GifImagePlugin
+from PIL import JpegImagePlugin
+
+
+class OCR(object):
+ __name__ = "OCR"
+ __type__ = "ocr"
+ __version__ = "0.1"
+
+ __description__ = """OCR base plugin"""
+ __author_name__ = "pyLoad Team"
+ __author_mail__ = "admin@pyload.org"
+
+
+ def __init__(self):
+ self.logger = logging.getLogger("log")
+
+ def load_image(self, image):
+ self.image = Image.open(image)
+ self.pixels = self.image.load()
+ self.result_captcha = ''
+
+ def unload(self):
+ """delete all tmp images"""
+ pass
+
+ def threshold(self, value):
+ self.image = self.image.point(lambda a: a * value + 10)
+
+ def run(self, command):
+ """Run a command"""
+
+ popen = subprocess.Popen(command, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ popen.wait()
+ output = popen.stdout.read() + " | " + popen.stderr.read()
+ popen.stdout.close()
+ popen.stderr.close()
+ self.logger.debug("Tesseract ReturnCode %s Output: %s" % (popen.returncode, output))
+
+ def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True):
+ #self.logger.debug("create tmp tif")
+ #tmp = tempfile.NamedTemporaryFile(suffix=".tif")
+ tmp = open(join("tmp", "tmpTif_%s.tif" % self.__name__), "wb")
+ tmp.close()
+ #self.logger.debug("create tmp txt")
+ #tmpTxt = tempfile.NamedTemporaryFile(suffix=".txt")
+ tmpTxt = open(join("tmp", "tmpTxt_%s.txt" % self.__name__), "wb")
+ tmpTxt.close()
+
+ self.logger.debug("save tiff")
+ self.image.save(tmp.name, 'TIFF')
+
+ if os.name == "nt":
+ tessparams = [join(pypath, "tesseract", "tesseract.exe")]
+ else:
+ tessparams = ['tesseract']
+
+ tessparams.extend([abspath(tmp.name), abspath(tmpTxt.name).replace(".txt", "")])
+
+ if subset and (digits or lowercase or uppercase):
+ #self.logger.debug("create temp subset config")
+ #tmpSub = tempfile.NamedTemporaryFile(suffix=".subset")
+ tmpSub = open(join("tmp", "tmpSub_%s.subset" % self.__name__), "wb")
+ tmpSub.write("tessedit_char_whitelist ")
+ if digits:
+ tmpSub.write("0123456789")
+ if lowercase:
+ tmpSub.write("abcdefghijklmnopqrstuvwxyz")
+ if uppercase:
+ tmpSub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ tmpSub.write("\n")
+ tessparams.append("nobatch")
+ tessparams.append(abspath(tmpSub.name))
+ tmpSub.close()
+
+ self.logger.debug("run tesseract")
+ self.run(tessparams)
+ self.logger.debug("read txt")
+
+ try:
+ with open(tmpTxt.name, 'r') as f:
+ self.result_captcha = f.read().replace("\n", "")
+ except:
+ self.result_captcha = ""
+
+ self.logger.debug(self.result_captcha)
+ try:
+ os.remove(tmp.name)
+ os.remove(tmpTxt.name)
+ if subset and (digits or lowercase or uppercase):
+ os.remove(tmpSub.name)
+ except:
+ pass
+
+ def get_captcha(self, name):
+ raise NotImplementedError
+
+ def to_greyscale(self):
+ if self.image.mode != 'L':
+ self.image = self.image.convert('L')
+
+ self.pixels = self.image.load()
+
+ def eval_black_white(self, limit):
+ self.pixels = self.image.load()
+ w, h = self.image.size
+ for x in xrange(w):
+ for y in xrange(h):
+ if self.pixels[x, y] > limit:
+ self.pixels[x, y] = 255
+ else:
+ self.pixels[x, y] = 0
+
+ def clean(self, allowed):
+ pixels = self.pixels
+
+ w, h = self.image.size
+
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 255:
+ continue
+ # No point in processing white pixels since we only want to remove black pixels
+ count = 0
+
+ try:
+ if pixels[x - 1, y - 1] != 255:
+ count += 1
+ if pixels[x - 1, y] != 255:
+ count += 1
+ if pixels[x - 1, y + 1] != 255:
+ count += 1
+ if pixels[x, y + 1] != 255:
+ count += 1
+ if pixels[x + 1, y + 1] != 255:
+ count += 1
+ if pixels[x + 1, y] != 255:
+ count += 1
+ if pixels[x + 1, y - 1] != 255:
+ count += 1
+ if pixels[x, y - 1] != 255:
+ count += 1
+ except:
+ pass
+
+ # not enough neighbors are dark pixels so mark this pixel
+ # to be changed to white
+ if count < allowed:
+ pixels[x, y] = 1
+
+ # second pass: this time set all 1's to 255 (white)
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 1:
+ pixels[x, y] = 255
+
+ self.pixels = pixels
+
+ def derotate_by_average(self):
+ """rotate by checking each angle and guess most suitable"""
+
+ w, h = self.image.size
+ pixels = self.pixels
+
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 0:
+ pixels[x, y] = 155
+
+ highest = {}
+ counts = {}
+
+ for angle in xrange(-45, 45):
+
+ tmpimage = self.image.rotate(angle)
+
+ pixels = tmpimage.load()
+
+ w, h = self.image.size
+
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 0:
+ pixels[x, y] = 255
+
+ count = {}
+
+ for x in xrange(w):
+ count[x] = 0
+ for y in xrange(h):
+ if pixels[x, y] == 155:
+ count[x] += 1
+
+ sum = 0
+ cnt = 0
+
+ for x in count.values():
+ if x != 0:
+ sum += x
+ cnt += 1
+
+ avg = sum / cnt
+ counts[angle] = cnt
+ highest[angle] = 0
+ for x in count.values():
+ if x > highest[angle]:
+ highest[angle] = x
+
+ highest[angle] = highest[angle] - avg
+
+ hkey = 0
+ hvalue = 0
+
+ for key, value in highest.iteritems():
+ if value > hvalue:
+ hkey = key
+ hvalue = value
+
+ self.image = self.image.rotate(hkey)
+ pixels = self.image.load()
+
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 0:
+ pixels[x, y] = 255
+
+ if pixels[x, y] == 155:
+ pixels[x, y] = 0
+
+ self.pixels = pixels
+
+ def split_captcha_letters(self):
+ captcha = self.image
+ started = False
+ letters = []
+ width, height = captcha.size
+ bottomY, topY = 0, height
+ pixels = captcha.load()
+
+ for x in xrange(width):
+ black_pixel_in_col = False
+ for y in xrange(height):
+ if pixels[x, y] != 255:
+ if not started:
+ started = True
+ firstX = x
+ lastX = x
+
+ if y > bottomY:
+ bottomY = y
+ if y < topY:
+ topY = y
+ if x > lastX:
+ lastX = x
+
+ black_pixel_in_col = True
+
+ if black_pixel_in_col is False and started is True:
+ rect = (firstX, topY, lastX, bottomY)
+ new_captcha = captcha.crop(rect)
+
+ w, h = new_captcha.size
+ if w > 5 and h > 5:
+ letters.append(new_captcha)
+
+ started = False
+ bottomY, topY = 0, height
+
+ return letters
+
+ def correct(self, values, var=None):
+ if var:
+ result = var
+ else:
+ result = self.result_captcha
+
+ for key, item in values.iteritems():
+
+ if key.__class__ == str:
+ result = result.replace(key, item)
+ else:
+ for expr in key:
+ result = result.replace(expr, item)
+
+ if var:
+ return result
+ else:
+ self.result_captcha = result
diff --git a/pyload/plugins/Plugin.py b/pyload/plugins/Plugin.py
new file mode 100644
index 000000000..31cbfca57
--- /dev/null
+++ b/pyload/plugins/Plugin.py
@@ -0,0 +1,629 @@
+# -*- coding: utf-8 -*-
+
+from time import time, sleep
+from random import randint
+
+import os
+from os import remove, makedirs, chmod, stat
+from os.path import exists, join
+
+if os.name != "nt":
+ from os import chown
+ from pwd import getpwnam
+ from grp import getgrnam
+
+from itertools import islice
+
+from pyload.utils import safe_join, safe_filename, fs_encode, fs_decode
+
+def chunks(iterable, size):
+ it = iter(iterable)
+ item = list(islice(it, size))
+ while item:
+ yield item
+ item = list(islice(it, size))
+
+
+class Abort(Exception):
+ """ raised when aborted """
+
+
+class Fail(Exception):
+ """ raised when failed """
+
+
+class Reconnect(Exception):
+ """ raised when reconnected """
+
+
+class Retry(Exception):
+ """ raised when start again from beginning """
+
+
+class SkipDownload(Exception):
+ """ raised when download should be skipped """
+
+
+class Base(object):
+ """
+ A Base class with log/config/db methods *all* plugin types can use
+ """
+
+ def __init__(self, core):
+ #: Core instance
+ self.core = core
+ #: logging instance
+ self.log = core.log
+ #: core config
+ self.config = core.config
+
+ #log functions
+ def logInfo(self, *args):
+ self.log.info("%s: %s" % (self.__name__, " | ".join([a if isinstance(a, basestring) else str(a) for a in args])))
+
+ def logWarning(self, *args):
+ self.log.warning("%s: %s" % (self.__name__, " | ".join([a if isinstance(a, basestring) else str(a) for a in args])))
+
+ def logError(self, *args):
+ self.log.error("%s: %s" % (self.__name__, " | ".join([a if isinstance(a, basestring) else str(a) for a in args])))
+
+ def logDebug(self, *args):
+ self.log.debug("%s: %s" % (self.__name__, " | ".join([a if isinstance(a, basestring) else str(a) for a in args])))
+
+
+ def setConf(self, option, value):
+ """ see `setConfig` """
+ self.config.setPlugin(self.__name__, option, value)
+
+ def setConfig(self, option, value):
+ """ Set config value for current plugin
+
+ :param option:
+ :param value:
+ :return:
+ """
+ self.setConf(option, value)
+
+ #: Deprecated method
+ def getConf(self, option):
+ """ see `getConfig` """
+ return self.getConfig(option)
+
+ def getConfig(self, option):
+ """ Returns config value for current plugin
+
+ :param option:
+ :return:
+ """
+ return self.config.getPlugin(self.__name__, option)
+
+ def setStorage(self, key, value):
+ """ Saves a value persistently to the database """
+ self.core.db.setStorage(self.__name__, key, value)
+
+ def store(self, key, value):
+ """ same as `setStorage` """
+ self.core.db.setStorage(self.__name__, key, value)
+
+ def getStorage(self, key=None, default=None):
+ """ Retrieves saved value or dict of all saved entries if key is None """
+ if key is not None:
+ return self.core.db.getStorage(self.__name__, key) or default
+ return self.core.db.getStorage(self.__name__, key)
+
+ def retrieve(self, *args, **kwargs):
+ """ same as `getStorage` """
+ return self.getStorage(*args, **kwargs)
+
+ def delStorage(self, key):
+ """ Delete entry in db """
+ self.core.db.delStorage(self.__name__, key)
+
+
+class Plugin(Base):
+ """
+ Base plugin for hoster/crypter.
+ Overwrite `process` / `decrypt` in your subclassed plugin.
+ """
+ __name__ = "Plugin"
+ __type__ = "hoster"
+ __version__ = "0.5"
+
+ __pattern__ = None
+ __config__ = [("name", "type", "desc", "default")]
+
+ __description__ = """Base plugin"""
+ __author_name__ = ("RaNaN", "spoob", "mkaay")
+ __author_mail__ = ("RaNaN@pyload.org", "spoob@pyload.org", "mkaay@mkaay.de")
+
+
+ def __init__(self, pyfile):
+ Base.__init__(self, pyfile.m.core)
+
+ #: engage wan reconnection
+ self.wantReconnect = False
+
+ #: enable simultaneous processing of multiple downloads
+ self.multiDL = True
+ self.limitDL = 0
+
+ #: chunk limit
+ self.chunkLimit = 1
+ self.resumeDownload = False
+
+ #: time() + wait in seconds
+ self.waitUntil = 0
+ self.waiting = False
+
+ #: captcha reader instance
+ self.ocr = None
+
+ #: account handler instance, see :py:class:`Account`
+ self.account = pyfile.m.core.accountManager.getAccountPlugin(self.__name__)
+
+ #: premium status
+ self.premium = False
+ #: username/login
+ self.user = None
+
+ if self.account and not self.account.canUse():
+ self.account = None
+
+ if self.account:
+ self.user, data = self.account.selectAccount()
+ #: Browser instance, see `network.Browser`
+ self.req = self.account.getAccountRequest(self.user)
+ self.chunkLimit = -1 # chunk limit, -1 for unlimited
+ #: enables resume (will be ignored if the server doesn't accept chunks)
+ self.resumeDownload = True
+ self.multiDL = True #every hoster with an account should provide multiple downloads
+ #: premium status
+ self.premium = self.account.isPremium(self.user)
+ else:
+ self.req = pyfile.m.core.requestFactory.getRequest(self.__name__)
+
+ #: associated pyfile instance, see `PyFile`
+ self.pyfile = pyfile
+
+ self.thread = None # holds thread in future
+
+ #: location where the last call to download was saved
+ self.lastDownload = ""
+ #: re match of the last call to `checkDownload`
+ self.lastCheck = None
+
+ #: js engine, see `JsEngine`
+ self.js = self.core.js
+
+ #: captcha task
+ self.cTask = None
+
+ #: amount of retries already made
+ self.retries = 0
+
+ #: some plugins store html code here
+ self.html = None
+
+ #: quick caller for API
+ self.api = self.core.api
+
+ self.init()
+
+ def getChunkCount(self):
+ if self.chunkLimit <= 0:
+ return self.config['download']['chunks']
+ return min(self.config['download']['chunks'], self.chunkLimit)
+
+ def __call__(self):
+ return self.__name__
+
+ def init(self):
+ """initialize the plugin (in addition to `__init__`)"""
+ pass
+
+ def setup(self):
+ """ setup for enviroment and other things, called before downloading (possibly more than one time)"""
+ pass
+
+ def preprocessing(self, thread):
+ """ handles important things to do before starting """
+ self.thread = thread
+
+ if self.account:
+ self.account.checkLogin(self.user)
+ else:
+ self.req.clearCookies()
+
+ self.setup()
+
+ self.pyfile.setStatus("starting")
+
+ return self.process(self.pyfile)
+
+
+ def process(self, pyfile):
+ """the 'main' method of every plugin, you **have to** overwrite it"""
+ raise NotImplementedError
+
+ def resetAccount(self):
+ """ dont use account and retry download """
+ self.account = None
+ self.req = self.core.requestFactory.getRequest(self.__name__)
+ self.retry()
+
+ def checksum(self, local_file=None):
+ """
+ return codes:
+ 0 - checksum ok
+ 1 - checksum wrong
+ 5 - can't get checksum
+ 10 - not implemented
+ 20 - unknown error
+ """
+ #@TODO checksum check hook
+
+ return True, 10
+
+
+ def setWait(self, seconds, reconnect=None):
+ """Set a specific wait time later used with `wait`
+
+ :param seconds: wait time in seconds
+ :param reconnect: True if a reconnect would avoid wait time
+ """
+ if reconnect:
+ self.wantReconnect = True
+ self.pyfile.waitUntil = time() + int(seconds)
+
+ def wait(self, seconds=None, reconnect=None):
+ """ Waits the time previously set or use these from arguments. See `setWait`
+ """
+ if seconds:
+ self.setWait(seconds, reconnect)
+
+ self._wait()
+
+ def _wait(self):
+ self.waiting = True
+ self.pyfile.setStatus("waiting")
+
+ while self.pyfile.waitUntil > time():
+ self.thread.m.reconnecting.wait(2)
+
+ if self.pyfile.abort:
+ raise Abort
+ if self.thread.m.reconnecting.isSet():
+ self.waiting = False
+ self.wantReconnect = False
+ raise Reconnect
+
+ self.waiting = False
+ self.pyfile.setStatus("starting")
+
+ def fail(self, reason):
+ """ fail and give reason """
+ raise Fail(reason)
+
+ def offline(self):
+ """ fail and indicate file is offline """
+ raise Fail("offline")
+
+ def tempOffline(self):
+ """ fail and indicates file ist temporary offline, the core may take consequences """
+ raise Fail("temp. offline")
+
+ def retry(self, max_tries=3, wait_time=1, reason=""):
+ """Retries and begin again from the beginning
+
+ :param max_tries: number of maximum retries
+ :param wait_time: time to wait in seconds
+ :param reason: reason for retrying, will be passed to fail if max_tries reached
+ """
+ if 0 < max_tries <= self.retries:
+ if not reason: reason = "Max retries reached"
+ raise Fail(reason)
+
+ self.wantReconnect = False
+ self.setWait(wait_time)
+ self.wait()
+
+ self.retries += 1
+ raise Retry(reason)
+
+ def invalidCaptcha(self):
+ if self.cTask:
+ self.cTask.invalid()
+
+ def correctCaptcha(self):
+ if self.cTask:
+ self.cTask.correct()
+
+ def decryptCaptcha(self, url, get={}, post={}, cookies=False, forceUser=False, imgtype='jpg',
+ result_type='textual'):
+ """ Loads a captcha and decrypts it with ocr, plugin, user input
+
+ :param url: url of captcha image
+ :param get: get part for request
+ :param post: post part for request
+ :param cookies: True if cookies should be enabled
+ :param forceUser: if True, ocr is not used
+ :param imgtype: Type of the Image
+ :param result_type: 'textual' if text is written on the captcha\
+ or 'positional' for captchas where the user has to click\
+ on a specific region on the captcha
+
+ :return: result of decrypting
+ """
+
+ img = self.load(url, get=get, post=post, cookies=cookies)
+
+ id = ("%.2f" % time())[-6:].replace(".", "")
+ temp_file = open(join("tmp", "tmpCaptcha_%s_%s.%s" % (self.__name__, id, imgtype)), "wb")
+ temp_file.write(img)
+ temp_file.close()
+
+ has_plugin = self.__name__ in self.core.pluginManager.captchaPlugins
+
+ if self.core.captcha:
+ Ocr = self.core.pluginManager.loadClass("captcha", self.__name__)
+ else:
+ Ocr = None
+
+ if Ocr and not forceUser:
+ sleep(randint(3000, 5000) / 1000.0)
+ if self.pyfile.abort: raise Abort
+
+ ocr = Ocr()
+ result = ocr.get_captcha(temp_file.name)
+ else:
+ captchaManager = self.core.captchaManager
+ task = captchaManager.newTask(img, imgtype, temp_file.name, result_type)
+ self.cTask = task
+ captchaManager.handleCaptcha(task)
+
+ while task.isWaiting():
+ if self.pyfile.abort:
+ captchaManager.removeTask(task)
+ raise Abort
+ sleep(1)
+
+ captchaManager.removeTask(task)
+
+ if task.error and has_plugin: #ignore default error message since the user could use OCR
+ self.fail(_("Pil and tesseract not installed and no Client connected for captcha decrypting"))
+ elif task.error:
+ self.fail(task.error)
+ elif not task.result:
+ self.fail(_("No captcha result obtained in appropiate time by any of the plugins."))
+
+ result = task.result
+ self.logDebug("Received captcha result: %s" % str(result))
+
+ if not self.core.debug:
+ try:
+ remove(temp_file.name)
+ except:
+ pass
+
+ return result
+
+
+ def load(self, url, get={}, post={}, ref=True, cookies=True, just_header=False, decode=False):
+ """Load content at url and returns it
+
+ :param url:
+ :param get:
+ :param post:
+ :param ref:
+ :param cookies:
+ :param just_header: if True only the header will be retrieved and returned as dict
+ :param decode: Whether to decode the output according to the http header; should be True in most cases
+ :return: Loaded content
+ """
+ if self.pyfile.abort: raise Abort
+ #utf8 vs decode -> please use decode attribute in all future plugins
+ if type(url) == unicode: url = str(url)
+
+ res = self.req.load(url, get, post, ref, cookies, just_header, decode=decode)
+
+ if self.core.debug:
+ from inspect import currentframe
+
+ frame = currentframe()
+ if not exists(join("tmp", self.__name__)):
+ makedirs(join("tmp", self.__name__))
+
+ f = open(
+ join("tmp", self.__name__, "%s_line%s.dump.html" % (frame.f_back.f_code.co_name, frame.f_back.f_lineno))
+ , "wb")
+ del frame # delete the frame or it won't be cleaned up
+
+ try:
+ tmp = res.encode("utf8")
+ except:
+ tmp = res
+
+ f.write(tmp)
+ f.close()
+
+ if just_header:
+ #parse header
+ header = {"code": self.req.code}
+ for line in res.splitlines():
+ line = line.strip()
+ if not line or ":" not in line: continue
+
+ key, none, value = line.partition(":")
+ key = key.lower().strip()
+ value = value.strip()
+
+ if key in header:
+ if type(header[key]) == list:
+ header[key].append(value)
+ else:
+ header[key] = [header[key], value]
+ else:
+ header[key] = value
+ res = header
+
+ return res
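+ # Illustrative usage (URL and header key are assumptions): with just_header=True the
+ # return value is the header dict built above, so a redirect target can be read as:
+ #
+ #     header = self.load("http://example.com/file", just_header=True)
+ #     location = header.get("location")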
+
+ def download(self, url, get={}, post={}, ref=True, cookies=True, disposition=False):
+ """Downloads the content at url to download folder
+
+ :param url:
+ :param get:
+ :param post:
+ :param ref:
+ :param cookies:
+ :param disposition: if True and server provides content-disposition header\
+ the filename will be changed if needed
+ :return: The location where the file was saved
+ """
+
+ self.checkForSameFiles()
+
+ self.pyfile.setStatus("downloading")
+
+ download_folder = self.config['general']['download_folder']
+
+ location = safe_join(download_folder, self.pyfile.package().folder)
+
+ if not exists(location):
+ makedirs(location, int(self.config['permission']['folder'], 8))
+
+ if self.config['permission']['change_dl'] and os.name != "nt":
+ try:
+ uid = getpwnam(self.config['permission']['user'])[2]
+ gid = getgrnam(self.config['permission']['group'])[2]
+
+ chown(location, uid, gid)
+ except Exception, e:
+ self.logWarning(_("Setting User and Group failed: %s") % str(e))
+
+ # convert back to unicode
+ location = fs_decode(location)
+ name = safe_filename(self.pyfile.name)
+
+ filename = join(location, name)
+
+ self.core.hookManager.dispatchEvent("downloadStarts", self.pyfile, url, filename)
+
+ try:
+ newname = self.req.httpDownload(url, filename, get=get, post=post, ref=ref, cookies=cookies,
+ chunks=self.getChunkCount(), resume=self.resumeDownload,
+ progressNotify=self.pyfile.setProgress, disposition=disposition)
+ finally:
+ self.pyfile.size = self.req.size
+
+ if disposition and newname and newname != name: #triple check, just to be sure
+ self.logInfo("%(name)s saved as %(newname)s" % {"name": name, "newname": newname})
+ self.pyfile.name = newname
+ filename = join(location, newname)
+
+ fs_filename = fs_encode(filename)
+
+ if self.config['permission']['change_file']:
+ chmod(fs_filename, int(self.config['permission']['file'], 8))
+
+ if self.config['permission']['change_dl'] and os.name != "nt":
+ try:
+ uid = getpwnam(self.config['permission']['user'])[2]
+ gid = getgrnam(self.config['permission']['group'])[2]
+
+ chown(fs_filename, uid, gid)
+ except Exception, e:
+ self.logWarning(_("Setting User and Group failed: %s") % str(e))
+
+ self.lastDownload = filename
+ return self.lastDownload
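+ # Illustrative usage (the URL is an assumption): a hoster plugin calls download()
+ # with the direct link it extracted; the returned path is also kept in
+ # self.lastDownload for the checkDownload() step that usually follows:
+ #
+ #     self.download("http://example.com/dl/12345", disposition=True)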
+
+ def checkDownload(self, rules, api_size=0, max_size=50000, delete=True, read_size=0):
+ """ checks the content of the last downloaded file, re match is saved to `lastCheck`
+
+ :param rules: dict with names and rules to match (compiled regexp or strings)
+ :param api_size: expected file size
+ :param max_size: if the file is larger than this it won't be checked
+ :param delete: delete if matched
+ :param read_size: number of bytes to read from files larger than max_size
+ :return: dictionary key of the first rule that matched
+ """
+ lastDownload = fs_encode(self.lastDownload)
+ if not exists(lastDownload): return None
+
+ size = stat(lastDownload)
+ size = size.st_size
+
+ if api_size and api_size <= size: return None
+ elif size > max_size and not read_size: return None
+ self.logDebug("Download Check triggered")
+ f = open(lastDownload, "rb")
+ content = f.read(read_size if read_size else -1)
+ f.close()
+ #produces encoding errors; better to log to another file in the future?
+ #self.logDebug("Content: %s" % content)
+ for name, rule in rules.iteritems():
+ if type(rule) in (str, unicode):
+ if rule in content:
+ if delete:
+ remove(lastDownload)
+ return name
+ elif hasattr(rule, "search"):
+ m = rule.search(content)
+ if m:
+ if delete:
+ remove(lastDownload)
+ self.lastCheck = m
+ return name
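+ # Illustrative usage (rule names and patterns are assumptions): after download(),
+ # a plugin can verify that no error page was saved instead of the real file:
+ #
+ #     check = self.checkDownload({"offline": "file not found",
+ #                                 "limit": re.compile(r"download limit")})
+ #     if check == "offline":
+ #         self.offline()
+ #     elif check == "limit":
+ #         self.retry(wait_time=3600, reason="Download limit reached")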
+
+
+ def getPassword(self):
+ """ get the password the user provided in the package"""
+ password = self.pyfile.package().password
+ if not password: return ""
+ return password
+
+
+ def checkForSameFiles(self, starting=False):
+ """ checks if same file was/is downloaded within same package
+
+ :param starting: indicates that the current download is going to start
+ :raises SkipDownload:
+ """
+
+ pack = self.pyfile.package()
+
+ for pyfile in self.core.files.cache.values():
+ if pyfile != self.pyfile and pyfile.name == self.pyfile.name and pyfile.package().folder == pack.folder:
+ if pyfile.status in (0, 12): #finished or downloading
+ raise SkipDownload(pyfile.pluginname)
+ elif pyfile.status in (
+ 5, 7) and starting: #a download is waiting/starting and was apparently started before
+ raise SkipDownload(pyfile.pluginname)
+
+ download_folder = self.config['general']['download_folder']
+ location = safe_join(download_folder, pack.folder, self.pyfile.name)
+
+ if starting and self.config['download']['skip_existing'] and exists(location):
+ size = os.stat(location).st_size
+ if size >= self.pyfile.size:
+ raise SkipDownload("File exists.")
+
+ pyfile = self.core.db.findDuplicates(self.pyfile.id, self.pyfile.package().folder, self.pyfile.name)
+ if pyfile:
+ if exists(location):
+ raise SkipDownload(pyfile[0])
+
+ self.logDebug("File %s not skipped, because it does not exists." % self.pyfile.name)
+
+ def clean(self):
+ """ clean everything and remove references """
+ if hasattr(self, "pyfile"):
+ del self.pyfile
+ if hasattr(self, "req"):
+ self.req.close()
+ del self.req
+ if hasattr(self, "thread"):
+ del self.thread
+ if hasattr(self, "html"):
+ del self.html
diff --git a/pyload/plugins/PluginManager.py b/pyload/plugins/PluginManager.py
new file mode 100644
index 000000000..1edc0f819
--- /dev/null
+++ b/pyload/plugins/PluginManager.py
@@ -0,0 +1,356 @@
+# -*- coding: utf-8 -*-
+
+import re
+import sys
+
+from itertools import chain
+from os import listdir, makedirs
+from os.path import isfile, join, exists, abspath
+from sys import version_info
+from traceback import print_exc
+
+from pyload.lib.SafeEval import const_eval as literal_eval
+
+from pyload.ConfigParser import IGNORE
+
+
+class PluginManager:
+ ROOT = "pyload.plugins."
+ USERROOT = "userplugins."
+ TYPES = ("accounts", "container", "crypter", "hooks", "hoster", "internal", "ocr")
+
+ PATTERN = re.compile(r'__pattern__.*=.*r("|\')([^"\']+)')
+ VERSION = re.compile(r'__version__.*=.*("|\')([0-9.]+)')
+ CONFIG = re.compile(r'__config__.*=.*\[([^\]]+)', re.MULTILINE)
+ DESC = re.compile(r'__description__.?=.?("|"""|\')([^"\']+)')
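+ # These expressions read a plugin's metadata header without importing the module.
+ # A hypothetical plugin file would declare, for example:
+ #
+ #     __pattern__ = r"http://(?:www\.)?example\.com/\w+"
+ #     __version__ = "0.01"
+ #     __config__ = [("activated", "bool", "Activated", True)]
+ #     __description__ = """Example hoster plugin"""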
+
+
+ def __init__(self, core):
+ self.core = core
+
+ self.config = core.config
+ self.log = core.log
+
+ self.plugins = {}
+ self.createIndex()
+
+ #register for import hook
+ sys.meta_path.append(self)
+
+
+ def createIndex(self):
+ """create information for all plugins available"""
+
+ sys.path.append(abspath(""))
+
+ if not exists("userplugins"):
+ makedirs("userplugins")
+ if not exists(join("userplugins", "__init__.py")):
+ f = open(join("userplugins", "__init__.py"), "wb")
+ f.close()
+
+ self.plugins['crypter'] = self.crypterPlugins = self.parse("crypter", pattern=True)
+ self.plugins['container'] = self.containerPlugins = self.parse("container", pattern=True)
+ self.plugins['hoster'] = self.hosterPlugins = self.parse("hoster", pattern=True)
+
+ self.plugins['ocr'] = self.captchaPlugins = self.parse("ocr")
+ self.plugins['accounts'] = self.accountPlugins = self.parse("accounts")
+ self.plugins['hooks'] = self.hookPlugins = self.parse("hooks")
+ self.plugins['internal'] = self.internalPlugins = self.parse("internal")
+
+ self.log.debug("created index of plugins")
+
+ def parse(self, folder, pattern=False, home={}):
+ """
+ Return a dict with information about the plugins found in the given folder.
+ `home` contains the plugins already parsed from the pyload core; when set, the
+ user plugin directory is parsed instead and only newer user plugins are kept.
+
+ {
+ name : {path, version, config, (pattern, re), (plugin, class)}
+ }
+
+ """
+ plugins = {}
+ if home:
+ pfolder = join("userplugins", folder)
+ if not exists(pfolder):
+ makedirs(pfolder)
+ if not exists(join(pfolder, "__init__.py")):
+ f = open(join(pfolder, "__init__.py"), "wb")
+ f.close()
+
+ else:
+ pfolder = join(pypath, "pyload", "plugins", folder)
+
+ for f in listdir(pfolder):
+ if isfile(join(pfolder, f)) and (f.endswith(".py") or f.endswith("_25.pyc")
+ or f.endswith("_26.pyc") or f.endswith("_27.pyc")) and not f.startswith("_"):
+ data = open(join(pfolder, f))
+ content = data.read()
+ data.close()
+
+ if f.endswith("_25.pyc") and version_info[0:2] != (2, 5):
+ continue
+ elif f.endswith("_26.pyc") and version_info[0:2] != (2, 6):
+ continue
+ elif f.endswith("_27.pyc") and version_info[0:2] != (2, 7):
+ continue
+
+ name = f[:-3]
+ if name[-1] == ".": name = name[:-4]
+
+ version = self.VERSION.findall(content)
+ if version:
+ version = float(version[0][1])
+ else:
+ version = 0
+
+ # home contains plugins from pyload root
+ if home and name in home:
+ if home[name]['v'] >= version:
+ continue
+
+ if name in IGNORE or (folder, name) in IGNORE:
+ continue
+
+ plugins[name] = {}
+ plugins[name]['v'] = version
+
+ module = f.replace(".pyc", "").replace(".py", "")
+
+ # the plugin is loaded from user directory
+ plugins[name]['user'] = True if home else False
+ plugins[name]['name'] = module
+
+ if pattern:
+ pattern = self.PATTERN.findall(content)
+
+ if pattern:
+ pattern = pattern[0][1]
+ else:
+ pattern = "^unmachtable$"
+
+ plugins[name]['pattern'] = pattern
+
+ try:
+ plugins[name]['re'] = re.compile(pattern)
+ except:
+ self.log.error(_("%s has a invalid pattern.") % name)
+
+
+ # internals have no config
+ if folder == "internal":
+ self.config.deleteConfig(name)
+ continue
+
+ config = self.CONFIG.findall(content)
+ if config:
+ config = literal_eval(config[0].strip().replace("\n", "").replace("\r", ""))
+ desc = self.DESC.findall(content)
+ desc = desc[0][1] if desc else ""
+
+ if type(config[0]) == tuple:
+ config = [list(x) for x in config]
+ else:
+ config = [list(config)]
+
+ if folder == "hooks":
+ append = True
+ for item in config:
+ if item[0] == "activated": append = False
+
+ # activated flag missing
+ if append: config.append(["activated", "bool", "Activated", False])
+
+ try:
+ self.config.addPluginConfig(name, config, desc)
+ except:
+ self.log.error("Invalid config in %s: %s" % (name, config))
+
+ elif folder == "hooks": #force config creation
+ desc = self.DESC.findall(content)
+ desc = desc[0][1] if desc else ""
+ config = (["activated", "bool", "Activated", False],)
+
+ try:
+ self.config.addPluginConfig(name, config, desc)
+ except:
+ self.log.error("Invalid config in %s: %s" % (name, config))
+
+ if not home:
+ temp = self.parse(folder, pattern, plugins)
+ plugins.update(temp)
+
+ return plugins
+
+
+ def parseUrls(self, urls):
+ """parse plugins for given list of urls"""
+
+ last = None
+ res = [] # tuples of (url, plugin)
+
+ for url in urls:
+ if type(url) not in (str, unicode, buffer): continue
+ found = False
+
+ if last and last[1]['re'].match(url):
+ res.append((url, last[0]))
+ continue
+
+ for name, value in chain(self.crypterPlugins.iteritems(), self.hosterPlugins.iteritems(),
+ self.containerPlugins.iteritems()):
+ if value['re'].match(url):
+ res.append((url, name))
+ last = (name, value)
+ found = True
+ break
+
+ if not found:
+ res.append((url, "BasePlugin"))
+
+ return res
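+ # Illustrative result (URLs, plugin names and the `core` instance are assumptions):
+ # each URL is mapped to the first crypter/hoster/container whose pattern matches,
+ # otherwise to BasePlugin:
+ #
+ #     core.pluginManager.parseUrls(["http://example-hoster.com/file/abc", "http://unknown.example/x"])
+ #     # -> [("http://example-hoster.com/file/abc", "ExampleHoster"),
+ #     #     ("http://unknown.example/x", "BasePlugin")]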
+
+ def findPlugin(self, name, pluginlist=("hoster", "crypter", "container")):
+ for ptype in pluginlist:
+ if name in self.plugins[ptype]:
+ return self.plugins[ptype][name], ptype
+ return None, None
+
+ def getPlugin(self, name, original=False):
+ """return plugin module from hoster|decrypter|container"""
+ plugin, type = self.findPlugin(name)
+
+ if not plugin:
+ self.log.warning("Plugin %s not found." % name)
+ plugin = self.hosterPlugins['BasePlugin']
+
+ if "new_module" in plugin and not original:
+ return plugin['new_module']
+
+ return self.loadModule(type, name)
+
+ def getPluginName(self, name):
+ """ used to obtain new name if other plugin was injected"""
+ plugin, type = self.findPlugin(name)
+
+ if "new_name" in plugin:
+ return plugin['new_name']
+
+ return name
+
+ def loadModule(self, type, name):
+ """ Returns loaded module for plugin
+
+ :param type: plugin type, subfolder of pyload.plugins
+ :param name:
+ """
+ plugins = self.plugins[type]
+ if name in plugins:
+ if "module" in plugins[name]: return plugins[name]['module']
+ try:
+ module = __import__(self.ROOT + "%s.%s" % (type, plugins[name]['name']), globals(), locals(),
+ plugins[name]['name'])
+ plugins[name]['module'] = module #cache import, maybe unneeded
+ return module
+ except Exception, e:
+ self.log.error(_("Error importing %(name)s: %(msg)s") % {"name": name, "msg": str(e)})
+ if self.core.debug:
+ print_exc()
+
+ def loadClass(self, type, name):
+ """Returns the class of a plugin with the same name"""
+ module = self.loadModule(type, name)
+ if module: return getattr(module, name)
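+ # Illustrative usage (the `core` instance and plugin name are assumptions): other
+ # parts of the core resolve plugins by type and name, e.g.
+ #
+ #     hoster_class = core.pluginManager.loadClass("hoster", "BasePlugin")
+ #     hoster_module = core.pluginManager.loadModule("hoster", "BasePlugin")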
+
+ def getAccountPlugins(self):
+ """return list of account plugin names"""
+ return self.accountPlugins.keys()
+
+ def find_module(self, fullname, path=None):
+ #redirecting imports if necessary
+ if fullname.startswith(self.ROOT) or fullname.startswith(self.USERROOT): #separate pyload plugins
+ if fullname.startswith(self.USERROOT): user = 1
+ else: user = 0 #used as bool and int
+
+ split = fullname.split(".")
+ if len(split) != 4 - user: return
+ type, name = split[2 - user:4 - user]
+
+ if type in self.plugins and name in self.plugins[type]:
+ #userplugin is a newer version
+ if not user and self.plugins[type][name]['user']:
+ return self
+ #imported from userdir, but pyload's is newer
+ if user and not self.plugins[type][name]['user']:
+ return self
+
+
+ def load_module(self, name, replace=True):
+ if name not in sys.modules: #could be already in modules
+ if replace:
+ if self.ROOT in name:
+ newname = name.replace(self.ROOT, self.USERROOT)
+ else:
+ newname = name.replace(self.USERROOT, self.ROOT)
+ else: newname = name
+
+ base, plugin = newname.rsplit(".", 1)
+
+ self.log.debug("Redirected import %s -> %s" % (name, newname))
+
+ module = __import__(newname, globals(), locals(), [plugin])
+ #inject under new and old name
+ sys.modules[name] = module
+ sys.modules[newname] = module
+
+ return sys.modules[name]
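+ # Illustrative example of the redirection (module name is an assumption): when a
+ # newer user plugin exists, importing the core path is transparently served from
+ # the user directory (and vice versa), and the module is registered under both names:
+ #
+ #     import pyload.plugins.hoster.ExampleHoster
+ #     # -> actually loads userplugins.hoster.ExampleHoster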
+
+
+ def reloadPlugins(self, type_plugins):
+ """ reload and reindex plugins """
+ if not type_plugins:
+ return None
+
+ self.log.debug("Request reload of plugins: %s" % type_plugins)
+
+ reloaded = []
+
+ as_dict = {}
+ for t,n in type_plugins:
+ if t in ("hooks", "internal"): #: do not reload hooks or internals, because would cause to much side effects
+ continue
+ elif t in as_dict:
+ as_dict[t].append(n)
+ else:
+ as_dict[t] = [n]
+
+ for type in as_dict.iterkeys():
+ for plugin in as_dict[type]:
+ if plugin in self.plugins[type] and "module" in self.plugins[type][plugin]:
+ self.log.debug("Reloading %s" % plugin)
+ id = (type, plugin)
+ try:
+ reload(self.plugins[type][plugin]['module'])
+ except Exception, e:
+ self.log.error("Error when reloading %s" % id, str(e))
+ continue
+ else:
+ reloaded.append(id)
+
+ #index creation
+ self.plugins['crypter'] = self.crypterPlugins = self.parse("crypter", pattern=True)
+ self.plugins['container'] = self.containerPlugins = self.parse("container", pattern=True)
+ self.plugins['hoster'] = self.hosterPlugins = self.parse("hoster", pattern=True)
+ self.plugins['ocr'] = self.captchaPlugins = self.parse("ocr")
+ self.plugins['accounts'] = self.accountPlugins = self.parse("accounts")
+
+ if "accounts" in as_dict: #: accounts needs to be reloaded
+ self.core.accountManager.initPlugins()
+ self.core.scheduler.addJob(0, self.core.accountManager.getAccountInfos)
+
+ return reloaded #: return a list of the plugins successfully reloaded
+
+ def reloadPlugin(self, type_plugin):
+ """ reload and reindex ONE plugin """
+ return True if self.reloadPlugins(type_plugin) else False
diff --git a/pyload/plugins/README.md b/pyload/plugins/README.md
new file mode 100644
index 000000000..fa2a4c5b2
--- /dev/null
+++ b/pyload/plugins/README.md
@@ -0,0 +1,16 @@
+Licensing
+---------
+
+According to the terms of the GNU General Public License,
+pyload's plugins must be treated as an extension of the main program.
+This means the plugins must be released under the GPL or a GPL-compatible
+free software license, and that the terms of the GPL must be followed when
+those plugins are distributed.
+
+ * Any plugin published **without a license notice** is intended to be published under the **GNU GPLv3**.
+ * A different license can be used but it **must be GPL-compatible** and the license notice must be put in the plugin
+ file.
+ * Any plugin published **with a GPL-incompatible license** will be rejected.
+ This includes *copyright, all rights reserved*.
+ * It is recommended to put the license notice at the top of the plugin file.
+ * It is recommended **not** to put a license notice when the plugin is published under the GNU GPLv3.
diff --git a/pyload/plugins/__init__.py b/pyload/plugins/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/__init__.py
diff --git a/pyload/plugins/accounts/AlldebridCom.py b/pyload/plugins/accounts/AlldebridCom.py
new file mode 100644
index 000000000..928e81fe5
--- /dev/null
+++ b/pyload/plugins/accounts/AlldebridCom.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+import re
+import xml.dom.minidom as dom
+
+from time import time
+from urllib import urlencode
+
+from BeautifulSoup import BeautifulSoup
+
+from pyload.plugins.Account import Account
+
+
+class AlldebridCom(Account):
+ __name__ = "AlldebridCom"
+ __type__ = "account"
+ __version__ = "0.22"
+
+ __description__ = """AllDebrid.com account plugin"""
+ __author_name__ = "Andy Voigt"
+ __author_mail__ = "spamsales@online.de"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ page = req.load("http://www.alldebrid.com/account/")
+ soup = BeautifulSoup(page)
+ #Try to parse expiration date directly from the control panel page (better accuracy)
+ try:
+ time_text = soup.find('div', attrs={'class': 'remaining_time_text'}).strong.string
+ self.logDebug("Account expires in: %s" % time_text)
+ p = re.compile('\d+')
+ exp_data = p.findall(time_text)
+ exp_time = time() + int(exp_data[0]) * 24 * 60 * 60 + int(
+ exp_data[1]) * 60 * 60 + (int(exp_data[2]) - 1) * 60
+ #Get expiration date from API
+ except:
+ data = self.getAccountData(user)
+ page = req.load("http://www.alldebrid.com/api.php?action=info_user&login=%s&pw=%s" % (user,
+ data['password']))
+ self.logDebug(page)
+ xml = dom.parseString(page)
+ exp_time = time() + int(xml.getElementsByTagName("date")[0].childNodes[0].nodeValue) * 24 * 60 * 60
+ account_info = {"validuntil": exp_time, "trafficleft": -1}
+ return account_info
+
+ def login(self, user, data, req):
+ urlparams = urlencode({'action': 'login', 'login_login': user, 'login_password': data['password']})
+ page = req.load("http://www.alldebrid.com/register/?%s" % urlparams)
+
+ if "This login doesn't exist" in page:
+ self.wrongPassword()
+
+ if "The password is not valid" in page:
+ self.wrongPassword()
+
+ if "Invalid captcha" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/BayfilesCom.py b/pyload/plugins/accounts/BayfilesCom.py
new file mode 100644
index 000000000..a42ac6457
--- /dev/null
+++ b/pyload/plugins/accounts/BayfilesCom.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+from time import time
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class BayfilesCom(Account):
+ __name__ = "BayfilesCom"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Bayfiles.com account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def loadAccountInfo(self, user, req):
+ for _ in xrange(2):
+ response = json_loads(req.load("http://api.bayfiles.com/v1/account/info"))
+ self.logDebug(response)
+ if not response['error']:
+ break
+ self.logWarning(response['error'])
+ self.relogin(user)
+
+ return {"premium": bool(response['premium']), "trafficleft": -1,
+ "validuntil": response['expires'] if response['expires'] >= int(time()) else -1}
+
+ def login(self, user, data, req):
+ response = json_loads(req.load("http://api.bayfiles.com/v1/account/login/%s/%s" % (user, data['password'])))
+ self.logDebug(response)
+ if response['error']:
+ self.logError(response['error'])
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/BitshareCom.py b/pyload/plugins/accounts/BitshareCom.py
new file mode 100644
index 000000000..7a982aea5
--- /dev/null
+++ b/pyload/plugins/accounts/BitshareCom.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+
+
+class BitshareCom(Account):
+ __name__ = "BitshareCom"
+ __type__ = "account"
+ __version__ = "0.12"
+
+ __description__ = """Bitshare account plugin"""
+ __author_name__ = "Paul King"
+ __author_mail__ = None
+
+
+ def loadAccountInfo(self, user, req):
+ page = req.load("http://bitshare.com/mysettings.html")
+
+ if "\"http://bitshare.com/myupgrade.html\">Free" in page:
+ return {"validuntil": -1, "trafficleft": -1, "premium": False}
+
+ if not '<input type="checkbox" name="directdownload" checked="checked" />' in page:
+ self.logWarning(_("Activate direct Download in your Bitshare Account"))
+
+ return {"validuntil": -1, "trafficleft": -1, "premium": True}
+
+ def login(self, user, data, req):
+ page = req.load("http://bitshare.com/login.html",
+ post={"user": user, "password": data['password'], "submit": "Login"}, cookies=True)
+ if "login" in req.lastEffectiveURL:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/CramitIn.py b/pyload/plugins/accounts/CramitIn.py
new file mode 100644
index 000000000..5bf7a3141
--- /dev/null
+++ b/pyload/plugins/accounts/CramitIn.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSPAccount import XFSPAccount
+
+
+class CramitIn(XFSPAccount):
+ __name__ = "CramitIn"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Cramit.in account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ MAIN_PAGE = "http://cramit.in/"
diff --git a/pyload/plugins/accounts/CyberlockerCh.py b/pyload/plugins/accounts/CyberlockerCh.py
new file mode 100644
index 000000000..94cc0d8c4
--- /dev/null
+++ b/pyload/plugins/accounts/CyberlockerCh.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSPAccount import XFSPAccount
+from pyload.plugins.internal.SimpleHoster import parseHtmlForm
+
+
+class CyberlockerCh(XFSPAccount):
+ __name__ = "CyberlockerCh"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Cyberlocker.ch account plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ MAIN_PAGE = "http://cyberlocker.ch/"
+
+
+ def login(self, user, data, req):
+ html = req.load(self.MAIN_PAGE + 'login.html', decode=True)
+
+ action, inputs = parseHtmlForm('name="FL"', html)
+ if not inputs:
+ inputs = {"op": "login",
+ "redirect": self.MAIN_PAGE}
+
+ inputs.update({"login": user,
+ "password": data['password']})
+
+ # Without this a 403 Forbidden is returned
+ req.http.lastURL = self.MAIN_PAGE + 'login.html'
+ html = req.load(self.MAIN_PAGE, post=inputs, decode=True)
+
+ if 'Incorrect Login or Password' in html or '>Error<' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/CzshareCom.py b/pyload/plugins/accounts/CzshareCom.py
new file mode 100644
index 000000000..584b9a3a2
--- /dev/null
+++ b/pyload/plugins/accounts/CzshareCom.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+import re
+
+from pyload.plugins.Account import Account
+
+
+class CzshareCom(Account):
+ __name__ = "CzshareCom"
+ __type__ = "account"
+ __version__ = "0.14"
+
+ __description__ = """Czshare.com account plugin, now Sdilej.cz"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ CREDIT_LEFT_PATTERN = r'<tr class="active">\s*<td>([0-9 ,]+) (KiB|MiB|GiB)</td>\s*<td>([^<]*)</td>\s*</tr>'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://sdilej.cz/prehled_kreditu/")
+
+ m = re.search(self.CREDIT_LEFT_PATTERN, html)
+ if m is None:
+ return {"validuntil": 0, "trafficleft": 0}
+ else:
+ credits = float(m.group(1).replace(' ', '').replace(',', '.'))
+ credits = credits * 1024 ** {'KiB': 0, 'MiB': 1, 'GiB': 2}[m.group(2)]
+ validuntil = mktime(strptime(m.group(3), '%d.%m.%y %H:%M'))
+ return {"validuntil": validuntil, "trafficleft": credits}
+
+ def login(self, user, data, req):
+ html = req.load('https://sdilej.cz/index.php', post={
+ "Prihlasit": "Prihlasit",
+ "login-password": data['password'],
+ "login-name": user
+ })
+
+ if '<div class="login' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/DebridItaliaCom.py b/pyload/plugins/accounts/DebridItaliaCom.py
new file mode 100644
index 000000000..cff0be018
--- /dev/null
+++ b/pyload/plugins/accounts/DebridItaliaCom.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+import re
+import time
+
+from pyload.plugins.Account import Account
+
+
+class DebridItaliaCom(Account):
+ __name__ = "DebridItaliaCom"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Debriditalia.com account plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ WALID_UNTIL_PATTERN = r"Premium valid till: (?P<D>[^|]+) \|"
+
+
+ def loadAccountInfo(self, user, req):
+ if 'Account premium not activated' in self.html:
+ return {"premium": False, "validuntil": None, "trafficleft": None}
+
+ m = re.search(self.VALID_UNTIL_PATTERN, self.html)
+ if m:
+ validuntil = int(time.mktime(time.strptime(m.group('D'), "%d/%m/%Y %H:%M")))
+ return {"premium": True, "validuntil": validuntil, "trafficleft": -1}
+ else:
+ self.logError('Unable to retrieve account information - Plugin may be out of date')
+
+ def login(self, user, data, req):
+ self.html = req.load("http://debriditalia.com/login.php",
+ get={"u": user, "p": data['password']})
+ if 'NO' in self.html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/DepositfilesCom.py b/pyload/plugins/accounts/DepositfilesCom.py
new file mode 100644
index 000000000..a17493cc1
--- /dev/null
+++ b/pyload/plugins/accounts/DepositfilesCom.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import strptime, mktime
+
+from pyload.plugins.Account import Account
+
+
+class DepositfilesCom(Account):
+ __name__ = "DepositfilesCom"
+ __type__ = "account"
+ __version__ = "0.3"
+
+ __description__ = """Depositfiles.com account plugin"""
+ __author_name__ = ("mkaay", "stickell", "Walter Purcaro")
+ __author_mail__ = ("mkaay@mkaay.de", "l.stickell@yahoo.it", "vuolter@gmail.com")
+
+
+ def loadAccountInfo(self, user, req):
+ src = req.load("https://dfiles.eu/de/gold/")
+ validuntil = re.search(r"Sie haben Gold Zugang bis: <b>(.*?)</b></div>", src).group(1)
+
+ validuntil = int(mktime(strptime(validuntil, "%Y-%m-%d %H:%M:%S")))
+
+ return {"validuntil": validuntil, "trafficleft": -1}
+
+ def login(self, user, data, req):
+ src = req.load("https://dfiles.eu/de/login.php", get={"return": "/de/gold/payment.php"},
+ post={"login": user, "password": data['password']})
+ if r'<div class="error_message">Sie haben eine falsche Benutzername-Passwort-Kombination verwendet.</div>' in src:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/EasybytezCom.py b/pyload/plugins/accounts/EasybytezCom.py
new file mode 100644
index 000000000..595e95ec4
--- /dev/null
+++ b/pyload/plugins/accounts/EasybytezCom.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime, gmtime
+
+from pyload.plugins.Account import Account
+from pyload.plugins.internal.SimpleHoster import parseHtmlForm
+from pyload.utils import parseFileSize
+
+
+class EasybytezCom(Account):
+ __name__ = "EasybytezCom"
+ __type__ = "account"
+ __version__ = "0.04"
+
+ __description__ = """EasyBytez.com account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ VALID_UNTIL_PATTERN = r'Premium account expire:</TD><TD><b>([^<]+)</b>'
+ TRAFFIC_LEFT_PATTERN = r'<TR><TD>Traffic available today:</TD><TD><b>(?P<S>[^<]+)</b>'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.easybytez.com/?op=my_account", decode=True)
+
+ validuntil = trafficleft = None
+ premium = False
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ try:
+ self.logDebug("Expire date: " + m.group(1))
+ validuntil = mktime(strptime(m.group(1), "%d %B %Y"))
+ except Exception, e:
+ self.logError(e)
+ if validuntil > mktime(gmtime()):
+ premium = True
+ trafficleft = -1
+ else:
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if m:
+ trafficleft = m.group(1)
+ if "Unlimited" in trafficleft:
+ trafficleft = -1
+ else:
+ trafficleft = parseFileSize(trafficleft) / 1024
+
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+ def login(self, user, data, req):
+ html = req.load('http://www.easybytez.com/login.html', decode=True)
+ action, inputs = parseHtmlForm('name="FL"', html)
+ inputs.update({"login": user,
+ "password": data['password'],
+ "redirect": "http://www.easybytez.com/"})
+
+ html = req.load(action, post=inputs, decode=True)
+
+ if 'Incorrect Login or Password' in html or '>Error<' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/EgoFilesCom.py b/pyload/plugins/accounts/EgoFilesCom.py
new file mode 100644
index 000000000..3886d053a
--- /dev/null
+++ b/pyload/plugins/accounts/EgoFilesCom.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import re
+import time
+
+from pyload.plugins.Account import Account
+from pyload.utils import parseFileSize
+
+
+class EgoFilesCom(Account):
+ __name__ = "EgoFilesCom"
+ __type__ = "account"
+ __version__ = "0.2"
+
+ __description__ = """Egofiles.com account plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ PREMIUM_ACCOUNT_PATTERN = '<br/>\s*Premium: (?P<P>[^/]*) / Traffic left: (?P<T>[\d.]*) (?P<U>\w*)\s*\\n\s*<br/>'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://egofiles.com")
+ if 'You are logged as a Free User' in html:
+ return {"premium": False, "validuntil": None, "trafficleft": None}
+
+ m = re.search(self.PREMIUM_ACCOUNT_PATTERN, html)
+ if m:
+ validuntil = int(time.mktime(time.strptime(m.group('P'), "%Y-%m-%d %H:%M:%S")))
+ trafficleft = parseFileSize(m.group('T'), m.group('U')) / 1024
+ return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
+ else:
+ self.logError('Unable to retrieve account information - Plugin may be out of date')
+
+ def login(self, user, data, req):
+ # Set English language
+ req.load("https://egofiles.com/ajax/lang.php?lang=en", just_header=True)
+
+ html = req.load("http://egofiles.com/ajax/register.php",
+ post={"log": 1,
+ "loginV": user,
+ "passV": data['password']})
+ if 'Login successful' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/EuroshareEu.py b/pyload/plugins/accounts/EuroshareEu.py
new file mode 100644
index 000000000..d74d4526b
--- /dev/null
+++ b/pyload/plugins/accounts/EuroshareEu.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+import re
+
+from pyload.plugins.Account import Account
+
+
+class EuroshareEu(Account):
+ __name__ = "EuroshareEu"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Euroshare.eu account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def loadAccountInfo(self, user, req):
+ self.relogin(user)
+ html = req.load("http://euroshare.eu/customer-zone/settings/")
+
+ m = re.search('id="input_expire_date" value="(\d+\.\d+\.\d+ \d+:\d+)"', html)
+ if m is None:
+ premium, validuntil = False, -1
+ else:
+ premium = True
+ validuntil = mktime(strptime(m.group(1), "%d.%m.%Y %H:%M"))
+
+ return {"validuntil": validuntil, "trafficleft": -1, "premium": premium}
+
+ def login(self, user, data, req):
+
+ html = req.load('http://euroshare.eu/customer-zone/login/', post={
+ "trvale": "1",
+ "login": user,
+ "password": data['password']
+ }, decode=True)
+
+ if u">Nesprávne prihlasovacie meno alebo heslo" in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/FastixRu.py b/pyload/plugins/accounts/FastixRu.py
new file mode 100644
index 000000000..a7b939a2c
--- /dev/null
+++ b/pyload/plugins/accounts/FastixRu.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class FastixRu(Account):
+ __name__ = "FastixRu"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Fastix account plugin"""
+ __author_name__ = "Massimo Rosamilia"
+ __author_mail__ = "max@spiritix.eu"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ page = req.load("http://fastix.ru/api_v2/?apikey=%s&sub=getaccountdetails" % (data['api']))
+ page = json_loads(page)
+ points = page['points']
+ kb = float(points)
+ kb = kb * 1024 ** 2 / 1000
+ if points > 0:
+ account_info = {"validuntil": -1, "trafficleft": kb}
+ else:
+ account_info = {"validuntil": None, "trafficleft": None, "premium": False}
+ return account_info
+
+ def login(self, user, data, req):
+ page = req.load("http://fastix.ru/api_v2/?sub=get_apikey&email=%s&password=%s" % (user, data['password']))
+ api = json_loads(page)
+ api = api['apikey']
+ data['api'] = api
+ if "error_code" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/FastshareCz.py b/pyload/plugins/accounts/FastshareCz.py
new file mode 100644
index 000000000..6e86f60fa
--- /dev/null
+++ b/pyload/plugins/accounts/FastshareCz.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Account import Account
+from pyload.utils import parseFileSize
+
+
+class FastshareCz(Account):
+ __name__ = "FastshareCz"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Fastshare.cz account plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ CREDIT_PATTERN = r'(?:Kredit|Credit)\s*</td>\s*<td[^>]*>([\d. \w]+)&nbsp;'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.fastshare.cz/user", decode=True)
+
+ m = re.search(self.CREDIT_PATTERN, html)
+ if m:
+ trafficleft = parseFileSize(m.group(1)) / 1024
+ premium = True if trafficleft else False
+ else:
+ trafficleft = None
+ premium = False
+
+ return {"validuntil": -1, "trafficleft": trafficleft, "premium": premium}
+
+ def login(self, user, data, req):
+ req.load('http://www.fastshare.cz/login') # Do not remove or it will not log in
+ html = req.load('http://www.fastshare.cz/sql.php', post={
+ "heslo": data['password'],
+ "login": user
+ }, decode=True)
+
+ if u'>Špatné uživatelské jméno nebo heslo.<' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/File4safeCom.py b/pyload/plugins/accounts/File4safeCom.py
new file mode 100644
index 000000000..4da721193
--- /dev/null
+++ b/pyload/plugins/accounts/File4safeCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSPAccount import XFSPAccount
+
+
+class File4safeCom(XFSPAccount):
+ __name__ = "File4safeCom"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """File4safe.com account plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ MAIN_PAGE = "http://file4safe.com/"
+
+ LOGIN_FAIL_PATTERN = r'input_login'
+ PREMIUM_PATTERN = r'Extend Premium'
diff --git a/pyload/plugins/accounts/FilecloudIo.py b/pyload/plugins/accounts/FilecloudIo.py
new file mode 100644
index 000000000..5e6b001e8
--- /dev/null
+++ b/pyload/plugins/accounts/FilecloudIo.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class FilecloudIo(Account):
+ __name__ = "FilecloudIo"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """FilecloudIo account plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+
+ def loadAccountInfo(self, user, req):
+ # It looks like the first API request always fails, so we retry up to 5 times; it should work on the second try
+ for _ in xrange(5):
+ rep = req.load("https://secure.filecloud.io/api-fetch_apikey.api",
+ post={"username": user, "password": self.accounts[user]['password']})
+ rep = json_loads(rep)
+ if rep['status'] == 'ok':
+ break
+ elif rep['status'] == 'error' and rep['message'] == 'no such user or wrong password':
+ self.logError("Wrong username or password")
+ return {"valid": False, "premium": False}
+ else:
+ return {"premium": False}
+
+ akey = rep['akey']
+ self.accounts[user]['akey'] = akey # Saved for hoster plugin
+ rep = req.load("http://api.filecloud.io/api-fetch_account_details.api",
+ post={"akey": akey})
+ rep = json_loads(rep)
+
+ if rep['is_premium'] == 1:
+ return {"validuntil": int(rep['premium_until']), "trafficleft": -1}
+ else:
+ return {"premium": False}
+
+ def login(self, user, data, req):
+ req.cj.setCookie("secure.filecloud.io", "lang", "en")
+ html = req.load('https://secure.filecloud.io/user-login.html')
+
+ if not hasattr(self, "form_data"):
+ self.form_data = {}
+
+ self.form_data['username'] = user
+ self.form_data['password'] = data['password']
+
+ html = req.load('https://secure.filecloud.io/user-login_p.html',
+ post=self.form_data,
+ multipart=True)
+
+ self.logged_in = True if "you have successfully logged in - filecloud.io" in html else False
+ self.form_data = {}
diff --git a/pyload/plugins/accounts/FilefactoryCom.py b/pyload/plugins/accounts/FilefactoryCom.py
new file mode 100644
index 000000000..1e2115ac3
--- /dev/null
+++ b/pyload/plugins/accounts/FilefactoryCom.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime
+
+from pycurl import REFERER
+
+from pyload.plugins.Account import Account
+
+
+class FilefactoryCom(Account):
+ __name__ = "FilefactoryCom"
+ __type__ = "account"
+ __version__ = "0.14"
+
+ __description__ = """Filefactory.com account plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ VALID_UNTIL_PATTERN = r'Premium valid until: <strong>(?P<d>\d{1,2})\w{1,2} (?P<m>\w{3}), (?P<y>\d{4})</strong>'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.filefactory.com/account/")
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ premium = True
+ validuntil = re.sub(self.VALID_UNTIL_PATTERN, '\g<d> \g<m> \g<y>', m.group(0))
+ validuntil = mktime(strptime(validuntil, "%d %b %Y"))
+ else:
+ premium = False
+ validuntil = -1
+
+ return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}
+
+ def login(self, user, data, req):
+ req.http.c.setopt(REFERER, "http://www.filefactory.com/member/login.php")
+
+ html = req.load("http://www.filefactory.com/member/signin.php", post={
+ "loginEmail": user,
+ "loginPassword": data['password'],
+ "Submit": "Sign In"})
+
+ if req.lastEffectiveURL != "http://www.filefactory.com/account/":
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/FilejungleCom.py b/pyload/plugins/accounts/FilejungleCom.py
new file mode 100644
index 000000000..ab52ffc04
--- /dev/null
+++ b/pyload/plugins/accounts/FilejungleCom.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime
+
+from pyload.plugins.Account import Account
+
+
+class FilejungleCom(Account):
+ __name__ = "FilejungleCom"
+ __type__ = "account"
+ __version__ = "0.11"
+
+ __description__ = """Filejungle.com account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ login_timeout = 60
+
+ URL = "http://filejungle.com/"
+ TRAFFIC_LEFT_PATTERN = r'"/extend_premium\.php">Until (\d+ [A-Za-z]+ \d+)<br'
+ LOGIN_FAILED_PATTERN = r'<span htmlfor="loginUser(Name|Password)" generated="true" class="fail_info">'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load(self.URL + "dashboard.php")
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if m:
+ premium = True
+ validuntil = mktime(strptime(m.group(1), "%d %b %Y"))
+ else:
+ premium = False
+ validuntil = -1
+
+ return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}
+
+ def login(self, user, data, req):
+ html = req.load(self.URL + "login.php", post={
+ "loginUserName": user,
+ "loginUserPassword": data['password'],
+ "loginFormSubmit": "Login",
+ "recaptcha_challenge_field": "",
+ "recaptcha_response_field": "",
+ "recaptcha_shortencode_field": ""})
+
+ if re.search(self.LOGIN_FAILED_PATTERN, html):
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/FilerNet.py b/pyload/plugins/accounts/FilerNet.py
new file mode 100644
index 000000000..51c2e5d75
--- /dev/null
+++ b/pyload/plugins/accounts/FilerNet.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+import re
+import time
+
+from pyload.plugins.Account import Account
+from pyload.utils import parseFileSize
+
+
+class FilerNet(Account):
+ __name__ = "FilerNet"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Filer.net account plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ TOKEN_PATTERN = r'_csrf_token" value="([^"]+)" />'
+ WALID_UNTIL_PATTERN = r"Der Premium-Zugang ist gÃŒltig bis (.+)\.\s*</td>"
+ TRAFFIC_PATTERN = r'Traffic</th>\s*<td>([^<]+)</td>'
+ FREE_PATTERN = r'Account Status</th>\s*<td>\s*Free'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("https://filer.net/profile")
+
+ # Free user
+ if re.search(self.FREE_PATTERN, html):
+ return {"premium": False, "validuntil": None, "trafficleft": None}
+
+ until = re.search(self.VALID_UNTIL_PATTERN, html)
+ traffic = re.search(self.TRAFFIC_PATTERN, html)
+ if until and traffic:
+ validuntil = int(time.mktime(time.strptime(until.group(1), "%d.%m.%Y %H:%M:%S")))
+ trafficleft = parseFileSize(traffic.group(1)) / 1024
+ return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
+ else:
+ self.logError('Unable to retrieve account information - Plugin may be out of date')
+ return {"premium": False, "validuntil": None, "trafficleft": None}
+
+ def login(self, user, data, req):
+ html = req.load("https://filer.net/login")
+ token = re.search(self.TOKEN_PATTERN, html).group(1)
+ html = req.load("https://filer.net/login_check",
+ post={"_username": user, "_password": data['password'],
+ "_remember_me": "on", "_csrf_token": token, "_target_path": "https://filer.net/"})
+ if 'Logout' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/FilerioCom.py b/pyload/plugins/accounts/FilerioCom.py
new file mode 100644
index 000000000..0a8bc10cd
--- /dev/null
+++ b/pyload/plugins/accounts/FilerioCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSPAccount import XFSPAccount
+
+
+class FilerioCom(XFSPAccount):
+ __name__ = "FilerioCom"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """FileRio.in account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ MAIN_PAGE = "http://filerio.in/"
diff --git a/pyload/plugins/accounts/FilesMailRu.py b/pyload/plugins/accounts/FilesMailRu.py
new file mode 100644
index 000000000..a3ef4b348
--- /dev/null
+++ b/pyload/plugins/accounts/FilesMailRu.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+
+
+class FilesMailRu(Account):
+ __name__ = "FilesMailRu"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Filesmail.ru account plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ def loadAccountInfo(self, user, req):
+ return {"validuntil": None, "trafficleft": None}
+
+ def login(self, user, data, req):
+ user, domain = user.split("@")
+
+ page = req.load("http://swa.mail.ru/cgi-bin/auth", None,
+ {"Domain": domain, "Login": user, "Password": data['password'],
+ "Page": "http://files.mail.ru/"}, cookies=True)
+
+ if "НеверМПе ОЌя пПльзПвателя ОлО парПль" in page: # @TODO seems not to work
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/FileserveCom.py b/pyload/plugins/accounts/FileserveCom.py
new file mode 100644
index 000000000..211023991
--- /dev/null
+++ b/pyload/plugins/accounts/FileserveCom.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class FileserveCom(Account):
+ __name__ = "FileserveCom"
+ __type__ = "account"
+ __version__ = "0.2"
+
+ __description__ = """Fileserve.com account plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+
+ page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
+ "submit": "Submit+Query"})
+ res = json_loads(page)
+
+ if res['type'] == "premium":
+ validuntil = mktime(strptime(res['expireTime'], "%Y-%m-%d %H:%M:%S"))
+ return {"trafficleft": res['traffic'], "validuntil": validuntil}
+ else:
+ return {"premium": False, "trafficleft": None, "validuntil": None}
+
+ def login(self, user, data, req):
+ page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
+ "submit": "Submit+Query"})
+ res = json_loads(page)
+
+ if not res['type']:
+ self.wrongPassword()
+
+ #login at fileserve page
+ req.load("http://www.fileserve.com/login.php",
+ post={"loginUserName": user, "loginUserPassword": data['password'], "autoLogin": "checked",
+ "loginFormSubmit": "Login"})
diff --git a/pyload/plugins/accounts/FourSharedCom.py b/pyload/plugins/accounts/FourSharedCom.py
new file mode 100644
index 000000000..a62749c43
--- /dev/null
+++ b/pyload/plugins/accounts/FourSharedCom.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class FourSharedCom(Account):
+ __name__ = "FourSharedCom"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """FourShared.com account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def loadAccountInfo(self, user, req):
+ #fixme
+ return {"validuntil": -1, "trafficleft": -1, "premium": False}
+
+ def login(self, user, data, req):
+ req.cj.setCookie("www.4shared.com", "4langcookie", "en")
+ response = req.load('http://www.4shared.com/login',
+ post={"login": user,
+ "password": data['password'],
+ "remember": "false",
+ "doNotRedirect": "true"})
+ self.logDebug(response)
+ response = json_loads(response)
+
+ if not "ok" in response or response['ok'] != True:
+ if "rejectReason" in response and response['rejectReason'] != True:
+ self.logError(response['rejectReason'])
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/FreakshareCom.py b/pyload/plugins/accounts/FreakshareCom.py
new file mode 100644
index 000000000..2484a2da1
--- /dev/null
+++ b/pyload/plugins/accounts/FreakshareCom.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import strptime, mktime
+
+from pyload.plugins.Account import Account
+
+
+class FreakshareCom(Account):
+ __name__ = "FreakshareCom"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Freakshare.com account plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ def loadAccountInfo(self, user, req):
+ page = req.load("http://freakshare.com/")
+
+ validuntil = r"ltig bis:</td>\s*<td><b>([0-9 \-:.]+)</b></td>"
+ validuntil = re.search(validuntil, page, re.MULTILINE)
+ validuntil = validuntil.group(1).strip()
+ validuntil = mktime(strptime(validuntil, "%d.%m.%Y - %H:%M"))
+
+ traffic = r"Traffic verbleibend:</td>\s*<td>([^<]+)"
+ traffic = re.search(traffic, page, re.MULTILINE)
+ traffic = traffic.group(1).strip()
+ traffic = self.parseTraffic(traffic)
+
+ return {"validuntil": validuntil, "trafficleft": traffic}
+
+ def login(self, user, data, req):
+ page = req.load("http://freakshare.com/login.html", None,
+ {"submit": "Login", "user": user, "pass": data['password']}, cookies=True)
+
+ if "Falsche Logindaten!" in page or "Wrong Username or Password!" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/FreeWayMe.py b/pyload/plugins/accounts/FreeWayMe.py
new file mode 100644
index 000000000..baca53cd4
--- /dev/null
+++ b/pyload/plugins/accounts/FreeWayMe.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class FreeWayMe(Account):
+ __name__ = "FreeWayMe"
+ __type__ = "account"
+ __version__ = "0.11"
+
+ __description__ = """FreeWayMe account plugin"""
+ __author_name__ = "Nicolas Giese"
+ __author_mail__ = "james@free-way.me"
+
+
+ def loadAccountInfo(self, user, req):
+ status = self.getAccountStatus(user, req)
+ if not status:
+ return False
+ self.logDebug(status)
+
+ account_info = {"validuntil": -1, "premium": False}
+ if status['premium'] == "Free":
+ account_info['trafficleft'] = int(status['guthaben']) * 1024
+ elif status['premium'] == "Spender":
+ account_info['trafficleft'] = -1
+ elif status['premium'] == "Flatrate":
+ account_info = {"validuntil": int(status['Flatrate']),
+ "trafficleft": -1,
+ "premium": True}
+
+ return account_info
+
+ def getpw(self, user):
+ return self.accounts[user]['password']
+
+ def login(self, user, data, req):
+ status = self.getAccountStatus(user, req)
+
+ # Check if user and password are valid
+ if not status:
+ self.wrongPassword()
+
+ def getAccountStatus(self, user, req):
+ answer = req.load("https://www.free-way.me/ajax/jd.php",
+ get={"id": 4, "user": user, "pass": self.accounts[user]['password']})
+ self.logDebug("login: %s" % answer)
+ if answer == "Invalid login":
+ self.wrongPassword()
+ return False
+ return json_loads(answer)
diff --git a/pyload/plugins/accounts/FshareVn.py b/pyload/plugins/accounts/FshareVn.py
new file mode 100644
index 000000000..3d664629b
--- /dev/null
+++ b/pyload/plugins/accounts/FshareVn.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+from pycurl import REFERER
+import re
+
+from pyload.plugins.Account import Account
+
+
+class FshareVn(Account):
+ __name__ = "FshareVn"
+ __type__ = "account"
+ __version__ = "0.07"
+
+ __description__ = """Fshare.vn account plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ VALID_UNTIL_PATTERN = ur'<dt>Thời hạn dùng:</dt>\s*<dd>([^<]+)</dd>'
+ LIFETIME_PATTERN = ur'<dt>Lần đăng nhập trước:</dt>\s*<dd>[^<]+</dd>'
+ TRAFFIC_LEFT_PATTERN = ur'<dt>Tổng Dung Lượng Tài Khoản</dt>\s*<dd[^>]*>([0-9.]+) ([kKMG])B</dd>'
+ DIRECT_DOWNLOAD_PATTERN = ur'<input type="checkbox"\s*([^=>]*)[^>]*/>Kích hoạt download trực tiếp</dt>'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.fshare.vn/account_info.php", decode=True)
+
+ if re.search(self.LIFETIME_PATTERN, html):
+ self.logDebug("Lifetime membership detected")
+ trafficleft = self.getTrafficLeft(html)
+ return {"validuntil": -1, "trafficleft": trafficleft, "premium": True}
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ premium = True
+ validuntil = mktime(strptime(m.group(1), '%I:%M:%S %p %d-%m-%Y'))
+ trafficleft = self.getTrafficLeft(html)
+ else:
+ premium = False
+ validuntil = None
+ trafficleft = None
+
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+ def login(self, user, data, req):
+ req.http.c.setopt(REFERER, "https://www.fshare.vn/login.php")
+
+ html = req.load('https://www.fshare.vn/login.php', post={
+ "login_password": data['password'],
+ "login_useremail": user,
+ "url_refe": "http://www.fshare.vn/index.php"
+ }, referer=True, decode=True)
+
+ if not re.search(r'<img\s+alt="VIP"', html):
+ self.wrongPassword()
+
+ def getTrafficLeft(self, html):
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ return float(m.group(1)) * 1024 ** {'k': 0, 'K': 0, 'M': 1, 'G': 2}[m.group(2)] if m else 0
diff --git a/pyload/plugins/accounts/Ftp.py b/pyload/plugins/accounts/Ftp.py
new file mode 100644
index 000000000..23b637050
--- /dev/null
+++ b/pyload/plugins/accounts/Ftp.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+
+
+class Ftp(Account):
+ __name__ = "Ftp"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Ftp dummy account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ login_timeout = info_threshold = -1 #: Unlimited
diff --git a/pyload/plugins/accounts/HellshareCz.py b/pyload/plugins/accounts/HellshareCz.py
new file mode 100644
index 000000000..ae3f974a1
--- /dev/null
+++ b/pyload/plugins/accounts/HellshareCz.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+import re
+import time
+
+from pyload.plugins.Account import Account
+
+
+class HellshareCz(Account):
+ __name__ = "HellshareCz"
+ __type__ = "account"
+ __version__ = "0.14"
+
+ __description__ = """Hellshare.cz account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ CREDIT_LEFT_PATTERN = r'<div class="credit-link">\s*<table>\s*<tr>\s*<th>(\d+|\d\d\.\d\d\.)</th>'
+
+
+ def loadAccountInfo(self, user, req):
+ self.relogin(user)
+ html = req.load("http://www.hellshare.com/")
+
+ m = re.search(self.CREDIT_LEFT_PATTERN, html)
+ if m is None:
+ trafficleft = None
+ validuntil = None
+ premium = False
+ else:
+ credit = m.group(1)
+ premium = True
+ try:
+ if "." in credit:
+ #Time-based account
+ vt = [int(x) for x in credit.split('.')[:2]]
+ lt = time.localtime()
+ year = lt.tm_year + int(vt[1] < lt.tm_mon or (vt[1] == lt.tm_mon and vt[0] < lt.tm_mday))
+ validuntil = time.mktime(time.strptime("%s%d 23:59:59" % (credit, year), "%d.%m.%Y %H:%M:%S"))
+ trafficleft = -1
+ else:
+ #Traffic-based account
+ trafficleft = int(credit) * 1024
+ validuntil = -1
+ except Exception, e:
+ self.logError('Unable to parse credit info', e)
+ validuntil = -1
+ trafficleft = -1
+
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+ def login(self, user, data, req):
+ html = req.load('http://www.hellshare.com/')
+ if req.lastEffectiveURL != 'http://www.hellshare.com/':
+ #Switch to English
+ self.logDebug('Switch lang - URL: %s' % req.lastEffectiveURL)
+ json = req.load("%s?do=locRouter-show" % req.lastEffectiveURL)
+ hash = re.search(r"(--[0-9a-f]+-)", json).group(1)
+ self.logDebug('Switch lang - HASH: %s' % hash)
+ html = req.load('http://www.hellshare.com/%s/' % hash)
+
+ if re.search(self.CREDIT_LEFT_PATTERN, html):
+ self.logDebug('Already logged in')
+ return
+
+ html = req.load('http://www.hellshare.com/login?do=loginForm-submit', post={
+ "login": "Log in",
+ "password": data['password'],
+ "username": user,
+ "perm_login": "on"
+ })
+
+ if "<p>You input a wrong user name or wrong password</p>" in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/HotfileCom.py b/pyload/plugins/accounts/HotfileCom.py
new file mode 100644
index 000000000..ec164d14f
--- /dev/null
+++ b/pyload/plugins/accounts/HotfileCom.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+from time import strptime, mktime
+import hashlib
+
+from pyload.plugins.Account import Account
+
+
+class HotfileCom(Account):
+ __name__ = "HotfileCom"
+ __type__ = "account"
+ __version__ = "0.2"
+
+ __description__ = """Hotfile.com account plugin"""
+ __author_name__ = ("mkaay", "JoKoT3")
+ __author_mail__ = ("mkaay@mkaay.de", "jokot3@gmail.com")
+
+
+ def loadAccountInfo(self, user, req):
+ resp = self.apiCall("getuserinfo", user=user)
+ if resp.startswith("."):
+            self.logDebug("HotfileCom API Error: %s" % resp)
+ raise Exception
+ info = {}
+ for p in resp.split("&"):
+ key, value = p.split("=")
+ info[key] = value
+
+ if info['is_premium'] == '1':
+ info['premium_until'] = info['premium_until'].replace("T", " ")
+ zone = info['premium_until'][19:]
+ info['premium_until'] = info['premium_until'][:19]
+ zone = int(zone[:3])
+
+ validuntil = int(mktime(strptime(info['premium_until'], "%Y-%m-%d %H:%M:%S"))) + (zone * 60 * 60)
+ tmp = {"validuntil": validuntil, "trafficleft": -1, "premium": True}
+
+ elif info['is_premium'] == '0':
+ tmp = {"premium": False}
+
+ return tmp
+
+ def apiCall(self, method, post={}, user=None):
+ if user:
+ data = self.getAccountData(user)
+ else:
+ user, data = self.selectAccount()
+
+ req = self.getAccountRequest(user)
+
+ digest = req.load("http://api.hotfile.com/", post={"action": "getdigest"})
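+        # Challenge-response login: the hash sent to the API is
+        # md5(md5(password) + digest), so the plain password never leaves pyLoad.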
+ h = hashlib.md5()
+ h.update(data['password'])
+ hp = h.hexdigest()
+ h = hashlib.md5()
+ h.update(hp)
+ h.update(digest)
+ pwhash = h.hexdigest()
+
+ post.update({"action": method})
+ post.update({"username": user, "passwordmd5dig": pwhash, "digest": digest})
+ resp = req.load("http://api.hotfile.com/", post=post)
+ req.close()
+ return resp
+
+ def login(self, user, data, req):
+ cj = self.getAccountCookies(user)
+ cj.setCookie("hotfile.com", "lang", "en")
+ req.load("http://hotfile.com/", cookies=True)
+ page = req.load("http://hotfile.com/login.php", post={"returnto": "/", "user": user, "pass": data['password']},
+ cookies=True)
+
+ if "Bad username/password" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/Http.py b/pyload/plugins/accounts/Http.py
new file mode 100644
index 000000000..eda087c91
--- /dev/null
+++ b/pyload/plugins/accounts/Http.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+
+
+class Http(Account):
+ __name__ = "Http"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Http dummy account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ login_timeout = info_threshold = -1 #: Unlimited
diff --git a/pyload/plugins/accounts/LetitbitNet.py b/pyload/plugins/accounts/LetitbitNet.py
new file mode 100644
index 000000000..88e679f8e
--- /dev/null
+++ b/pyload/plugins/accounts/LetitbitNet.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+# from pyload.common.json_layer import json_loads, json_dumps
+
+
+class LetitbitNet(Account):
+ __name__ = "LetitbitNet"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Letitbit.net account plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def loadAccountInfo(self, user, req):
+ ## DISABLED BECAUSE IT GET 'key exausted' EVEN IF VALID ##
+ # api_key = self.accounts[user]['password']
+ # json_data = [api_key, ['key/info']]
+ # api_rep = req.load('http://api.letitbit.net/json', post={'r': json_dumps(json_data)})
+ # self.logDebug('API Key Info: ' + api_rep)
+ # api_rep = json_loads(api_rep)
+ #
+ # if api_rep['status'] == 'FAIL':
+ # self.logWarning(api_rep['data'])
+ # return {'valid': False, 'premium': False}
+
+ return {"premium": True}
+
+ def login(self, user, data, req):
+ # API_KEY is the username and the PREMIUM_KEY is the password
+ self.logInfo('You must use your API KEY as username and the PREMIUM KEY as password.')
diff --git a/pyload/plugins/accounts/LinksnappyCom.py b/pyload/plugins/accounts/LinksnappyCom.py
new file mode 100644
index 000000000..4ac046988
--- /dev/null
+++ b/pyload/plugins/accounts/LinksnappyCom.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+from hashlib import md5
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class LinksnappyCom(Account):
+ __name__ = "LinksnappyCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Linksnappy.com account plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ r = req.load('http://gen.linksnappy.com/lseAPI.php',
+ get={'act': 'USERDETAILS', 'username': user, 'password': md5(data['password']).hexdigest()})
+ self.logDebug("JSON data: " + r)
+ j = json_loads(r)
+
+ if j['error']:
+ return {"premium": False}
+
+ validuntil = j['return']['expire']
+ if validuntil == 'lifetime':
+ validuntil = -1
+ elif validuntil == 'expired':
+ return {"premium": False}
+ else:
+ validuntil = float(validuntil)
+
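+        # A missing or textual trafficleft value is treated as unlimited (-1).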
+        if 'trafficleft' not in j['return'] or isinstance(j['return']['trafficleft'], basestring):
+ trafficleft = -1
+ else:
+ trafficleft = int(j['return']['trafficleft']) * 1024
+
+ return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
+
+ def login(self, user, data, req):
+ r = req.load('http://gen.linksnappy.com/lseAPI.php',
+ get={'act': 'USERDETAILS', 'username': user, 'password': md5(data['password']).hexdigest()})
+
+ if 'Invalid Account Details' in r:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/MegaDebridEu.py b/pyload/plugins/accounts/MegaDebridEu.py
new file mode 100644
index 000000000..b449d7246
--- /dev/null
+++ b/pyload/plugins/accounts/MegaDebridEu.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class MegaDebridEu(Account):
+ __name__ = "MegaDebridEu"
+ __type__ = "account"
+ __version__ = "0.2"
+
+ __description__ = """mega-debrid.eu account plugin"""
+ __author_name__ = "D.Ducatel"
+ __author_mail__ = "dducatel@je-geek.fr"
+
+ # Define the base URL of MegaDebrid api
+ API_URL = "https://www.mega-debrid.eu/api.php"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ jsonResponse = req.load(self.API_URL,
+ get={'action': 'connectUser', 'login': user, 'password': data['password']})
+ response = json_loads(jsonResponse)
+
+ if response['response_code'] == "ok":
+ return {"premium": True, "validuntil": float(response['vip_end']), "status": True}
+ else:
+ self.logError(response)
+ return {"status": False, "premium": False}
+
+ def login(self, user, data, req):
+ jsonResponse = req.load(self.API_URL,
+ get={'action': 'connectUser', 'login': user, 'password': data['password']})
+ response = json_loads(jsonResponse)
+ if response['response_code'] != "ok":
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/MegasharesCom.py b/pyload/plugins/accounts/MegasharesCom.py
new file mode 100644
index 000000000..2032d0578
--- /dev/null
+++ b/pyload/plugins/accounts/MegasharesCom.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime
+
+from pyload.plugins.Account import Account
+
+
+class MegasharesCom(Account):
+ __name__ = "MegasharesCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Megashares.com account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ VALID_UNTIL_PATTERN = r'<p class="premium_info_box">Period Ends: (\w{3} \d{1,2}, \d{4})</p>'
+
+
+ def loadAccountInfo(self, user, req):
+ #self.relogin(user)
+ html = req.load("http://d01.megashares.com/myms.php", decode=True)
+
+ premium = False if '>Premium Upgrade<' in html else True
+
+ validuntil = trafficleft = -1
+ try:
+ timestr = re.search(self.VALID_UNTIL_PATTERN, html).group(1)
+ self.logDebug(timestr)
+ validuntil = mktime(strptime(timestr, "%b %d, %Y"))
+ except Exception, e:
+ self.logError(e)
+
+ return {"validuntil": validuntil, "trafficleft": -1, "premium": premium}
+
+ def login(self, user, data, req):
+ html = req.load('http://d01.megashares.com/myms_login.php', post={
+ "httpref": "",
+ "myms_login": "Login",
+ "mymslogin_name": user,
+ "mymspassword": data['password']
+ }, decode=True)
+
+ if not '<span class="b ml">%s</span>' % user in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/MovReelCom.py b/pyload/plugins/accounts/MovReelCom.py
new file mode 100644
index 000000000..0f80b1aa8
--- /dev/null
+++ b/pyload/plugins/accounts/MovReelCom.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSPAccount import XFSPAccount
+
+
+class MovReelCom(XFSPAccount):
+ __name__ = "MovReelCom"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Movreel.com account plugin"""
+ __author_name__ = "t4skforce"
+ __author_mail__ = "t4skforce1337[AT]gmail[DOT]com"
+
+ login_timeout = 60
+ info_threshold = 30
+
+ MAIN_PAGE = "http://movreel.com/"
+
+ TRAFFIC_LEFT_PATTERN = r'Traffic.*?<b>([^<]+)</b>'
+ LOGIN_FAIL_PATTERN = r'<b[^>]*>Incorrect Login or Password</b><br>'
diff --git a/pyload/plugins/accounts/MultiDebridCom.py b/pyload/plugins/accounts/MultiDebridCom.py
new file mode 100644
index 000000000..fa10c92cc
--- /dev/null
+++ b/pyload/plugins/accounts/MultiDebridCom.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+from time import time
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class MultiDebridCom(Account):
+ __name__ = "MultiDebridCom"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Multi-debrid.com account plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def loadAccountInfo(self, user, req):
+ if 'days_left' in self.json_data:
+ validuntil = int(time() + self.json_data['days_left'] * 24 * 60 * 60)
+ return {"premium": True, "validuntil": validuntil, "trafficleft": -1}
+ else:
+ self.logError('Unable to get account information')
+
+ def login(self, user, data, req):
+ # Password to use is the API-Password written in http://multi-debrid.com/myaccount
+ html = req.load("http://multi-debrid.com/api.php",
+ get={"user": user, "pass": data['password']})
+ self.logDebug('JSON data: ' + html)
+ self.json_data = json_loads(html)
+ if self.json_data['status'] != 'ok':
+ self.logError('Invalid login. The password to use is the API-Password you find in your "My Account" page')
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/MultishareCz.py b/pyload/plugins/accounts/MultishareCz.py
new file mode 100644
index 000000000..7e72ff513
--- /dev/null
+++ b/pyload/plugins/accounts/MultishareCz.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+#from time import mktime, strptime
+#from pycurl import REFERER
+import re
+from pyload.utils import parseFileSize
+
+
+class MultishareCz(Account):
+ __name__ = "MultishareCz"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Multishare.cz account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ TRAFFIC_LEFT_PATTERN = r'<span class="profil-zvyrazneni">Kredit:</span>\s*<strong>(?P<S>[0-9,]+)&nbsp;(?P<U>\w+)</strong>'
+ ACCOUNT_INFO_PATTERN = r'<input type="hidden" id="(u_ID|u_hash)" name="[^"]*" value="([^"]+)">'
+
+
+ def loadAccountInfo(self, user, req):
+ #self.relogin(user)
+ html = req.load("http://www.multishare.cz/profil/", decode=True)
+
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ trafficleft = parseFileSize(m.group('S'), m.group('U')) / 1024 if m else 0
+ self.premium = True if trafficleft else False
+
+ html = req.load("http://www.multishare.cz/", decode=True)
+ mms_info = dict(re.findall(self.ACCOUNT_INFO_PATTERN, html))
+
+ return dict(mms_info, **{"validuntil": -1, "trafficleft": trafficleft})
+
+ def login(self, user, data, req):
+ html = req.load('http://www.multishare.cz/html/prihlaseni_process.php', post={
+ "akce": "Přihlásit",
+ "heslo": data['password'],
+ "jmeno": user
+ }, decode=True)
+
+ if '<div class="akce-chyba akce">' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/NetloadIn.py b/pyload/plugins/accounts/NetloadIn.py
new file mode 100644
index 000000000..988affb51
--- /dev/null
+++ b/pyload/plugins/accounts/NetloadIn.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import time
+
+from pyload.plugins.Account import Account
+
+
+class NetloadIn(Account):
+ __name__ = "NetloadIn"
+ __type__ = "account"
+ __version__ = "0.22"
+
+ __description__ = """Netload.in account plugin"""
+ __author_name__ = ("RaNaN", "CryNickSystems")
+ __author_mail__ = ("RaNaN@pyload.org", "webmaster@pcProfil.de")
+
+
+ def loadAccountInfo(self, user, req):
+ page = req.load("http://netload.in/index.php?id=2&lang=de")
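+        # "<n> Tag(e), <m> Stunden" on the German account page is the remaining premium time in days and hours.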
+ left = r">(\d+) (Tag|Tage), (\d+) Stunden<"
+ left = re.search(left, page)
+ if left:
+ validuntil = time() + int(left.group(1)) * 24 * 60 * 60 + int(left.group(3)) * 60 * 60
+ trafficleft = -1
+ premium = True
+ else:
+ validuntil = None
+ premium = False
+ trafficleft = None
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+ def login(self, user, data, req):
+ page = req.load("http://netload.in/index.php", None,
+ {"txtuser": user, "txtpass": data['password'], "txtcheck": "login", "txtlogin": "Login"},
+ cookies=True)
+ if "password or it might be invalid!" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/OboomCom.py b/pyload/plugins/accounts/OboomCom.py
new file mode 100644
index 000000000..5d30c5955
--- /dev/null
+++ b/pyload/plugins/accounts/OboomCom.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+
+import time
+
+from pyload.lib.beaker.crypto.pbkdf2 import PBKDF2
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Account import Account
+
+
+class OboomCom(Account):
+ __name__ = "OboomCom"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Oboom.com account plugin"""
+ __author_name__ = "stanley"
+ __author_mail__ = "stanley.foerster@gmail.com"
+
+
+ def loadAccountData(self, user, req):
+ passwd = self.getAccountData(user)['password']
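+        # The login hash is PBKDF2 over the password, using the reversed password
+        # as salt (1000 iterations, 16-byte digest).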
+ salt = passwd[::-1]
+ pbkdf2 = PBKDF2(passwd, salt, 1000).hexread(16)
+ result = json_loads(req.load("https://www.oboom.com/1.0/login", get={"auth": user, "pass": pbkdf2}))
+ if not result[0] == 200:
+ self.logWarning("Failed to log in: %s" % result[1])
+ self.wrongPassword()
+ return result[1]
+
+ def loadAccountInfo(self, name, req):
+ accountData = self.loadAccountData(name, req)
+ userData = accountData['user']
+
+ if "premium_unix" in userData:
+ validUntilUtc = int(userData['premium_unix'])
+ if validUntilUtc > int(time.time()):
+ premium = True
+ validUntil = validUntilUtc
+ traffic = userData['traffic']
+ trafficLeft = traffic['current']
+ maxTraffic = traffic['max']
+ session = accountData['session']
+ return {"premium": premium,
+ "validuntil": validUntil,
+ "trafficleft": trafficLeft / 1024,
+ "maxtraffic": maxTraffic / 1024,
+ "session": session
+ }
+ return {"premium": False, "validuntil": -1}
+
+ def login(self, user, data, req):
+ self.loadAccountData(user, req)
diff --git a/pyload/plugins/accounts/OneFichierCom.py b/pyload/plugins/accounts/OneFichierCom.py
new file mode 100644
index 000000000..36899e2a5
--- /dev/null
+++ b/pyload/plugins/accounts/OneFichierCom.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import strptime, mktime
+from pycurl import REFERER
+
+from pyload.plugins.Account import Account
+
+
+class OneFichierCom(Account):
+ __name__ = "OneFichierCom"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """1fichier.com account plugin"""
+ __author_name__ = "Elrick69"
+ __author_mail__ = "elrick69[AT]rocketmail[DOT]com"
+
+ VALID_UNTIL_PATTERN = r'You are a premium user until (?P<d>\d{2})/(?P<m>\d{2})/(?P<y>\d{4})'
+
+
+ def loadAccountInfo(self, user, req):
+
+ html = req.load("http://1fichier.com/console/abo.pl")
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+
+ if m:
+ premium = True
+            validuntil = "%s/%s/%s" % (m.group('d'), m.group('m'), m.group('y'))
+ validuntil = int(mktime(strptime(validuntil, "%d/%m/%Y")))
+ else:
+ premium = False
+ validuntil = -1
+
+ return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}
+
+ def login(self, user, data, req):
+
+ req.http.c.setopt(REFERER, "http://1fichier.com/login.pl?lg=en")
+
+ html = req.load("http://1fichier.com/login.pl?lg=en", post={
+ "mail": user,
+ "pass": data['password'],
+ "Login": "Login"})
+
+ if r'<div class="error_message">Invalid username or password.</div>' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/OverLoadMe.py b/pyload/plugins/accounts/OverLoadMe.py
new file mode 100644
index 000000000..ba5a58158
--- /dev/null
+++ b/pyload/plugins/accounts/OverLoadMe.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class OverLoadMe(Account):
+ __name__ = "OverLoadMe"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Over-Load.me account plugin"""
+ __author_name__ = "marley"
+ __author_mail__ = "marley@over-load.me"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ page = req.load("https://api.over-load.me/account.php", get={"user": user, "auth": data['password']}).strip()
+ data = json_loads(page)
+
+ # Check for premium
+ if data['membership'] == "Free":
+ return {"premium": False}
+
+ account_info = {"validuntil": data['expirationunix'], "trafficleft": -1}
+ return account_info
+
+ def login(self, user, data, req):
+ jsondata = req.load("https://api.over-load.me/account.php",
+ get={"user": user, "auth": data['password']}).strip()
+ data = json_loads(jsondata)
+
+ if data['err'] == 1:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/Premium4Me.py b/pyload/plugins/accounts/Premium4Me.py
new file mode 100644
index 000000000..01b4b834e
--- /dev/null
+++ b/pyload/plugins/accounts/Premium4Me.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+
+
+class Premium4Me(Account):
+ __name__ = "Premium4Me"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Premium.to account plugin"""
+ __author_name__ = ("RaNaN", "zoidberg", "stickell")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+
+ def loadAccountInfo(self, user, req):
+ traffic = req.load("http://premium.to/api/traffic.php?authcode=%s" % self.authcode)
+
+ account_info = {"trafficleft": int(traffic) / 1024,
+ "validuntil": -1}
+
+ return account_info
+
+ def login(self, user, data, req):
+ self.authcode = req.load("http://premium.to/api/getauthcode.php?username=%s&password=%s" % (
+ user, data['password'])).strip()
+
+ if "wrong username" in self.authcode:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/PremiumizeMe.py b/pyload/plugins/accounts/PremiumizeMe.py
new file mode 100644
index 000000000..822ca8db9
--- /dev/null
+++ b/pyload/plugins/accounts/PremiumizeMe.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+
+from pyload.common.json_layer import json_loads
+
+
+class PremiumizeMe(Account):
+ __name__ = "PremiumizeMe"
+ __type__ = "account"
+ __version__ = "0.11"
+
+ __description__ = """Premiumize.me account plugin"""
+ __author_name__ = "Florian Franzen"
+ __author_mail__ = "FlorianFranzen@gmail.com"
+
+
+ def loadAccountInfo(self, user, req):
+ # Get user data from premiumize.me
+ status = self.getAccountStatus(user, req)
+ self.logDebug(status)
+
+ # Parse account info
+ account_info = {"validuntil": float(status['result']['expires']),
+ "trafficleft": max(0, status['result']['trafficleft_bytes'] / 1024)}
+
+ if status['result']['type'] == 'free':
+ account_info['premium'] = False
+
+ return account_info
+
+ def login(self, user, data, req):
+ # Get user data from premiumize.me
+ status = self.getAccountStatus(user, req)
+
+ # Check if user and password are valid
+ if status['status'] != 200:
+ self.wrongPassword()
+
+ def getAccountStatus(self, user, req):
+ # Use premiumize.me API v1 (see https://secure.premiumize.me/?show=api)
+ # to retrieve account info and return the parsed json answer
+ answer = req.load(
+ "https://api.premiumize.me/pm-api/v1.php?method=accountstatus&params[login]=%s&params[pass]=%s" % (
+ user, self.accounts[user]['password']))
+ return json_loads(answer)
diff --git a/pyload/plugins/accounts/QuickshareCz.py b/pyload/plugins/accounts/QuickshareCz.py
new file mode 100644
index 000000000..6abc02b1c
--- /dev/null
+++ b/pyload/plugins/accounts/QuickshareCz.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Account import Account
+from pyload.utils import parseFileSize
+
+
+class QuickshareCz(Account):
+ __name__ = "QuickshareCz"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Quickshare.cz account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.quickshare.cz/premium", decode=True)
+
+ m = re.search(r'Stav kreditu: <strong>(.+?)</strong>', html)
+ if m:
+ trafficleft = parseFileSize(m.group(1)) / 1024
+ premium = True if trafficleft else False
+ else:
+ trafficleft = None
+ premium = False
+
+ return {"validuntil": -1, "trafficleft": trafficleft, "premium": premium}
+
+ def login(self, user, data, req):
+ html = req.load('http://www.quickshare.cz/html/prihlaseni_process.php', post={
+ "akce": u'Přihlásit',
+ "heslo": data['password'],
+ "jmeno": user
+ }, decode=True)
+
+        if u'>Takový uživatel neexistuje.<' in html or u'>Špatné heslo.<' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/RPNetBiz.py b/pyload/plugins/accounts/RPNetBiz.py
new file mode 100644
index 000000000..0e600b4e3
--- /dev/null
+++ b/pyload/plugins/accounts/RPNetBiz.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class RPNetBiz(Account):
+ __name__ = "RPNetBiz"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """RPNet.biz account plugin"""
+ __author_name__ = "Dman"
+ __author_mail__ = "dmanugm@gmail.com"
+
+
+ def loadAccountInfo(self, user, req):
+ # Get account information from rpnet.biz
+ response = self.getAccountStatus(user, req)
+ try:
+ if response['accountInfo']['isPremium']:
+ # Parse account info. Change the trafficleft later to support per host info.
+ account_info = {"validuntil": int(response['accountInfo']['premiumExpiry']),
+ "trafficleft": -1, "premium": True}
+ else:
+ account_info = {"validuntil": None, "trafficleft": None, "premium": False}
+
+ except KeyError:
+ #handle wrong password exception
+ account_info = {"validuntil": None, "trafficleft": None, "premium": False}
+
+ return account_info
+
+ def login(self, user, data, req):
+ # Get account information from rpnet.biz
+ response = self.getAccountStatus(user, req)
+
+ # If we have an error in the response, we have wrong login information
+ if 'error' in response:
+ self.wrongPassword()
+
+ def getAccountStatus(self, user, req):
+ # Using the rpnet API, check if valid premium account
+ response = req.load("https://premium.rpnet.biz/client_api.php",
+ get={"username": user, "password": self.accounts[user]['password'],
+ "action": "showAccountInformation"})
+ self.logDebug("JSON data: %s" % response)
+
+ return json_loads(response)
diff --git a/pyload/plugins/accounts/RapidgatorNet.py b/pyload/plugins/accounts/RapidgatorNet.py
new file mode 100644
index 000000000..391d7ffcb
--- /dev/null
+++ b/pyload/plugins/accounts/RapidgatorNet.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class RapidgatorNet(Account):
+ __name__ = "RapidgatorNet"
+ __type__ = "account"
+ __version__ = "0.04"
+
+ __description__ = """Rapidgator.net account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ API_URL = 'http://rapidgator.net/api/user'
+
+
+ def loadAccountInfo(self, user, req):
+ try:
+ sid = self.getAccountData(user).get('SID')
+ assert sid
+
+ json = req.load("%s/info?sid=%s" % (self.API_URL, sid))
+ self.logDebug("API:USERINFO", json)
+ json = json_loads(json)
+
+ if json['response_status'] == 200:
+ if "reset_in" in json['response']:
+ self.scheduleRefresh(user, json['response']['reset_in'])
+
+ return {"validuntil": json['response']['expire_date'],
+ "trafficleft": int(json['response']['traffic_left']) / 1024,
+ "premium": True}
+ else:
+ self.logError(json['response_details'])
+ except Exception, e:
+ self.logError(e)
+
+ return {"validuntil": None, "trafficleft": None, "premium": False}
+
+ def login(self, user, data, req):
+ try:
+ json = req.load('%s/login' % self.API_URL, post={"username": user, "password": data['password']})
+ self.logDebug("API:LOGIN", json)
+ json = json_loads(json)
+
+ if json['response_status'] == 200:
+ data['SID'] = str(json['response']['session_id'])
+ return
+ else:
+ self.logError(json['response_details'])
+ except Exception, e:
+ self.logError(e)
+
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/RapidshareCom.py b/pyload/plugins/accounts/RapidshareCom.py
new file mode 100644
index 000000000..38db62200
--- /dev/null
+++ b/pyload/plugins/accounts/RapidshareCom.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+
+
+class RapidshareCom(Account):
+ __name__ = "RapidshareCom"
+ __type__ = "account"
+ __version__ = "0.22"
+
+ __description__ = """Rapidshare.com account plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ api_url_base = "http://api.rapidshare.com/cgi-bin/rsapi.cgi"
+ api_param_prem = {"sub": "getaccountdetails", "type": "prem", "login": user,
+ "password": data['password'], "withcookie": 1}
+ src = req.load(api_url_base, cookies=False, get=api_param_prem)
+ if src.startswith("ERROR"):
+ raise Exception(src)
+ fields = src.split("\n")
+ info = {}
+ for t in fields:
+ if not t.strip():
+ continue
+ k, v = t.split("=")
+ info[k] = v
+
+ validuntil = int(info['billeduntil'])
+ premium = True if validuntil else False
+
+ tmp = {"premium": premium, "validuntil": validuntil, "trafficleft": -1, "maxtraffic": -1}
+
+ return tmp
+
+ def login(self, user, data, req):
+ api_url_base = "http://api.rapidshare.com/cgi-bin/rsapi.cgi"
+ api_param_prem = {"sub": "getaccountdetails", "type": "prem", "login": user,
+ "password": data['password'], "withcookie": 1}
+ src = req.load(api_url_base, cookies=False, get=api_param_prem)
+ if src.startswith("ERROR"):
+ raise Exception(src + "### Note you have to use your account number for login, instead of name.")
+ fields = src.split("\n")
+ info = {}
+ for t in fields:
+ if not t.strip():
+ continue
+ k, v = t.split("=")
+ info[k] = v
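+        # withcookie=1 makes the API return a login cookie; store it as "enc" in the account's cookie jar.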
+ cj = self.getAccountCookies(user)
+ cj.setCookie("rapidshare.com", "enc", info['cookie'])
diff --git a/pyload/plugins/accounts/RarefileNet.py b/pyload/plugins/accounts/RarefileNet.py
new file mode 100644
index 000000000..68e2595e2
--- /dev/null
+++ b/pyload/plugins/accounts/RarefileNet.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSPAccount import XFSPAccount
+
+
+class RarefileNet(XFSPAccount):
+ __name__ = "RarefileNet"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """RareFile.net account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ MAIN_PAGE = "http://rarefile.net/"
diff --git a/pyload/plugins/accounts/RealdebridCom.py b/pyload/plugins/accounts/RealdebridCom.py
new file mode 100644
index 000000000..8ab0234a9
--- /dev/null
+++ b/pyload/plugins/accounts/RealdebridCom.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+import xml.dom.minidom as dom
+
+from pyload.plugins.Account import Account
+
+
+class RealdebridCom(Account):
+ __name__ = "RealdebridCom"
+ __type__ = "account"
+ __version__ = "0.43"
+
+ __description__ = """Real-Debrid.com account plugin"""
+ __author_name__ = "Devirex Hazzard"
+ __author_mail__ = "naibaf_11@yahoo.de"
+
+
+ def loadAccountInfo(self, user, req):
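+        # If login required a PIN (two-factor auth), the API cannot be queried; report the account as free.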
+ if self.pin_code:
+ return {"premium": False}
+ page = req.load("https://real-debrid.com/api/account.php")
+ xml = dom.parseString(page)
+ account_info = {"validuntil": int(xml.getElementsByTagName("expiration")[0].childNodes[0].nodeValue),
+ "trafficleft": -1}
+
+ return account_info
+
+ def login(self, user, data, req):
+ self.pin_code = False
+ page = req.load("https://real-debrid.com/ajax/login.php", get={"user": user, "pass": data['password']})
+ if "Your login informations are incorrect" in page:
+ self.wrongPassword()
+ elif "PIN Code required" in page:
+ self.logWarning('PIN code required. Please login to https://real-debrid.com using the PIN or disable the double authentication in your control panel on https://real-debrid.com.')
+ self.pin_code = True
diff --git a/pyload/plugins/accounts/RehostTo.py b/pyload/plugins/accounts/RehostTo.py
new file mode 100644
index 000000000..3bda118f4
--- /dev/null
+++ b/pyload/plugins/accounts/RehostTo.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+
+
+class RehostTo(Account):
+ __name__ = "RehostTo"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Rehost.to account plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ page = req.load("http://rehost.to/api.php?cmd=login&user=%s&pass=%s" % (user, data['password']))
+ data = [x.split("=") for x in page.split(",")]
+ ses = data[0][1]
+ long_ses = data[1][1]
+
+ page = req.load("http://rehost.to/api.php?cmd=get_premium_credits&long_ses=%s" % long_ses)
+ traffic, valid = page.split(",")
+
+ account_info = {"trafficleft": int(traffic) * 1024,
+ "validuntil": int(valid),
+ "long_ses": long_ses,
+ "ses": ses}
+
+ return account_info
+
+ def login(self, user, data, req):
+ page = req.load("http://rehost.to/api.php?cmd=login&user=%s&pass=%s" % (user, data['password']))
+
+ if "Login failed." in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/RyushareCom.py b/pyload/plugins/accounts/RyushareCom.py
new file mode 100644
index 000000000..74258e984
--- /dev/null
+++ b/pyload/plugins/accounts/RyushareCom.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSPAccount import XFSPAccount
+
+
+class RyushareCom(XFSPAccount):
+ __name__ = "RyushareCom"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Ryushare.com account plugin"""
+ __author_name__ = ("zoidberg", "trance4us")
+ __author_mail__ = ("zoidberg@mujmail.cz", "")
+
+ MAIN_PAGE = "http://ryushare.com/"
+
+
+ def login(self, user, data, req):
+ req.lastURL = "http://ryushare.com/login.python"
+ html = req.load("http://ryushare.com/login.python",
+ post={"login": user, "password": data['password'], "op": "login"})
+ if 'Incorrect Login or Password' in html or '>Error<' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/ShareRapidCom.py b/pyload/plugins/accounts/ShareRapidCom.py
new file mode 100644
index 000000000..92e6c7988
--- /dev/null
+++ b/pyload/plugins/accounts/ShareRapidCom.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import mktime, strptime
+from pyload.plugins.Account import Account
+
+
+class ShareRapidCom(Account):
+ __name__ = "ShareRapidCom"
+ __type__ = "account"
+ __version__ = "0.34"
+
+ __description__ = """MegaRapid.cz account plugin"""
+ __author_name__ = ("MikyWoW", "zoidberg")
+ __author_mail__ = ("mikywow@seznam.cz", "zoidberg@mujmail.cz")
+
+ login_timeout = 60
+
+
+ def loadAccountInfo(self, user, req):
+ src = req.load("http://megarapid.cz/mujucet/", decode=True)
+
+ m = re.search(ur'<td>Max. počet paralelních stahování: </td><td>(\d+)', src)
+ if m:
+ data = self.getAccountData(user)
+ data['options']['limitDL'] = [int(m.group(1))]
+
+ m = re.search(ur'<td>Paušální stahování aktivní. Vyprší </td><td><strong>(.*?)</strong>', src)
+ if m:
+ validuntil = mktime(strptime(m.group(1), "%d.%m.%Y - %H:%M"))
+ return {"premium": True, "trafficleft": -1, "validuntil": validuntil}
+
+ m = re.search(r'<tr><td>Kredit</td><td>(.*?) GiB', src)
+ if m:
+ trafficleft = float(m.group(1)) * (1 << 20)
+ return {"premium": True, "trafficleft": trafficleft, "validuntil": -1}
+
+ return {"premium": False, "trafficleft": None, "validuntil": None}
+
+ def login(self, user, data, req):
+ htm = req.load("http://megarapid.cz/prihlaseni/", cookies=True)
+ if "Heslo:" in htm:
+ start = htm.index('id="inp_hash" name="hash" value="')
+ htm = htm[start + 33:]
+ hashes = htm[0:32]
+ htm = req.load("http://megarapid.cz/prihlaseni/",
+ post={"hash": hashes,
+ "login": user,
+ "pass1": data['password'],
+ "remember": 0,
+ "sbmt": u"Přihlásit"}, cookies=True)
diff --git a/pyload/plugins/accounts/ShareonlineBiz.py b/pyload/plugins/accounts/ShareonlineBiz.py
new file mode 100644
index 000000000..0f6e61fab
--- /dev/null
+++ b/pyload/plugins/accounts/ShareonlineBiz.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+
+
+class ShareonlineBiz(Account):
+ __name__ = "ShareonlineBiz"
+ __type__ = "account"
+ __version__ = "0.24"
+
+ __description__ = """Share-online.biz account plugin"""
+ __author_name__ = ("mkaay", "zoidberg")
+ __author_mail__ = ("mkaay@mkaay.de", "zoidberg@mujmail.cz")
+
+
+ def getUserAPI(self, user, req):
+ return req.load("http://api.share-online.biz/account.php",
+ {"username": user, "password": self.accounts[user]['password'], "act": "userDetails"})
+
+ def loadAccountInfo(self, user, req):
+ src = self.getUserAPI(user, req)
+
+ info = {}
+ for line in src.splitlines():
+ if "=" in line:
+ key, value = line.split("=")
+ info[key] = value
+ self.logDebug(info)
+
+ if "dl" in info and info['dl'].lower() != "not_available":
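+        # Store the "dl" and "a" session tokens from the API as cookies when they are available.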
+ req.cj.setCookie("share-online.biz", "dl", info['dl'])
+ if "a" in info and info['a'].lower() != "not_available":
+ req.cj.setCookie("share-online.biz", "a", info['a'])
+
+ return {"validuntil": int(info['expire_date']) if "expire_date" in info else -1,
+ "trafficleft": -1,
+ "premium": True if ("dl" in info or "a" in info) and (info['group'] != "Sammler") else False}
+
+ def login(self, user, data, req):
+ src = self.getUserAPI(user, req)
+ if "EXCEPTION" in src:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/SimplyPremiumCom.py b/pyload/plugins/accounts/SimplyPremiumCom.py
new file mode 100644
index 000000000..5958c1f34
--- /dev/null
+++ b/pyload/plugins/accounts/SimplyPremiumCom.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Account import Account
+
+
+class SimplyPremiumCom(Account):
+ __name__ = "SimplyPremiumCom"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Simply-Premium.com account plugin"""
+ __author_name__ = "EvolutionClip"
+ __author_mail__ = "evolutionclip@live.de"
+
+
+ def loadAccountInfo(self, user, req):
+ json_data = req.load('http://www.simply-premium.com/api/user.php?format=json')
+ self.logDebug("JSON data: " + json_data)
+ json_data = json_loads(json_data)
+
+ if 'vip' in json_data['result'] and json_data['result']['vip'] == 0:
+ return {"premium": False}
+
+ #Time package
+ validuntil = float(json_data['result']['timeend'])
+ #Traffic package
+ # {"trafficleft": int(traffic) / 1024, "validuntil": -1}
+ #trafficleft = int(json_data['result']['traffic'] / 1024)
+
+ #return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
+ return {"premium": True, "validuntil": validuntil}
+
+ def login(self, user, data, req):
+ req.cj.setCookie("simply-premium.com", "lang", "EN")
+
+ if data['password'] == '' or data['password'] == '0':
+ post_data = {"key": user}
+ else:
+ post_data = {"login_name": user, "login_pass": data['password']}
+
+ html = req.load("http://www.simply-premium.com/login.php", post=post_data)
+
+ if 'logout' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/SimplydebridCom.py b/pyload/plugins/accounts/SimplydebridCom.py
new file mode 100644
index 000000000..169b27e0b
--- /dev/null
+++ b/pyload/plugins/accounts/SimplydebridCom.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+
+from pyload.plugins.Account import Account
+
+
+class SimplydebridCom(Account):
+ __name__ = "SimplydebridCom"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Simply-Debrid.com account plugin"""
+ __author_name__ = "Kagenoshin"
+ __author_mail__ = "kagenoshin@gmx.ch"
+
+
+ def loadAccountInfo(self, user, req):
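+        # login=2 returns the account details as semicolon-separated fields:
+        # the first field is a status flag, the third the expiry date (DD/MM/YYYY).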
+ get_data = {'login': 2, 'u': self.loginname, 'p': self.password}
+ response = req.load("http://simply-debrid.com/api.php", get=get_data, decode=True)
+ data = [x.strip() for x in response.split(";")]
+ if str(data[0]) != "1":
+ return {"premium": False}
+ else:
+ return {"trafficleft": -1, "validuntil": mktime(strptime(str(data[2]), "%d/%m/%Y"))}
+
+ def login(self, user, data, req):
+ self.loginname = user
+ self.password = data['password']
+ get_data = {'login': 1, 'u': self.loginname, 'p': self.password}
+ response = req.load("http://simply-debrid.com/api.php", get=get_data, decode=True)
+ if response != "02: loggin success":
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/StahnuTo.py b/pyload/plugins/accounts/StahnuTo.py
new file mode 100644
index 000000000..9d4cc6994
--- /dev/null
+++ b/pyload/plugins/accounts/StahnuTo.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Account import Account
+from pyload.utils import parseFileSize
+
+
+class StahnuTo(Account):
+ __name__ = "StahnuTo"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """StahnuTo account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.stahnu.to/")
+
+ m = re.search(r'>VIP: (\d+.*)<', html)
+ trafficleft = parseFileSize(m.group(1)) * 1024 if m else 0
+
+ return {"premium": trafficleft > (512 * 1024), "trafficleft": trafficleft, "validuntil": -1}
+
+ def login(self, user, data, req):
+ html = req.load("http://www.stahnu.to/login.php", post={
+ "username": user,
+ "password": data['password'],
+ "submit": "Login"})
+
+ if not '<a href="logout.php">' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/TurbobitNet.py b/pyload/plugins/accounts/TurbobitNet.py
new file mode 100644
index 000000000..d4221a97a
--- /dev/null
+++ b/pyload/plugins/accounts/TurbobitNet.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime
+
+from pyload.plugins.Account import Account
+
+
+class TurbobitNet(Account):
+ __name__ = "TurbobitNet"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """TurbobitNet account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://turbobit.net")
+
+ m = re.search(r'<u>Turbo Access</u> to ([0-9.]+)', html)
+ if m:
+ premium = True
+ validuntil = mktime(strptime(m.group(1), "%d.%m.%Y"))
+ else:
+ premium = False
+ validuntil = -1
+
+ return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}
+
+ def login(self, user, data, req):
+ req.cj.setCookie("turbobit.net", "user_lang", "en")
+
+ html = req.load("http://turbobit.net/user/login", post={
+ "user[login]": user,
+ "user[pass]": data['password'],
+ "user[submit]": "Login"})
+
+ if not '<div class="menu-item user-name">' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/UlozTo.py b/pyload/plugins/accounts/UlozTo.py
new file mode 100644
index 000000000..01fb134e8
--- /dev/null
+++ b/pyload/plugins/accounts/UlozTo.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Account import Account
+
+
+class UlozTo(Account):
+ __name__ = "UlozTo"
+ __type__ = "account"
+ __version__ = "0.06"
+
+ __description__ = """Uloz.to account plugin"""
+ __author_name__ = ("zoidberg", "pulpe")
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ TRAFFIC_LEFT_PATTERN = r'<li class="menu-kredit"><a href="/kredit" title="[^"]*?GB = ([0-9.]+) MB"'
+
+
+ def loadAccountInfo(self, user, req):
+ #this cookie gets lost somehow after each request
+ self.phpsessid = req.cj.getCookie("ULOSESSID")
+ html = req.load("http://www.ulozto.net/", decode=True)
+ req.cj.setCookie("www.ulozto.net", "ULOSESSID", self.phpsessid)
+
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ trafficleft = int(float(m.group(1).replace(' ', '').replace(',', '.')) * 1000 * 1.048) if m else 0
+ self.premium = True if trafficleft else False
+
+ return {"validuntil": -1, "trafficleft": trafficleft}
+
+ def login(self, user, data, req):
+ login_page = req.load('http://www.ulozto.net/?do=web-login', decode=True)
+ action = re.findall('<form action="(.+?)"', login_page)[1].replace('&amp;', '&')
+ token = re.search('_token_" value="(.+?)"', login_page).group(1)
+
+ html = req.load('http://www.ulozto.net'+action, post={
+ "_token_": token,
+ "login": "Submit",
+ "password": data['password'],
+ "username": user
+ }, decode=True)
+
+ if '<div class="flash error">' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/UnrestrictLi.py b/pyload/plugins/accounts/UnrestrictLi.py
new file mode 100644
index 000000000..39a75f959
--- /dev/null
+++ b/pyload/plugins/accounts/UnrestrictLi.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Account import Account
+from pyload.common.json_layer import json_loads
+
+
+class UnrestrictLi(Account):
+ __name__ = "UnrestrictLi"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Unrestrict.li account plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def loadAccountInfo(self, user, req):
+ json_data = req.load('http://unrestrict.li/api/jdownloader/user.php?format=json')
+ self.logDebug("JSON data: " + json_data)
+ json_data = json_loads(json_data)
+
+ if 'vip' in json_data['result'] and json_data['result']['vip'] == 0:
+ return {"premium": False}
+
+ validuntil = json_data['result']['expires']
+ trafficleft = int(json_data['result']['traffic'] / 1024)
+
+ return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
+
+ def login(self, user, data, req):
+ req.cj.setCookie("unrestrict.li", "lang", "EN")
+ html = req.load("https://unrestrict.li/sign_in")
+
+ if 'solvemedia' in html:
+ self.logError("A Captcha is required. Go to http://unrestrict.li/sign_in and login, then retry")
+ return
+
+ post_data = {"username": user, "password": data['password'],
+ "remember_me": "remember", "signin": "Sign in"}
+ html = req.load("https://unrestrict.li/sign_in", post=post_data)
+
+ if 'sign_out' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/UploadedTo.py b/pyload/plugins/accounts/UploadedTo.py
new file mode 100644
index 000000000..64bbeac6e
--- /dev/null
+++ b/pyload/plugins/accounts/UploadedTo.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import time
+
+from pyload.plugins.Account import Account
+
+
+class UploadedTo(Account):
+ __name__ = "UploadedTo"
+ __type__ = "account"
+ __version__ = "0.26"
+
+ __description__ = """Uploaded.to account plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+
+ def loadAccountInfo(self, user, req):
+
+ req.load("http://uploaded.net/language/en")
+ html = req.load("http://uploaded.net/me")
+
+ premium = '<a href="register"><em>Premium</em>' in html or '<em>Premium</em></th>' in html
+
+ if premium:
+ raw_traffic = re.search(r'<th colspan="2"><b class="cB">([^<]+)', html).group(1).replace('.', '')
+ raw_valid = re.search(r"<td>Duration:</td>\s*<th>([^<]+)", html, re.MULTILINE).group(1).strip()
+
+ traffic = int(self.parseTraffic(raw_traffic))
+
+ if raw_valid == "unlimited":
+ validuntil = -1
+ else:
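+                # The remaining runtime (e.g. "2 weeks 3 days") is converted to an
+                # absolute timestamp by adding the equivalent number of hours.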
+ raw_valid = re.findall(r"(\d+) (Week|weeks|days|day|hours|hour)", raw_valid)
+ validuntil = time()
+ for n, u in raw_valid:
+ validuntil += int(n) * 60 * 60 * {"Week": 168, "weeks": 168, "days": 24,
+ "day": 24, "hours": 1, "hour": 1}[u]
+
+ return {"validuntil": validuntil, "trafficleft": traffic, "maxtraffic": 50 * 1024 * 1024}
+ else:
+ return {"premium": False, "validuntil": -1}
+
+ def login(self, user, data, req):
+
+ req.load("http://uploaded.net/language/en")
+ req.cj.setCookie("uploaded.net", "lang", "en")
+
+ page = req.load("http://uploaded.net/io/login", post={"id": user, "pw": data['password'], "_": ""})
+
+ if "User and password do not match!" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/UploadheroCom.py b/pyload/plugins/accounts/UploadheroCom.py
new file mode 100644
index 000000000..1cb0ab698
--- /dev/null
+++ b/pyload/plugins/accounts/UploadheroCom.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import re
+import datetime
+import time
+
+from pyload.plugins.Account import Account
+
+
+class UploadheroCom(Account):
+ __name__ = "UploadheroCom"
+ __type__ = "account"
+ __version__ = "0.2"
+
+ __description__ = """Uploadhero.co account plugin"""
+ __author_name__ = "mcmyst"
+ __author_mail__ = "mcmyst@hotmail.fr"
+
+
+ def loadAccountInfo(self, user, req):
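+        # The French account page reports the number of premium days left
+        # ("Il vous reste <n> jours premium").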
+ premium_pattern = re.compile('Il vous reste <span class="bleu">([0-9]+)</span> jours premium.')
+
+ data = self.getAccountData(user)
+ page = req.load("http://uploadhero.co/my-account")
+
+ if premium_pattern.search(page):
+ end_date = datetime.date.today() + datetime.timedelta(days=int(premium_pattern.search(page).group(1)))
+            end_date = time.mktime(end_date.timetuple())
+ account_info = {"validuntil": end_date, "trafficleft": -1, "premium": True}
+ else:
+ account_info = {"validuntil": -1, "trafficleft": -1, "premium": False}
+
+ return account_info
+
+ def login(self, user, data, req):
+ page = req.load("http://uploadhero.co/lib/connexion.php",
+ post={"pseudo_login": user, "password_login": data['password']})
+
+ if "mot de passe invalide" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/UploadingCom.py b/pyload/plugins/accounts/UploadingCom.py
new file mode 100644
index 000000000..9ac674b71
--- /dev/null
+++ b/pyload/plugins/accounts/UploadingCom.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+from time import time, strptime, mktime
+import re
+
+from pyload.plugins.Account import Account
+
+
+class UploadingCom(Account):
+ __name__ = "UploadingCom"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Uploading.com account plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+
+ def loadAccountInfo(self, user, req):
+ src = req.load("http://uploading.com/")
+        if "UPGRADE TO PREMIUM" in src:
+ return {"validuntil": -1, "trafficleft": -1, "premium": False}
+
+ m = re.search("Valid Until:(.*?)<", src)
+ if m:
+ validuntil = int(mktime(strptime(m.group(1).strip(), "%b %d, %Y")))
+ else:
+ validuntil = -1
+
+ return {"validuntil": validuntil, "trafficleft": -1, "premium": True}
+
+ def login(self, user, data, req):
+ req.cj.setCookie("uploading.com", "lang", "1")
+ req.cj.setCookie("uploading.com", "language", "1")
+ req.cj.setCookie("uploading.com", "setlang", "en")
+ req.cj.setCookie("uploading.com", "_lang", "en")
+ req.load("http://uploading.com/")
+ req.load("http://uploading.com/general/login_form/?JsHttpRequest=%s-xml" % long(time() * 1000),
+ post={"email": user, "password": data['password'], "remember": "on"})
diff --git a/pyload/plugins/accounts/UptoboxCom.py b/pyload/plugins/accounts/UptoboxCom.py
new file mode 100644
index 000000000..7f9618da8
--- /dev/null
+++ b/pyload/plugins/accounts/UptoboxCom.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSPAccount import XFSPAccount
+
+
+class UptoboxCom(XFSPAccount):
+ __name__ = "UptoboxCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """DDLStorage.com account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ MAIN_PAGE = "http://uptobox.com/"
+
+ VALID_UNTIL_PATTERN = r'>Premium.[Aa]ccount expire: ([^<]+)</strong>'
diff --git a/pyload/plugins/accounts/YibaishiwuCom.py b/pyload/plugins/accounts/YibaishiwuCom.py
new file mode 100644
index 000000000..3898c3cef
--- /dev/null
+++ b/pyload/plugins/accounts/YibaishiwuCom.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Account import Account
+
+
+class YibaishiwuCom(Account):
+ __name__ = "YibaishiwuCom"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """115.com account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ ACCOUNT_INFO_PATTERN = r'var USER_PERMISSION = {(.*?)}'
+
+
+ def loadAccountInfo(self, user, req):
+ #self.relogin(user)
+ html = req.load("http://115.com/", decode=True)
+
+ m = re.search(self.ACCOUNT_INFO_PATTERN, html, re.S)
+ premium = True if (m and 'is_vip: 1' in m.group(1)) else False
+ validuntil = trafficleft = (-1 if m else 0)
+ return dict({"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium})
+
+ def login(self, user, data, req):
+ html = req.load('http://passport.115.com/?ac=login', post={
+ "back": "http://www.115.com/",
+ "goto": "http://115.com/",
+ "login[account]": user,
+ "login[passwd]": data['password']
+ }, decode=True)
+
+ if not 'var USER_PERMISSION = {' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/accounts/ZeveraCom.py b/pyload/plugins/accounts/ZeveraCom.py
new file mode 100644
index 000000000..d84000359
--- /dev/null
+++ b/pyload/plugins/accounts/ZeveraCom.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+
+from pyload.plugins.Account import Account
+
+
+class ZeveraCom(Account):
+ __name__ = "ZeveraCom"
+ __type__ = "account"
+ __version__ = "0.21"
+
+ __description__ = """Zevera.com account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAPIData(req)
+ if data == "No traffic":
+ account_info = {"trafficleft": 0, "validuntil": 0, "premium": False}
+ else:
+ account_info = {
+ "trafficleft": int(data['availabletodaytraffic']) * 1024,
+ "validuntil": mktime(strptime(data['endsubscriptiondate'], "%Y/%m/%d %H:%M:%S")),
+ "premium": True
+ }
+ return account_info
+
+ def login(self, user, data, req):
+ self.loginname = user
+ self.password = data['password']
+ if self.getAPIData(req) == "No traffic":
+ self.wrongPassword()
+
+ def getAPIData(self, req, just_header=False, **kwargs):
+ get_data = {
+ 'cmd': 'accountinfo',
+ 'login': self.loginname,
+ 'pass': self.password
+ }
+ get_data.update(kwargs)
+
+ response = req.load("http://www.zevera.com/jDownloader.ashx", get=get_data,
+ decode=True, just_header=just_header)
+ self.logDebug(response)
+
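+        # A valid reply is a comma-separated list of "key: value" pairs; anything
+        # else (e.g. "No traffic") is returned verbatim.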
+ if ':' in response:
+ if not just_header:
+ response = response.replace(',', '\n')
+ return dict((y.strip().lower(), z.strip()) for (y, z) in
+ [x.split(':', 1) for x in response.splitlines() if ':' in x])
+ else:
+ return response
diff --git a/pyload/plugins/accounts/__init__.py b/pyload/plugins/accounts/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/accounts/__init__.py
diff --git a/pyload/plugins/container/CCF.py b/pyload/plugins/container/CCF.py
new file mode 100644
index 000000000..89ac90097
--- /dev/null
+++ b/pyload/plugins/container/CCF.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from os import makedirs
+from os.path import exists
+from urllib2 import build_opener
+
+from pyload.lib.MultipartPostHandler import MultipartPostHandler
+
+from pyload.plugins.Container import Container
+from pyload.utils import safe_join
+
+
+class CCF(Container):
+ __name__ = "CCF"
+ __version__ = "0.2"
+
+ __pattern__ = r'.+\.ccf'
+
+ __description__ = """CCF container decrypter plugin"""
+ __author_name__ = "Willnix"
+ __author_mail__ = "Willnix@pyload.org"
+
+
+ def decrypt(self, pyfile):
+
+ infile = pyfile.url.replace("\n", "")
+
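+        # The CCF file is uploaded to the jdownloader conversion service, which
+        # returns an equivalent DLC container that is written to a temporary file
+        # and queued as the decrypt result.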
+ opener = build_opener(MultipartPostHandler)
+ params = {"src": "ccf",
+ "filename": "test.ccf",
+ "upload": open(infile, "rb")}
+ tempdlc_content = opener.open('http://service.jdownloader.net/dlcrypt/getDLC.php', params).read()
+
+ download_folder = self.config['general']['download_folder']
+
+ tempdlc_name = safe_join(download_folder, "tmp_%s.dlc" % pyfile.name)
+ tempdlc = open(tempdlc_name, "w")
+ tempdlc.write(re.search(r'<dlc>(.*)</dlc>', tempdlc_content, re.DOTALL).group(1))
+ tempdlc.close()
+
+ self.urls = [tempdlc_name]
diff --git a/pyload/plugins/container/DLC_25.pyc b/pyload/plugins/container/DLC_25.pyc
new file mode 100644
index 000000000..b8fde0051
--- /dev/null
+++ b/pyload/plugins/container/DLC_25.pyc
Binary files differ
diff --git a/pyload/plugins/container/DLC_26.pyc b/pyload/plugins/container/DLC_26.pyc
new file mode 100644
index 000000000..41a4e0cb8
--- /dev/null
+++ b/pyload/plugins/container/DLC_26.pyc
Binary files differ
diff --git a/pyload/plugins/container/DLC_27.pyc b/pyload/plugins/container/DLC_27.pyc
new file mode 100644
index 000000000..a6bffaf74
--- /dev/null
+++ b/pyload/plugins/container/DLC_27.pyc
Binary files differ
diff --git a/pyload/plugins/container/LinkList.py b/pyload/plugins/container/LinkList.py
new file mode 100644
index 000000000..b8941ee29
--- /dev/null
+++ b/pyload/plugins/container/LinkList.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+import codecs
+
+from pyload.plugins.Container import Container
+from pyload.utils import fs_encode
+
+
+class LinkList(Container):
+ __name__ = "LinkList"
+ __version__ = "0.12"
+
+ __pattern__ = r'.+\.txt'
+ __config__ = [("clear", "bool", "Clear Linklist after adding", False),
+ ("encoding", "string", "File encoding (default utf-8)", "")]
+
+ __description__ = """Read link lists in txt format"""
+ __author_name__ = ("spoob", "jeix")
+ __author_mail__ = ("spoob@pyload.org", "jeix@hasnomail.com")
+
+
+ def decrypt(self, pyfile):
+ try:
+ file_enc = codecs.lookup(self.getConfig("encoding")).name
+ except:
+ file_enc = "utf-8"
+
+        self.logDebug(repr(pyfile.url))
+
+ file_name = fs_encode(pyfile.url)
+
+ txt = codecs.open(file_name, 'r', file_enc)
+ links = txt.readlines()
+ curPack = "Parsed links from %s" % pyfile.name
+
+ packages = {curPack:[],}
+
+ for link in links:
+ link = link.strip()
+ if not link:
+ continue
+
+ if link.startswith(";"):
+ continue
+ if link.startswith("[") and link.endswith("]"):
+ # new package
+ curPack = link[1:-1]
+ packages[curPack] = []
+ continue
+ packages[curPack].append(link)
+ txt.close()
+
+ # empty packages fix
+
+ delete = []
+
+ for key,value in packages.iteritems():
+ if not value:
+ delete.append(key)
+
+ for key in delete:
+ del packages[key]
+
+ if self.getConfig("clear"):
+ try:
+ txt = open(file_name, 'wb')
+ txt.close()
+ except:
+ self.logWarning(_("LinkList could not be cleared."))
+
+ for name, links in packages.iteritems():
+ self.packages.append((name, links, name))
diff --git a/pyload/plugins/container/RSDF.py b/pyload/plugins/container/RSDF.py
new file mode 100644
index 000000000..2b4d4c686
--- /dev/null
+++ b/pyload/plugins/container/RSDF.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+from pyload.plugins.Container import Container
+
+
+class RSDF(Container):
+ __name__ = "RSDF"
+ __version__ = "0.22"
+
+ __pattern__ = r'.+\.rsdf'
+
+ __description__ = """RSDF container decrypter plugin"""
+ __author_name__ = ("RaNaN", "spoob")
+ __author_mail__ = ("RaNaN@pyload.org", "spoob@pyload.org")
+
+
+ def decrypt(self, pyfile):
+
+ from Crypto.Cipher import AES
+
+ infile = pyfile.url.replace("\n", "")
+ Key = binascii.unhexlify('8C35192D964DC3182C6F84F3252239EB4A320D2500000000')
+
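+        # The CFB IV is derived by AES-ECB-encrypting a fixed all-0xFF block with the static RSDF key above.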
+ IV = binascii.unhexlify('FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF')
+ IV_Cipher = AES.new(Key, AES.MODE_ECB)
+ IV = IV_Cipher.encrypt(IV)
+
+ obj = AES.new(Key, AES.MODE_CFB, IV)
+
+ rsdf = open(infile, 'r')
+
+ data = rsdf.read()
+ rsdf.close()
+
+ if re.search(r"<title>404 - Not Found</title>", data) is None:
+ data = binascii.unhexlify(''.join(data.split()))
+ data = data.splitlines()
+
+ for link in data:
+ if not link:
+ continue
+ link = base64.b64decode(link)
+ link = obj.decrypt(link)
+ decryptedUrl = link.replace('CCF: ', '')
+ self.urls.append(decryptedUrl)
+
+        self.logDebug("%s: adding package %s with %d links" % (self.__name__, pyfile.package().name, len(self.urls)))
diff --git a/pyload/plugins/container/__init__.py b/pyload/plugins/container/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/container/__init__.py
diff --git a/pyload/plugins/crypter/BitshareComFolder.py b/pyload/plugins/crypter/BitshareComFolder.py
new file mode 100644
index 000000000..cfb6fc1a0
--- /dev/null
+++ b/pyload/plugins/crypter/BitshareComFolder.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class BitshareComFolder(SimpleCrypter):
+ __name__ = "BitshareComFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?bitshare\.com/\?d=\w+'
+
+ __description__ = """Bitshare.com folder decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ LINK_PATTERN = r'<a href="(http://bitshare.com/files/.+)">.+</a></td>'
+ TITLE_PATTERN = r'View public folder "(?P<title>.+)"</h1>'
diff --git a/pyload/plugins/crypter/C1neonCom.py b/pyload/plugins/crypter/C1neonCom.py
new file mode 100644
index 000000000..2d1e91ef6
--- /dev/null
+++ b/pyload/plugins/crypter/C1neonCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class C1neonCom(DeadCrypter):
+ __name__ = "C1neonCom"
+ __type__ = "crypter"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?c1neon.com/.*?'
+
+ __description__ = """C1neon.com decrypter plugin"""
+ __author_name__ = "godofdream"
+ __author_mail__ = "soilfiction@gmail.com"
diff --git a/pyload/plugins/crypter/ChipDe.py b/pyload/plugins/crypter/ChipDe.py
new file mode 100644
index 000000000..29a248693
--- /dev/null
+++ b/pyload/plugins/crypter/ChipDe.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Crypter import Crypter
+
+
+class ChipDe(Crypter):
+ __name__ = "ChipDe"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?chip.de/video/.*\.html'
+
+ __description__ = """Chip.de decrypter plugin"""
+ __author_name__ = "4Christopher"
+ __author_mail__ = "4Christopher@gmx.de"
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url)
+ try:
+ f = re.search(r'"(http://video.chip.de/\d+?/.*)"', self.html)
+ except:
+ self.fail('Failed to find the URL')
+ else:
+ self.urls = [f.group(1)]
+ self.logDebug('The file URL is %s' % self.urls[0])
diff --git a/pyload/plugins/crypter/CrockoComFolder.py b/pyload/plugins/crypter/CrockoComFolder.py
new file mode 100644
index 000000000..200b3333e
--- /dev/null
+++ b/pyload/plugins/crypter/CrockoComFolder.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class CrockoComFolder(SimpleCrypter):
+ __name__ = "CrockoComFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?crocko.com/f/.*'
+
+ __description__ = """Crocko.com folder decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ LINK_PATTERN = r'<td class="last"><a href="([^"]+)">download</a>'
diff --git a/pyload/plugins/crypter/CryptItCom.py b/pyload/plugins/crypter/CryptItCom.py
new file mode 100644
index 000000000..3de00847e
--- /dev/null
+++ b/pyload/plugins/crypter/CryptItCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class CryptItCom(DeadCrypter):
+ __name__ = "CryptItCom"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?crypt-it\.com/(s|e|d|c)/[\w]+'
+
+ __description__ = """Crypt-it.com decrypter plugin"""
+ __author_name__ = "jeix"
+ __author_mail__ = "jeix@hasnomail.de"
diff --git a/pyload/plugins/crypter/CzshareComFolder.py b/pyload/plugins/crypter/CzshareComFolder.py
new file mode 100644
index 000000000..94e4f07b3
--- /dev/null
+++ b/pyload/plugins/crypter/CzshareComFolder.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Crypter import Crypter
+
+
+class CzshareComFolder(Crypter):
+ __name__ = "CzshareComFolder"
+ __type__ = "crypter"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?(czshare|sdilej)\.(com|cz)/folders/.*'
+
+ __description__ = """Czshare.com folder decrypter plugin, now Sdilej.cz"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FOLDER_PATTERN = r'<tr class="subdirectory">\s*<td>\s*<table>(.*?)</table>'
+ LINK_PATTERN = r'<td class="col2"><a href="([^"]+)">info</a></td>'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ m = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
+ if m is None:
+ self.fail("Parse error (FOLDER)")
+
+ self.urls.extend(re.findall(self.LINK_PATTERN, m.group(1)))
+ if not self.urls:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/DDLMusicOrg.py b/pyload/plugins/crypter/DDLMusicOrg.py
new file mode 100644
index 000000000..be4a92617
--- /dev/null
+++ b/pyload/plugins/crypter/DDLMusicOrg.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import sleep
+
+from pyload.plugins.Crypter import Crypter
+
+
+class DDLMusicOrg(Crypter):
+ __name__ = "DDLMusicOrg"
+ __type__ = "crypter"
+ __version__ = "0.3"
+
+ __pattern__ = r'http://(?:www\.)?ddl-music\.org/captcha/ddlm_cr\d\.php\?\d+\?\d+'
+
+ __description__ = """Ddl-music.org decrypter plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+
+ def setup(self):
+ self.multiDL = False
+
+ def decrypt(self, pyfile):
+ html = self.req.load(pyfile.url, cookies=True)
+
+ if re.search(r"Wer dies nicht rechnen kann", html) is not None:
+ self.offline()
+
+ math = re.search(r"(\d+) ([\+-]) (\d+) =\s+<inp", html)
+ id = re.search(r"name=\"id\" value=\"(\d+)\"", html).group(1)
+ linknr = re.search(r"name=\"linknr\" value=\"(\d+)\"", html).group(1)
+
+ solve = ""
+ if math.group(2) == "+":
+ solve = int(math.group(1)) + int(math.group(3))
+ else:
+ solve = int(math.group(1)) - int(math.group(3))
+ sleep(3)
+ htmlwithlink = self.req.load(pyfile.url, cookies=True,
+ post={"calc%s" % linknr: solve, "send%s" % linknr: "Send", "id": id,
+ "linknr": linknr})
+ m = re.search(r"<form id=\"ff\" action=\"(.*?)\" method=\"post\">", htmlwithlink)
+ if m:
+ self.urls = [m.group(1)]
+ else:
+ self.retry()
diff --git a/pyload/plugins/crypter/DailymotionBatch.py b/pyload/plugins/crypter/DailymotionBatch.py
new file mode 100644
index 000000000..d44350c6b
--- /dev/null
+++ b/pyload/plugins/crypter/DailymotionBatch.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Crypter import Crypter
+from pyload.utils import safe_join
+
+
+class DailymotionBatch(Crypter):
+ __name__ = "DailymotionBatch"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?dailymotion\.com/((playlists/)?(?P<TYPE>playlist|user)/)?(?P<ID>[\w^_]+)(?(TYPE)|#)'
+
+ __description__ = """Dailymotion.com channel & playlist decrypter"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+
+ def api_response(self, ref, req=None):
+ url = urljoin("https://api.dailymotion.com/", ref)
+ page = self.load(url, get=req)
+ return json_loads(page)
+
+ def getPlaylistInfo(self, id):
+ ref = "playlist/" + id
+ req = {"fields": "name,owner.screenname"}
+ playlist = self.api_response(ref, req)
+
+ if "error" in playlist:
+ return
+
+ name = playlist['name']
+ owner = playlist['owner.screenname']
+ return name, owner
+
+ def _getPlaylists(self, user_id, page=1):
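+ # page through the user's playlists 100 entries at a time, recursing while 'has_more' is set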
+ ref = "user/%s/playlists" % user_id
+ req = {"fields": "id", "page": page, "limit": 100}
+ user = self.api_response(ref, req)
+
+ if "error" in user:
+ return
+
+ for playlist in user['list']:
+ yield playlist['id']
+
+ if user['has_more']:
+ for item in self._getPlaylists(user_id, page + 1):
+ yield item
+
+ def getPlaylists(self, user_id):
+ return [(id,) + self.getPlaylistInfo(id) for id in self._getPlaylists(user_id)]
+
+ def _getVideos(self, id, page=1):
+ ref = "playlist/%s/videos" % id
+ req = {"fields": "url", "page": page, "limit": 100}
+ playlist = self.api_response(ref, req)
+
+ if "error" in playlist:
+ return
+
+ for video in playlist['list']:
+ yield video['url']
+
+ if playlist['has_more']:
+ for item in self._getVideos(id, page + 1):
+ yield item
+
+ def getVideos(self, playlist_id):
+ return list(self._getVideos(playlist_id))[::-1]
+
+ def decrypt(self, pyfile):
+ m = re.match(self.__pattern__, pyfile.url)
+ m_id = m.group("ID")
+ m_type = m.group("TYPE")
+
+ if m_type == "playlist":
+ self.logDebug("Url recognized as Playlist")
+ p_info = self.getPlaylistInfo(m_id)
+ playlists = [(m_id,) + p_info] if p_info else None
+ else:
+ self.logDebug("Url recognized as Channel")
+ playlists = self.getPlaylists(m_id)
+ self.logDebug("%s playlist\s found on channel \"%s\"" % (len(playlists), m_id))
+
+ if not playlists:
+ self.fail("No playlist available")
+
+ for p_id, p_name, p_owner in playlists:
+ p_videos = self.getVideos(p_id)
+ p_folder = safe_join(self.config['general']['download_folder'], p_owner, p_name)
+ self.logDebug("%s video\s found on playlist \"%s\"" % (len(p_videos), p_name))
+ self.packages.append((p_name, p_videos, p_folder)) #: folder is NOT recognized by pyload 0.4.9!
diff --git a/pyload/plugins/crypter/DataHuFolder.py b/pyload/plugins/crypter/DataHuFolder.py
new file mode 100644
index 000000000..49dab9159
--- /dev/null
+++ b/pyload/plugins/crypter/DataHuFolder.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class DataHuFolder(SimpleCrypter):
+ __name__ = "DataHuFolder"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?data.hu/dir/\w+'
+
+ __description__ = """Data.hu folder decrypter plugin"""
+ __author_name__ = ("crash", "stickell")
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ LINK_PATTERN = r"<a href='(http://data\.hu/get/.+)' target='_blank'>\1</a>"
+ TITLE_PATTERN = ur'<title>(?P<title>.+) Let\xf6lt\xe9se</title>'
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+
+ if u'K\xe9rlek add meg a jelsz\xf3t' in self.html: # Password protected
+ password = self.getPassword()
+ if not password:
+ self.fail("No password specified, please set right password on Add package form and retry")
+ self.logDebug('The folder is password protected', 'Using password: ' + password)
+ self.html = self.load(pyfile.url, post={'mappa_pass': password}, decode=True)
+ if u'Hib\xe1s jelsz\xf3' in self.html: # Wrong password
+ self.fail("Incorrect password, please set right password on Add package form and retry")
+
+ package_name, folder_name = self.getPackageNameAndFolder()
+
+ package_links = re.findall(self.LINK_PATTERN, self.html)
+ self.logDebug('Package has %d links' % len(package_links))
+
+ if package_links:
+ self.packages = [(package_name, package_links, folder_name)]
+ else:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/DdlstorageComFolder.py b/pyload/plugins/crypter/DdlstorageComFolder.py
new file mode 100644
index 000000000..7469610f1
--- /dev/null
+++ b/pyload/plugins/crypter/DdlstorageComFolder.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter, create_getInfo
+
+
+class DdlstorageComFolder(DeadCrypter):
+ __name__ = "DdlstorageComFolder"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?ddlstorage\.com/folder/\w+'
+
+ __description__ = """DDLStorage.com folder decrypter plugin"""
+ __author_name__ = ("godofdream", "stickell")
+ __author_mail__ = ("soilfiction@gmail.com", "l.stickell@yahoo.it")
+
+
+getInfo = create_getInfo(DdlstorageComFolder)
diff --git a/pyload/plugins/crypter/DepositfilesComFolder.py b/pyload/plugins/crypter/DepositfilesComFolder.py
new file mode 100644
index 000000000..e308305ae
--- /dev/null
+++ b/pyload/plugins/crypter/DepositfilesComFolder.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class DepositfilesComFolder(SimpleCrypter):
+ __name__ = "DepositfilesComFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?depositfiles.com/folders/\w+'
+
+ __description__ = """Depositfiles.com folder decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ LINK_PATTERN = r'<div class="progressName"[^>]*>\s*<a href="([^"]+)" title="[^"]*" target="_blank">'
diff --git a/pyload/plugins/crypter/Dereferer.py b/pyload/plugins/crypter/Dereferer.py
new file mode 100644
index 000000000..6a7ac8c67
--- /dev/null
+++ b/pyload/plugins/crypter/Dereferer.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.Crypter import Crypter
+
+
+class Dereferer(Crypter):
+ __name__ = "Dereferer"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'https?://([^/]+)/.*?(?P<url>(ht|f)tps?(://|%3A%2F%2F).*)'
+
+ __description__ = """Crypter for dereferers"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def decrypt(self, pyfile):
+ link = re.match(self.__pattern__, pyfile.url).group('url')
+ self.urls = [unquote(link).rstrip('+')]
diff --git a/pyload/plugins/crypter/DlProtectCom.py b/pyload/plugins/crypter/DlProtectCom.py
new file mode 100644
index 000000000..2c9e282be
--- /dev/null
+++ b/pyload/plugins/crypter/DlProtectCom.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from base64 import urlsafe_b64encode
+from time import time
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class DlProtectCom(SimpleCrypter):
+ __name__ = "DlProtectCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?dl-protect\.com/((en|fr)/)?(?P<ID>\w+)'
+
+ __description__ = """Dl-protect.com decrypter plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ OFFLINE_PATTERN = r'>Unfortunately, the link you are looking for is not found'
+
+
+ def getLinks(self):
+ # Direct link with redirect
+ if not re.match(r"http://(?:www\.)?dl-protect\.com", self.req.http.lastEffectiveURL):
+ return [self.req.http.lastEffectiveURL]
+
+ #id = re.match(self.__pattern__, self.pyfile.url).group("ID")
+ key = re.search(r'name="id_key" value="(.+?)"', self.html).group(1)
+
+ post_req = {"id_key": key, "submitform": ""}
+
+ if self.OFFLINE_PATTERN in self.html:
+ self.offline()
+ elif ">Please click on continue to see the content" in self.html:
+ post_req.update({"submitform": "Continue"})
+ else:
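+ # the "i" field appears to be a URL-safe base64 millisecond timestamp prefixed with "_"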
+ mstime = int(round(time() * 1000))
+ b64time = "_" + urlsafe_b64encode(str(mstime)).replace("=", "%3D")
+
+ post_req.update({"i": b64time, "submitform": "Decrypt+link"})
+
+ if ">Password :" in self.html:
+ post_req['pwd'] = self.getPassword()
+
+ if ">Security Code" in self.html:
+ captcha_id = re.search(r'/captcha\.php\?uid=(.+?)"', self.html).group(1)
+ captcha_url = "http://www.dl-protect.com/captcha.php?uid=" + captcha_id
+ captcha_code = self.decryptCaptcha(captcha_url, imgtype="gif")
+
+ post_req['secure'] = captcha_code
+
+ self.html = self.load(self.pyfile.url, post=post_req)
+
+ for errmsg in (">The password is incorrect", ">The security code is incorrect"):
+ if errmsg in self.html:
+ self.fail(errmsg[1:])
+
+ pattern = r'<a href="([^/].+?)" target="_blank">'
+ return re.findall(pattern, self.html)
diff --git a/pyload/plugins/crypter/DontKnowMe.py b/pyload/plugins/crypter/DontKnowMe.py
new file mode 100644
index 000000000..b16992b27
--- /dev/null
+++ b/pyload/plugins/crypter/DontKnowMe.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.Crypter import Crypter
+
+
+class DontKnowMe(Crypter):
+ __name__ = "DontKnowMe"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?dontknow.me/at/\?.+$'
+
+ __description__ = """DontKnow.me decrypter plugin"""
+ __author_name__ = "selaux"
+ __author_mail__ = None
+
+ LINK_PATTERN = r'http://dontknow.me/at/\?(.+)$'
+
+
+ def decrypt(self, pyfile):
+ link = re.findall(self.LINK_PATTERN, pyfile.url)[0]
+ self.urls = [unquote(link)]
diff --git a/pyload/plugins/crypter/DuckCryptInfo.py b/pyload/plugins/crypter/DuckCryptInfo.py
new file mode 100644
index 000000000..a7afa2d22
--- /dev/null
+++ b/pyload/plugins/crypter/DuckCryptInfo.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.lib.BeautifulSoup import BeautifulSoup
+
+from pyload.plugins.Crypter import Crypter
+
+
+class DuckCryptInfo(Crypter):
+ __name__ = "DuckCryptInfo"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?duckcrypt.info/(folder|wait|link)/(\w+)/?(\w*)'
+
+ __description__ = """DuckCrypt.info decrypter plugin"""
+ __author_name__ = "godofdream"
+ __author_mail__ = "soilfiction@gmail.com"
+
+ TIMER_PATTERN = r'<span id="timer">(.*)</span>'
+
+
+ def decrypt(self, pyfile):
+ url = pyfile.url
+ # seems we don't need to wait
+ #src = self.req.load(str(url))
+ #m = re.search(self.TIMER_PATTERN, src)
+ #if m:
+ # self.logDebug("Sleeping for" % m.group(1))
+ # self.setWait(int(m.group(1)) ,False)
+ m = re.match(self.__pattern__, url)
+ if m is None:
+ self.fail('Unable to parse link')
+ if str(m.group(1)) == "link":
+ self.handleLink(url)
+ else:
+ self.handleFolder(m)
+
+ def handleFolder(self, m):
+ src = self.load("http://duckcrypt.info/ajax/auth.php?hash=" + str(m.group(2)))
+ m = re.match(self.__pattern__, src)
+ self.logDebug("Redirectet to " + str(m.group(0)))
+ src = self.load(str(m.group(0)))
+ soup = BeautifulSoup(src)
+ cryptlinks = soup.findAll("div", attrs={"class": "folderbox"})
+ self.logDebug("Redirectet to " + str(cryptlinks))
+ if not cryptlinks:
+ self.fail('No links found - (Plugin out of date?)')
+ for clink in cryptlinks:
+ if clink.find("a"):
+ self.handleLink(clink.find("a")['href'])
+
+ def handleLink(self, url):
+ src = self.load(url)
+ soup = BeautifulSoup(src)
+ self.urls = [soup.find("iframe")['src']]
+ if not self.urls:
+ self.logDebug('No links found - (Plugin out of date?)')
diff --git a/pyload/plugins/crypter/DuploadOrgFolder.py b/pyload/plugins/crypter/DuploadOrgFolder.py
new file mode 100644
index 000000000..ca76cff75
--- /dev/null
+++ b/pyload/plugins/crypter/DuploadOrgFolder.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class DuploadOrgFolder(SimpleCrypter):
+ __name__ = "DuploadOrgFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?dupload\.org/folder/\d+/'
+
+ __description__ = """Dupload.org folder decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ LINK_PATTERN = r'<td style="[^"]+"><a href="(http://[^"]+)" target="_blank">[^<]+</a></td>'
diff --git a/pyload/plugins/crypter/EasybytezComFolder.py b/pyload/plugins/crypter/EasybytezComFolder.py
new file mode 100644
index 000000000..163f2bdf3
--- /dev/null
+++ b/pyload/plugins/crypter/EasybytezComFolder.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class EasybytezComFolder(SimpleCrypter):
+ __name__ = "EasybytezComFolder"
+ __type__ = "crypter"
+ __version__ = "0.06"
+
+ __pattern__ = r'http://(?:www\.)?easybytez\.com/users/(?P<ID>\d+/\d+)'
+
+ __description__ = """Easybytez.com decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
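+ # rewrite folder URLs to request up to 10000 entries, so the whole folder is listed on a single page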
+ URL_REPLACEMENTS = [(__pattern__, r"http://www.easybytez.com/users/\g<ID>?per_page=10000")]
+
+ LINK_PATTERN = r'<td><a href="(http://www\.easybytez\.com/\w+)" target="_blank">.+(?:</a>)?</td>'
+ TITLE_PATTERN = r'<Title>Files of \d+: (?P<title>.+) folder</Title>'
diff --git a/pyload/plugins/crypter/EmbeduploadCom.py b/pyload/plugins/crypter/EmbeduploadCom.py
new file mode 100644
index 000000000..476767f94
--- /dev/null
+++ b/pyload/plugins/crypter/EmbeduploadCom.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Crypter import Crypter
+from pyload.network.HTTPRequest import BadHeader
+
+
+class EmbeduploadCom(Crypter):
+ __name__ = "EmbeduploadCom"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?embedupload.com/\?d=.*'
+ __config__ = [("preferedHoster", "str", "Prefered hoster list (bar-separated) ", "embedupload"),
+ ("ignoredHoster", "str", "Ignored hoster list (bar-separated) ", "")]
+
+ __description__ = """EmbedUpload.com decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ LINK_PATTERN = r'<div id="([^"]+)"[^>]*>\s*<a href="([^"]+)" target="_blank" (?:class="DownloadNow"|style="color:red")>'
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+ tmp_links = []
+
+ m = re.findall(self.LINK_PATTERN, self.html)
+ if m:
+ prefered_set = set(self.getConfig("preferedHoster").split('|'))
+ prefered_set = map(lambda s: s.lower().split('.')[0], prefered_set)
+ print "PF", prefered_set
+ tmp_links.extend([x[1] for x in m if x[0] in prefered_set])
+ self.urls = self.getLocation(tmp_links)
+
+ if not self.urls:
+ ignored_set = set(self.getConfig("ignoredHoster").split('|'))
+ ignored_set = map(lambda s: s.lower().split('.')[0], ignored_set)
+ print "IG", ignored_set
+ tmp_links.extend([x[1] for x in m if x[0] not in ignored_set])
+ self.urls = self.getLocation(tmp_links)
+
+ if not self.urls:
+ self.fail('Could not extract any links')
+
+ def getLocation(self, tmp_links):
+ new_links = []
+ for link in tmp_links:
+ try:
+ header = self.load(link, just_header=True)
+ if "location" in header:
+ new_links.append(header['location'])
+ except BadHeader:
+ pass
+ return new_links
diff --git a/pyload/plugins/crypter/FilebeerInfoFolder.py b/pyload/plugins/crypter/FilebeerInfoFolder.py
new file mode 100644
index 000000000..ee577a865
--- /dev/null
+++ b/pyload/plugins/crypter/FilebeerInfoFolder.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class FilebeerInfoFolder(DeadCrypter):
+ __name__ = "FilebeerInfoFolder"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?filebeer\.info/(\d+~f).*'
+
+ __description__ = """Filebeer.info folder decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
diff --git a/pyload/plugins/crypter/FilecloudIoFolder.py b/pyload/plugins/crypter/FilecloudIoFolder.py
new file mode 100644
index 000000000..577dd43a3
--- /dev/null
+++ b/pyload/plugins/crypter/FilecloudIoFolder.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FilecloudIoFolder(SimpleCrypter):
+ __name__ = "FilecloudIoFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?(filecloud\.io|ifile\.it)/_\w+'
+
+ __description__ = """Filecloud.io folder decrypter plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ LINK_PATTERN = r'href="(http://filecloud.io/\w+)" title'
+ TITLE_PATTERN = r'>(?P<title>.+?) - filecloud.io<'
diff --git a/pyload/plugins/crypter/FilefactoryComFolder.py b/pyload/plugins/crypter/FilefactoryComFolder.py
new file mode 100644
index 000000000..6886fa5b1
--- /dev/null
+++ b/pyload/plugins/crypter/FilefactoryComFolder.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FilefactoryComFolder(SimpleCrypter):
+ __name__ = "FilefactoryComFolder"
+ __type__ = "crypter"
+ __version__ = "0.2"
+
+ __pattern__ = r'https?://(?:www\.)?filefactory\.com/(?:f|folder)/\w+'
+
+ __description__ = """Filefactory.com folder decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ LINK_PATTERN = r'<td><a href="([^"]+)">'
+ TITLE_PATTERN = r'<h1>Files in <span>(?P<title>.+)</span></h1>'
+ PAGES_PATTERN = r'data-paginator-totalPages="(?P<pages>\d+)"'
+
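+ # pin the locale cookie to en_US: TITLE_PATTERN above assumes the English page text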
+ SH_COOKIES = [('.filefactory.com', 'locale', 'en_US.utf8')]
+
+
+ def loadPage(self, page_n):
+ return self.load(self.pyfile.url, get={'page': page_n})
diff --git a/pyload/plugins/crypter/FilerNetFolder.py b/pyload/plugins/crypter/FilerNetFolder.py
new file mode 100644
index 000000000..4acb7e165
--- /dev/null
+++ b/pyload/plugins/crypter/FilerNetFolder.py
@@ -0,0 +1,22 @@
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FilerNetFolder(SimpleCrypter):
+ __name__ = "FilerNetFolder"
+ __type__ = "crypter"
+ __version__ = "0.3"
+
+ __pattern__ = r'https?://filer\.net/folder/\w{16}'
+
+ __description__ = """Filer.net decrypter plugin"""
+ __author_name__ = ("nath_schwarz", "stickell")
+ __author_mail__ = ("nathan.notwhite@gmail.com", "l.stickell@yahoo.it")
+
+ LINK_PATTERN = r'href="(/get/\w{16})">(?!<)'
+ TITLE_PATTERN = r'<h3>(?P<title>.+) - <small'
+
+
+ def getLinks(self):
+ return ['http://filer.net%s' % link for link in re.findall(self.LINK_PATTERN, self.html)]
diff --git a/pyload/plugins/crypter/FileserveComFolder.py b/pyload/plugins/crypter/FileserveComFolder.py
new file mode 100644
index 000000000..52e1df6b4
--- /dev/null
+++ b/pyload/plugins/crypter/FileserveComFolder.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Crypter import Crypter
+
+
+class FileserveComFolder(Crypter):
+ __name__ = "FileserveComFolder"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?fileserve.com/list/\w+'
+
+ __description__ = """FileServe.com folder decrypter plugin"""
+ __author_name__ = "fionnc"
+ __author_mail__ = "fionnc@gmail.com"
+
+ FOLDER_PATTERN = r'<table class="file_list">(.*?)</table>'
+ LINK_PATTERN = r'<a href="([^"]+)" class="sheet_icon wbold">'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ new_links = []
+
+ folder = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
+ if folder is None:
+ self.fail("Parse error (FOLDER)")
+
+ new_links.extend(re.findall(self.LINK_PATTERN, folder.group(1)))
+
+ if new_links:
+ self.urls = map(lambda s: "http://fileserve.com%s" % s, new_links)
+ else:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/FilestubeCom.py b/pyload/plugins/crypter/FilestubeCom.py
new file mode 100644
index 000000000..fc80762d1
--- /dev/null
+++ b/pyload/plugins/crypter/FilestubeCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FilestubeCom(SimpleCrypter):
+ __name__ = "FilestubeCom"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?filestube\.(?:com|to)/\w+'
+
+ __description__ = """Filestube.com decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ LINK_PATTERN = r'<a class=\"file-link-main(?: noref)?\" [^>]* href=\"(http://[^\"]+)'
+ TITLE_PATTERN = r'<h1\s*> (?P<title>.+) download\s*</h1>'
diff --git a/pyload/plugins/crypter/FiletramCom.py b/pyload/plugins/crypter/FiletramCom.py
new file mode 100644
index 000000000..6620adc12
--- /dev/null
+++ b/pyload/plugins/crypter/FiletramCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FiletramCom(SimpleCrypter):
+ __name__ = "FiletramCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?filetram.com/[^/]+/.+'
+
+ __description__ = """Filetram.com decrypter plugin"""
+ __author_name__ = ("igel", "stickell")
+ __author_mail__ = ("igelkun@myopera.com", "l.stickell@yahoo.it")
+
+ LINK_PATTERN = r'\s+(http://.+)'
+ TITLE_PATTERN = r'<title>(?P<title>[^<]+) - Free Download[^<]*</title>'
diff --git a/pyload/plugins/crypter/FiredriveComFolder.py b/pyload/plugins/crypter/FiredriveComFolder.py
new file mode 100644
index 000000000..072a548a2
--- /dev/null
+++ b/pyload/plugins/crypter/FiredriveComFolder.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FiredriveComFolder(SimpleCrypter):
+ __name__ = "FiredriveComFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?(firedrive|putlocker)\.com/share/.+'
+
+ __description__ = """Firedrive.com folder decrypter plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ LINK_PATTERN = r'<div class="pf_item pf_(file|folder).+?public=\'(.+?)\''
+ TITLE_PATTERN = r'>Shared Folder "(?P<title>.+)" | Firedrive<'
+ OFFLINE_PATTERN = r'class="sad_face_image"|>No such page here.<'
+ TEMP_OFFLINE_PATTERN = r'>(File Temporarily Unavailable|Server Error. Try again later)'
+
+
+ def getLinks(self):
+ return map(lambda x: "http://www.firedrive.com/%s/%s" %
+ ("share" if x[0] == "folder" else "file", x[1]),
+ re.findall(self.LINK_PATTERN, self.html))
diff --git a/pyload/plugins/crypter/FourChanOrg.py b/pyload/plugins/crypter/FourChanOrg.py
new file mode 100644
index 000000000..2d3bfa07a
--- /dev/null
+++ b/pyload/plugins/crypter/FourChanOrg.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+#
+# Based on 4chandl by Roland Beermann (https://gist.github.com/enkore/3492599)
+
+import re
+
+from pyload.plugins.Crypter import Crypter
+
+
+class FourChanOrg(Crypter):
+ __name__ = "FourChanOrg"
+ __type__ = "crypter"
+ __version__ = "0.3"
+
+ __pattern__ = r'http://(?:www\.)?boards\.4chan.org/\w+/res/(\d+)'
+
+ __description__ = """4chan.org folder decrypter plugin"""
+ __author_name__ = None
+ __author_mail__ = None
+
+
+ def decrypt(self, pyfile):
+ pagehtml = self.load(pyfile.url)
+ images = set(re.findall(r'(images\.4chan\.org/[^/]*/src/[^"<]*)', pagehtml))
+ self.urls = ["http://" + image for image in images]
diff --git a/pyload/plugins/crypter/FreakhareComFolder.py b/pyload/plugins/crypter/FreakhareComFolder.py
new file mode 100644
index 000000000..fca1b26a1
--- /dev/null
+++ b/pyload/plugins/crypter/FreakhareComFolder.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FreakhareComFolder(SimpleCrypter):
+ __name__ = "FreakhareComFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?freakshare\.com/folder/.+'
+
+ __description__ = """Freakhare.com folder decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ LINK_PATTERN = r'<a href="(http://freakshare.com/files/[^"]+)" target="_blank">'
+ TITLE_PATTERN = r'Folder:</b> (?P<title>.+)'
+ PAGES_PATTERN = r'Pages: +(?P<pages>\d+)'
+
+
+ def loadPage(self, page_n):
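+ # pagination needs the folder id and md5 token scraped from the first page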
+ if not hasattr(self, 'f_id') and not hasattr(self, 'f_md5'):
+ m = re.search(r'http://freakshare.com/\?x=folder&f_id=(\d+)&f_md5=(\w+)', self.html)
+ if m:
+ self.f_id = m.group(1)
+ self.f_md5 = m.group(2)
+ return self.load('http://freakshare.com/', get={'x': 'folder',
+ 'f_id': self.f_id,
+ 'f_md5': self.f_md5,
+ 'entrys': '20',
+ 'page': page_n - 1,
+ 'order': ''}, decode=True)
diff --git a/pyload/plugins/crypter/FreetexthostCom.py b/pyload/plugins/crypter/FreetexthostCom.py
new file mode 100644
index 000000000..e56d638f0
--- /dev/null
+++ b/pyload/plugins/crypter/FreetexthostCom.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FreetexthostCom(SimpleCrypter):
+ __name__ = "FreetexthostCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?freetexthost\.com/\w+'
+
+ __description__ = """Freetexthost.com decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def getLinks(self):
+ m = re.search(r'<div id="contentsinner">\s*(.+)<div class="viewcount">', self.html, re.DOTALL)
+ if m is None:
+ self.fail('Unable to extract links | Plugin may be out-of-date')
+ links = m.group(1)
+ return links.strip().split("<br />\r\n")
diff --git a/pyload/plugins/crypter/FshareVnFolder.py b/pyload/plugins/crypter/FshareVnFolder.py
new file mode 100644
index 000000000..1706d97e0
--- /dev/null
+++ b/pyload/plugins/crypter/FshareVnFolder.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FshareVnFolder(SimpleCrypter):
+ __name__ = "FshareVnFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?fshare.vn/folder/.*'
+
+ __description__ = """Fshare.vn folder decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ LINK_PATTERN = r'<li class="w_80pc"><a href="([^"]+)" target="_blank">'
diff --git a/pyload/plugins/crypter/GooGl.py b/pyload/plugins/crypter/GooGl.py
new file mode 100644
index 000000000..ae48c61b5
--- /dev/null
+++ b/pyload/plugins/crypter/GooGl.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Crypter import Crypter
+from pyload.common.json_layer import json_loads
+
+
+class GooGl(Crypter):
+ __name__ = "GooGl"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?goo\.gl/\w+'
+
+ __description__ = """Goo.gl decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ API_URL = "https://www.googleapis.com/urlshortener/v1/url"
+
+
+ def decrypt(self, pyfile):
+ rep = self.load(self.API_URL, get={'shortUrl': pyfile.url})
+ self.logDebug('JSON data: ' + rep)
+ rep = json_loads(rep)
+
+ if 'longUrl' in rep:
+ self.urls = [rep['longUrl']]
+ else:
+ self.fail('Unable to expand shortened link')
diff --git a/pyload/plugins/crypter/HoerbuchIn.py b/pyload/plugins/crypter/HoerbuchIn.py
new file mode 100644
index 000000000..7c934a6f2
--- /dev/null
+++ b/pyload/plugins/crypter/HoerbuchIn.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.lib.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
+
+from pyload.plugins.Crypter import Crypter
+
+
+class HoerbuchIn(Crypter):
+ __name__ = "HoerbuchIn"
+ __type__ = "crypter"
+ __version__ = "0.6"
+
+ __pattern__ = r'http://(?:www\.)?hoerbuch\.in/(wp/horbucher/\d+/.+/|tp/out.php\?.+|protection/folder_\d+\.html)'
+
+ __description__ = """Hoerbuch.in decrypter plugin"""
+ __author_name__ = ("spoob", "mkaay")
+ __author_mail__ = ("spoob@pyload.org", "mkaay@mkaay.de")
+
+ article = re.compile("http://(?:www\.)?hoerbuch\.in/wp/horbucher/\d+/.+/")
+ protection = re.compile("http://(?:www\.)?hoerbuch\.in/protection/folder_\d+.html")
+
+
+ def decrypt(self, pyfile):
+ self.pyfile = pyfile
+
+ if self.article.match(pyfile.url):
+ src = self.load(pyfile.url)
+ soup = BeautifulSoup(src, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
+
+ abookname = soup.find("a", attrs={"rel": "bookmark"}).text
+ for a in soup.findAll("a", attrs={"href": self.protection}):
+ package = "%s (%s)" % (abookname, a.previousSibling.previousSibling.text[:-1])
+ links = self.decryptFolder(a['href'])
+
+ self.packages.append((package, links, package))
+ else:
+ self.urls = self.decryptFolder(pyfile.url)
+
+ def decryptFolder(self, url):
+ m = self.protection.search(url)
+ if m is None:
+ self.fail("Bad URL")
+ url = m.group(0)
+
+ self.pyfile.url = url
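+ # posting viewed=adpg appears to skip the ad page shown before the protected folder listing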
+ src = self.req.load(url, post={"viewed": "adpg"})
+
+ links = []
+ pattern = re.compile("http://www\.hoerbuch\.in/protection/(\w+)/(.*?)\"")
+ for hoster, lid in pattern.findall(src):
+ self.req.lastURL = url
+ self.load("http://www.hoerbuch.in/protection/%s/%s" % (hoster, lid))
+ links.append(self.req.lastEffectiveURL)
+
+ return links
diff --git a/pyload/plugins/crypter/HotfileFolderCom.py b/pyload/plugins/crypter/HotfileFolderCom.py
new file mode 100644
index 000000000..1b5ce28b6
--- /dev/null
+++ b/pyload/plugins/crypter/HotfileFolderCom.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Crypter import Crypter
+
+
+class HotfileFolderCom(Crypter):
+ __name__ = "HotfileFolderCom"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?hotfile.com/list/\w+/\w+'
+
+ __description__ = """Hotfile.com folder decrypter plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ name = re.findall(
+ r'<img src="/i/folder.gif" width="23" height="14" style="margin-bottom: -2px;" />([^<]+)', html,
+ re.MULTILINE)[0].replace("/", "")
+ new_links = re.findall(r'href="(http://(www.)?hotfile\.com/dl/\d+/[0-9a-zA-Z]+[^"]+)', html)
+
+ new_links = [x[0] for x in new_links]
+
+ self.packages = [(name, new_links, name)]
diff --git a/pyload/plugins/crypter/ILoadTo.py b/pyload/plugins/crypter/ILoadTo.py
new file mode 100644
index 000000000..16f813926
--- /dev/null
+++ b/pyload/plugins/crypter/ILoadTo.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class ILoadTo(DeadCrypter):
+ __name__ = "ILoadTo"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?iload\.to/go/\d+-[\w\.-]+/'
+
+ __description__ = """Iload.to decrypter plugin"""
+ __author_name__ = "hzpz"
+ __author_mail__ = None
diff --git a/pyload/plugins/crypter/ImgurComAlbum.py b/pyload/plugins/crypter/ImgurComAlbum.py
new file mode 100644
index 000000000..5e8be3a5d
--- /dev/null
+++ b/pyload/plugins/crypter/ImgurComAlbum.py
@@ -0,0 +1,24 @@
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+from pyload.utils import uniqify
+
+
+class ImgurComAlbum(SimpleCrypter):
+ __name__ = "ImgurComAlbum"
+ __type__ = "crypter"
+ __version__ = "0.4"
+
+ __pattern__ = r'https?://(?:www\.|m\.)?imgur\.com/(a|gallery|)/?\w{5,7}'
+
+ __description__ = """Imgur.com decrypter plugin"""
+ __author_name__ = "nath_schwarz"
+ __author_mail__ = "nathan.notwhite@gmail.com"
+
+ TITLE_PATTERN = r'(?P<title>.+) - Imgur'
+ LINK_PATTERN = r'i\.imgur\.com/\w{7}s?\.(?:jpeg|jpg|png|gif|apng)'
+
+
+ def getLinks(self):
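+ # drop the trailing "s" (thumbnail suffix) from the image name to get the full-size file URL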
+ f = lambda url: "http://" + re.sub(r'(\w{7})s\.', r'\1.', url)
+ return uniqify(map(f, re.findall(self.LINK_PATTERN, self.html)))
diff --git a/pyload/plugins/crypter/LetitbitNetFolder.py b/pyload/plugins/crypter/LetitbitNetFolder.py
new file mode 100644
index 000000000..b03ea27b2
--- /dev/null
+++ b/pyload/plugins/crypter/LetitbitNetFolder.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Crypter import Crypter
+
+
+class LetitbitNetFolder(Crypter):
+ __name__ = "LetitbitNetFolder"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?letitbit.net/folder/\w+'
+
+ __description__ = """Letitbit.net folder decrypter plugin"""
+ __author_name__ = ("DHMH", "z00nx")
+ __author_mail__ = ("webmaster@pcProfil.de", "z00nx0@gmail.com")
+
+ FOLDER_PATTERN = r'<table>(.*)</table>'
+ LINK_PATTERN = r'<a href="([^"]+)" target="_blank">'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ folder = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
+ if folder is None:
+ self.fail("Parse error (FOLDER)")
+
+ self.urls.extend(re.findall(self.LINK_PATTERN, folder.group(0)))
+
+ if not self.urls:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/LinkSaveIn.py b/pyload/plugins/crypter/LinkSaveIn.py
new file mode 100644
index 000000000..84dd8172e
--- /dev/null
+++ b/pyload/plugins/crypter/LinkSaveIn.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+#
+# * cnl2 and web links are skipped if JS is not available (instead of failing the package)
+# * only best available link source is used (priority: cnl2>rsdf>ccf>dlc>web)
+
+import base64
+import binascii
+import re
+
+from Crypto.Cipher import AES
+from pyload.plugins.Crypter import Crypter
+from pyload.unescape import unescape
+
+
+class LinkSaveIn(Crypter):
+ __name__ = "LinkSaveIn"
+ __type__ = "crypter"
+ __version__ = "2.01"
+
+ __pattern__ = r'http://(?:www\.)?linksave.in/(?P<id>\w+)$'
+
+ __description__ = """LinkSave.in decrypter plugin"""
+ __author_name__ = "fragonib"
+ __author_mail__ = "fragonib[AT]yahoo[DOT]es"
+
+ # Constants
+ _JK_KEY_ = "jk"
+ _CRYPTED_KEY_ = "crypted"
+ HOSTER_NAME = "linksave.in"
+
+
+ def setup(self):
+ self.html = None
+ self.fileid = None
+ self.captcha = False
+ self.package = None
+ self.preferred_sources = ["cnl2", "rsdf", "ccf", "dlc", "web"]
+
+ def decrypt(self, pyfile):
+ # Init
+ self.package = pyfile.package()
+ self.fileid = re.match(self.__pattern__, pyfile.url).group('id')
+ self.req.cj.setCookie(self.HOSTER_NAME, "Linksave_Language", "english")
+
+ # Request package
+ self.html = self.load(pyfile.url)
+ if not self.isOnline():
+ self.offline()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleErrors()
+
+ # Get package name and folder
+ (package_name, folder_name) = self.getPackageInfo()
+
+ # Extract package links
+ package_links = []
+ for type_ in self.preferred_sources:
+ package_links.extend(self.handleLinkSource(type_))
+ if package_links: # use only first source which provides links
+ break
+ package_links = set(package_links)
+
+ # Pack
+ if package_links:
+ self.packages = [(package_name, package_links, folder_name)]
+ else:
+ self.fail('Could not extract any links')
+
+ def isOnline(self):
+ if "<big>Error 404 - Folder not found!</big>" in self.html:
+ self.logDebug("File not found")
+ return False
+ return True
+
+ def isPasswordProtected(self):
+ if re.search(r'''<input.*?type="password"''', self.html):
+ self.logDebug("Links are password protected")
+ return True
+
+ def isCaptchaProtected(self):
+ if "<b>Captcha:</b>" in self.html:
+ self.logDebug("Links are captcha protected")
+ return True
+ return False
+
+ def unlockPasswordProtection(self):
+ password = self.getPassword()
+ self.logDebug("Submitting password [%s] for protected links" % password)
+ post = {"id": self.fileid, "besucherpasswort": password, 'login': 'submit'}
+ self.html = self.load(self.pyfile.url, post=post)
+
+ def unlockCaptchaProtection(self):
+ captcha_hash = re.search(r'name="hash" value="([^"]+)', self.html).group(1)
+ captcha_url = re.search(r'src=".(/captcha/cap.php\?hsh=[^"]+)', self.html).group(1)
+ captcha_code = self.decryptCaptcha("http://linksave.in" + captcha_url, forceUser=True)
+ self.html = self.load(self.pyfile.url, post={"id": self.fileid, "hash": captcha_hash, "code": captcha_code})
+
+ def getPackageInfo(self):
+ name = self.pyfile.package().name
+ folder = self.pyfile.package().folder
+ self.logDebug("Defaulting to pyfile name [%s] and folder [%s] for package" % (name, folder))
+ return name, folder
+
+ def handleErrors(self):
+ if "The visitorpassword you have entered is wrong" in self.html:
+ self.logDebug("Incorrect password, please set right password on 'Edit package' form and retry")
+ self.fail("Incorrect password, please set right password on 'Edit package' form and retry")
+
+ if self.captcha:
+ if "Wrong code. Please retry" in self.html:
+ self.logDebug("Invalid captcha, retrying")
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+ def handleLinkSource(self, type_):
+ if type_ == "cnl2":
+ return self.handleCNL2()
+ elif type_ in ("rsdf", "ccf", "dlc"):
+ return self.handleContainer(type_)
+ elif type_ == "web":
+ return self.handleWebLinks()
+ else:
+ self.fail('unknown source type "%s" (this is probably a bug)' % type_)
+
+ def handleWebLinks(self):
+ package_links = []
+ self.logDebug("Search for Web links")
+ if not self.js:
+ self.logDebug("no JS -> skip Web links")
+ else:
+ #@TODO: Gather paginated web links
+ pattern = r'<a href="http://linksave\.in/(\w{43})"'
+ ids = re.findall(pattern, self.html)
+ self.logDebug("Decrypting %d Web links" % len(ids))
+ for i, weblink_id in enumerate(ids):
+ try:
+ webLink = "http://linksave.in/%s" % weblink_id
+ self.logDebug("Decrypting Web link %d, %s" % (i + 1, webLink))
+ fwLink = "http://linksave.in/fw-%s" % weblink_id
+ response = self.load(fwLink)
+ jscode = re.findall(r'<script type="text/javascript">(.*)</script>', response)[-1]
+ jseval = self.js.eval("document = { write: function(e) { return e; } }; %s" % jscode)
+ dlLink = re.search(r'http://linksave\.in/dl-\w+', jseval).group(0)
+ self.logDebug("JsEngine returns value [%s] for redirection link" % dlLink)
+ response = self.load(dlLink)
+ link = unescape(re.search(r'<iframe src="(.+?)"', response).group(1))
+ package_links.append(link)
+ except Exception, detail:
+ self.logDebug("Error decrypting Web link %s, %s" % (webLink, detail))
+ return package_links
+
+ def handleContainer(self, type_):
+ package_links = []
+ type_ = type_.lower()
+ self.logDebug('Search for %s Container links' % type_.upper())
+ if not type_.isalnum(): # check to prevent broken re-pattern (cnl2,rsdf,ccf,dlc,web are all alpha-numeric)
+ self.fail('unknown container type "%s" (this is probably a bug)' % type_)
+ pattern = r"\('%s_link'\).href=unescape\('(.*?\.%s)'\)" % (type_, type_)
+ containersLinks = re.findall(pattern, self.html)
+ self.logDebug("Found %d %s Container links" % (len(containersLinks), type_.upper()))
+ for containerLink in containersLinks:
+ link = "http://linksave.in/%s" % unescape(containerLink)
+ package_links.append(link)
+ return package_links
+
+ def handleCNL2(self):
+ package_links = []
+ self.logDebug("Search for CNL2 links")
+ if not self.js:
+ self.logDebug("no JS -> skip CNL2 links")
+ elif 'cnl2_load' in self.html:
+ try:
+ (vcrypted, vjk) = self._getCipherParams()
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.fail("Unable to decrypt CNL2 links")
+ return package_links
+
+ def _getCipherParams(self):
+ # Get jk
+ jk_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkSaveIn._JK_KEY_
+ vjk = re.findall(jk_re, self.html)
+
+ # Get crypted
+ crypted_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkSaveIn._CRYPTED_KEY_
+ vcrypted = re.findall(crypted_re, self.html)
+
+ # Log and return
+ self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+ return vcrypted, vjk
+
+ def _getLinks(self, crypted, jk):
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
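+ # CNL2 uses the key returned by the JS snippet as both AES key and IV (CBC mode)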
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Package has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/LinkdecrypterCom.py b/pyload/plugins/crypter/LinkdecrypterCom.py
new file mode 100644
index 000000000..b6ca2ec4f
--- /dev/null
+++ b/pyload/plugins/crypter/LinkdecrypterCom.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Crypter import Crypter
+
+
+class LinkdecrypterCom(Crypter):
+ __name__ = "LinkdecrypterCom"
+ __type__ = "crypter"
+ __version__ = "0.27"
+
+ __pattern__ = None
+
+ __description__ = """Linkdecrypter.com"""
+ __author_name__ = ("zoidberg", "flowlee")
+ __author_mail__ = ("zoidberg@mujmail.cz", "")
+
+ TEXTAREA_PATTERN = r'<textarea name="links" wrap="off" readonly="1" class="caja_des">(.+)</textarea>'
+ PASSWORD_PATTERN = r'<input type="text" name="password"'
+ CAPTCHA_PATTERN = r'<img class="captcha" src="(.+?)"(.*?)>'
+ REDIR_PATTERN = r'<i>(Click <a href="./">here</a> if your browser does not redirect you).</i>'
+
+
+ def decrypt(self, pyfile):
+
+ self.passwords = self.getPassword().splitlines()
+
+ # API not working anymore
+ self.urls = self.decryptHTML()
+ if not self.urls:
+ self.fail('Could not extract any links')
+
+ def decryptAPI(self):
+
+ get_dict = {"t": "link", "url": self.pyfile.url, "lcache": "1"}
+ self.html = self.load('http://linkdecrypter.com/api', get=get_dict)
+ if self.html.startswith('http://'):
+ return self.html.splitlines()
+
+ if self.html == 'INTERRUPTION(PASSWORD)':
+ for get_dict['pass'] in self.passwords:
+ self.html = self.load('http://linkdecrypter.com/api', get=get_dict)
+ if self.html.startswith('http://'):
+ return self.html.splitlines()
+
+ self.logError('API', self.html)
+ if self.html == 'INTERRUPTION(PASSWORD)':
+ self.fail("No or incorrect password")
+
+ return None
+
+ def decryptHTML(self):
+
+ retries = 5
+
+ post_dict = {"link_cache": "on", "pro_links": self.pyfile.url, "modo_links": "text"}
+ self.html = self.load('http://linkdecrypter.com/', post=post_dict, cookies=True, decode=True)
+
+ while self.passwords or retries:
+ m = re.search(self.TEXTAREA_PATTERN, self.html, flags=re.DOTALL)
+ if m:
+ return [x for x in m.group(1).splitlines() if '[LINK-ERROR]' not in x]
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ captcha_url = 'http://linkdecrypter.com/' + m.group(1)
+ result_type = "positional" if "getPos" in m.group(2) else "textual"
+
+ m = re.search(r"<p><i><b>([^<]+)</b></i></p>", self.html)
+ msg = m.group(1) if m else ""
+ self.logInfo("Captcha protected link", result_type, msg)
+
+ captcha = self.decryptCaptcha(captcha_url, result_type=result_type)
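+ # positional captchas return an (x, y) coordinate tuple, submitted as "x|y"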
+ if result_type == "positional":
+ captcha = "%d|%d" % captcha
+ self.html = self.load('http://linkdecrypter.com/', post={"captcha": captcha}, decode=True)
+ retries -= 1
+
+ elif self.PASSWORD_PATTERN in self.html:
+ if self.passwords:
+ password = self.passwords.pop(0)
+ self.logInfo("Password protected link, trying " + password)
+ self.html = self.load('http://linkdecrypter.com/', post={'password': password}, decode=True)
+ else:
+ self.fail("No or incorrect password")
+
+ else:
+ retries -= 1
+ self.html = self.load('http://linkdecrypter.com/', cookies=True, decode=True)
+
+ return None
diff --git a/pyload/plugins/crypter/LixIn.py b/pyload/plugins/crypter/LixIn.py
new file mode 100644
index 000000000..1d812b0e3
--- /dev/null
+++ b/pyload/plugins/crypter/LixIn.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Crypter import Crypter
+
+
+class LixIn(Crypter):
+ __name__ = "LixIn"
+ __type__ = "crypter"
+ __version__ = "0.22"
+
+ __pattern__ = r'http://(www.)?lix.in/(?P<id>.*)'
+
+ __description__ = """Lix.in decrypter plugin"""
+ __author_name__ = "spoob"
+ __author_mail__ = "spoob@pyload.org"
+
+ CAPTCHA_PATTERN = r'<img src="(?P<image>captcha_img.php\?.*?)"'
+ SUBMIT_PATTERN = r"value='continue.*?'"
+ LINK_PATTERN = r'name="ifram" src="(?P<link>.*?)"'
+
+
+ def decrypt(self, pyfile):
+ url = pyfile.url
+
+ m = re.match(self.__pattern__, url)
+ if m is None:
+ self.fail("couldn't identify file id")
+
+ id = m.group("id")
+ self.logDebug("File id is %s" % id)
+
+ self.html = self.req.load(url, decode=True)
+
+ m = re.search(self.SUBMIT_PATTERN, self.html)
+ if m is None:
+ self.fail("link doesn't seem valid")
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ for _ in xrange(5):
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ self.logDebug("trying captcha")
+ captcharesult = self.decryptCaptcha("http://lix.in/" + m.group("image"))
+ self.html = self.req.load(url, decode=True,
+ post={"capt": captcharesult, "submit": "submit", "tiny": id})
+ else:
+ self.logDebug("no captcha/captcha solved")
+ else:
+ self.html = self.req.load(url, decode=True, post={"submit": "submit", "tiny": id})
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.fail("can't find destination url")
+ else:
+ self.urls = [m.group("link")]
+ self.logDebug("Found link %s, adding to package" % self.urls[0])
diff --git a/pyload/plugins/crypter/LofCc.py b/pyload/plugins/crypter/LofCc.py
new file mode 100644
index 000000000..6c91a55ec
--- /dev/null
+++ b/pyload/plugins/crypter/LofCc.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class LofCc(DeadCrypter):
+ __name__ = "LofCc"
+ __type__ = "crypter"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?lof.cc/(.*)'
+
+ __description__ = """Lof.cc decrypter plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
diff --git a/pyload/plugins/crypter/MBLinkInfo.py b/pyload/plugins/crypter/MBLinkInfo.py
new file mode 100644
index 000000000..8516ff6e4
--- /dev/null
+++ b/pyload/plugins/crypter/MBLinkInfo.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class MBLinkInfo(DeadCrypter):
+ __name__ = "MBLinkInfo"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?mblink\.info/?\?id=(\d+)'
+
+ __description__ = """MBLink.info decrypter plugin"""
+ __author_name__ = ("Gummibaer", "stickell")
+ __author_mail__ = ("Gummibaer@wiki-bierkiste.de", "l.stickell@yahoo.it")
diff --git a/pyload/plugins/crypter/MediafireComFolder.py b/pyload/plugins/crypter/MediafireComFolder.py
new file mode 100644
index 000000000..4ea904e89
--- /dev/null
+++ b/pyload/plugins/crypter/MediafireComFolder.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Crypter import Crypter
+from pyload.plugins.hoster.MediafireCom import checkHTMLHeader
+from pyload.common.json_layer import json_loads
+
+
+class MediafireComFolder(Crypter):
+ __name__ = "MediafireComFolder"
+ __type__ = "crypter"
+ __version__ = "0.14"
+
+ __pattern__ = r'http://(?:www\.)?mediafire\.com/(folder/|\?sharekey=|\?\w{13}($|[/#]))'
+
+ __description__ = """Mediafire.com folder decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FOLDER_KEY_PATTERN = r"var afI= '(\w+)';"
+ FILE_URL_PATTERN = r'<meta property="og:url" content="http://www.mediafire.com/\?(\w+)"/>'
+
+
+ def decrypt(self, pyfile):
+ url, result = checkHTMLHeader(pyfile.url)
+ self.logDebug('Location (%d): %s' % (result, url))
+
+ if result == 0:
+ # load and parse html
+ html = self.load(pyfile.url)
+ m = re.search(self.FILE_URL_PATTERN, html)
+ if m:
+ # file page
+ self.urls.append("http://www.mediafire.com/file/%s" % m.group(1))
+ else:
+ # folder page
+ m = re.search(self.FOLDER_KEY_PATTERN, html)
+ if m:
+ folder_key = m.group(1)
+ self.logDebug("FOLDER KEY: %s" % folder_key)
+
+ json_resp = json_loads(self.load(
+ "http://www.mediafire.com/api/folder/get_info.php?folder_key=%s&response_format=json&version=1" % folder_key))
+ #self.logInfo(json_resp)
+ if json_resp['response']['result'] == "Success":
+ for link in json_resp['response']['folder_info']['files']:
+ self.urls.append("http://www.mediafire.com/file/%s" % link['quickkey'])
+ else:
+ self.fail(json_resp['response']['message'])
+ elif result == 1:
+ self.offline()
+ else:
+ self.urls.append(url)
+
+ if not self.urls:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/Movie2kTo.py b/pyload/plugins/crypter/Movie2kTo.py
new file mode 100644
index 000000000..b6a554758
--- /dev/null
+++ b/pyload/plugins/crypter/Movie2kTo.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class Movie2kTo(DeadCrypter):
+ __name__ = "Movie2kTo"
+ __type__ = "crypter"
+ __version__ = "0.51"
+
+ __pattern__ = r'http://(?:www\.)?movie2k\.to/(.*)\.html'
+
+ __description__ = """Movie2k.to decrypter plugin"""
+ __author_name__ = "4Christopher"
+ __author_mail__ = "4Christopher@gmx.de"
diff --git a/pyload/plugins/crypter/MultiUpOrg.py b/pyload/plugins/crypter/MultiUpOrg.py
new file mode 100644
index 000000000..96553a09a
--- /dev/null
+++ b/pyload/plugins/crypter/MultiUpOrg.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+import re
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class MultiUpOrg(SimpleCrypter):
+ __name__ = "MultiUpOrg"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?multiup\.org/(en|fr)/(?P<TYPE>project|download|miror)/\w+(/\w+)?'
+
+ __description__ = """MultiUp.org crypter plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ TITLE_PATTERN = r'<title>.*(Project|Projet|ownload|élécharger) (?P<title>.+?) (\(|- )'
+
+
+ def getLinks(self):
+ m_type = re.match(self.__pattern__, self.pyfile.url).group("TYPE")
+
+ if m_type == "project":
+ pattern = r'\n(http://www\.multiup\.org/(?:en|fr)/download/.*)'
+ else:
+ pattern = r'style="width:97%;text-align:left".*\n.*href="(.*)"'
+ if m_type == "download":
+ dl_pattern = r'href="(.*)">.*\n.*<h5>DOWNLOAD</h5>'
+ miror_page = urljoin("http://www.multiup.org", re.search(dl_pattern, self.html).group(1))
+ self.html = self.load(miror_page)
+
+ return re.findall(pattern, self.html)
diff --git a/pyload/plugins/crypter/MultiloadCz.py b/pyload/plugins/crypter/MultiloadCz.py
new file mode 100644
index 000000000..be7950e98
--- /dev/null
+++ b/pyload/plugins/crypter/MultiloadCz.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Crypter import Crypter
+
+
+class MultiloadCz(Crypter):
+ __name__ = "MultiloadCz"
+ __type__ = "crypter"
+ __version__ = "0.4"
+
+ __pattern__ = r'http://(?:[^/]*\.)?multiload.cz/(stahnout|slozka)/.*'
+ __config__ = [("usedHoster", "str", "Prefered hoster list (bar-separated) ", ""),
+ ("ignoredHoster", "str", "Ignored hoster list (bar-separated) ", "")]
+
+ __description__ = """Multiload.cz decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FOLDER_PATTERN = r'<form action="" method="get"><textarea[^>]*>([^>]*)</textarea></form>'
+ LINK_PATTERN = r'<p class="manager-server"><strong>([^<]+)</strong></p><p class="manager-linky"><a href="([^"]+)">'
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+
+ if re.match(self.__pattern__, pyfile.url).group(1) == "slozka":
+ m = re.search(self.FOLDER_PATTERN, self.html)
+ if m:
+ self.urls.extend(m.group(1).split())
+ else:
+ m = re.findall(self.LINK_PATTERN, self.html)
+ if m:
+ prefered_set = set(self.getConfig("usedHoster").split('|'))
+ self.urls.extend([x[1] for x in m if x[0] in prefered_set])
+
+ if not self.urls:
+ ignored_set = set(self.getConfig("ignoredHoster").split('|'))
+ self.urls.extend([x[1] for x in m if x[0] not in ignored_set])
+
+ if not self.urls:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/MultiuploadCom.py b/pyload/plugins/crypter/MultiuploadCom.py
new file mode 100644
index 000000000..5aa77e5f5
--- /dev/null
+++ b/pyload/plugins/crypter/MultiuploadCom.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import time
+
+from pyload.plugins.Crypter import Crypter
+from pyload.common.json_layer import json_loads
+
+
+class MultiuploadCom(Crypter):
+ __name__ = "MultiuploadCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?multiupload.com/(\w+)'
+ __config__ = [("preferedHoster", "str", "Prefered hoster list (bar-separated) ", "multiupload"),
+ ("ignoredHoster", "str", "Ignored hoster list (bar-separated) ", "")]
+
+ __description__ = """MultiUpload.com decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ ML_LINK_PATTERN = r'<div id="downloadbutton_" style=""><a href="([^"]+)"'
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url)
+ m = re.search(self.ML_LINK_PATTERN, self.html)
+ ml_url = m.group(1) if m else None
+
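+        # Ask the progress API for the mirror list of this upload (file id plus a cache-busting timestamp)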
+ json_list = json_loads(self.load("http://multiupload.com/progress/", get={
+ "d": re.match(self.__pattern__, pyfile.url).group(1),
+ "r": str(int(time() * 1000))
+ }))
+
+ prefered_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("preferedHoster").split('|')))
+
+ if ml_url and 'multiupload' in prefered_set:
+ self.urls.append(ml_url)
+
+ for link in json_list:
+ if link['service'].lower() in prefered_set and int(link['status']) and not int(link['deleted']):
+ url = self.getLocation(link['url'])
+ if url:
+ self.urls.append(url)
+
+ if not self.urls:
+ ignored_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("ignoredHoster").split('|')))
+
+            if ml_url and 'multiupload' not in ignored_set:
+                self.urls.append(ml_url)
+
+ for link in json_list:
+ if link['service'].lower() not in ignored_set and int(link['status']) and not int(link['deleted']):
+ url = self.getLocation(link['url'])
+ if url:
+ self.urls.append(url)
+
+ if not self.urls:
+ self.fail('Could not extract any links')
+
+ def getLocation(self, url):
+ header = self.load(url, just_header=True)
+ return header['location'] if "location" in header else None
diff --git a/pyload/plugins/crypter/NCryptIn.py b/pyload/plugins/crypter/NCryptIn.py
new file mode 100644
index 000000000..70c541d02
--- /dev/null
+++ b/pyload/plugins/crypter/NCryptIn.py
@@ -0,0 +1,303 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+from Crypto.Cipher import AES
+
+from pyload.plugins.Crypter import Crypter
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+
+
+class NCryptIn(Crypter):
+ __name__ = "NCryptIn"
+ __type__ = "crypter"
+ __version__ = "1.32"
+
+ __pattern__ = r'http://(?:www\.)?ncrypt.in/(?P<type>folder|link|frame)-([^/\?]+)'
+
+ __description__ = """NCrypt.in decrypter plugin"""
+ __author_name__ = ("fragonib", "stickell")
+ __author_mail__ = ("fragonib[AT]yahoo[DOT]es", "l.stickell@yahoo.it")
+
+ JK_KEY = "jk"
+ CRYPTED_KEY = "crypted"
+
+ NAME_PATTERN = r'<meta name="description" content="(?P<N>[^"]+)"'
+
+
+ def setup(self):
+ self.package = None
+ self.html = None
+ self.cleanedHtml = None
+ self.links_source_order = ["cnl2", "rsdf", "ccf", "dlc", "web"]
+ self.protection_type = None
+
+ def decrypt(self, pyfile):
+ # Init
+ self.package = pyfile.package()
+ package_links = []
+ package_name = self.package.name
+ folder_name = self.package.folder
+
+ # Deal with single links
+ if self.isSingleLink():
+ package_links.extend(self.handleSingleLink())
+
+ # Deal with folders
+ else:
+
+ # Request folder home
+ self.html = self.requestFolderHome()
+ self.cleanedHtml = self.removeHtmlCrap(self.html)
+ if not self.isOnline():
+ self.offline()
+
+ # Check for folder protection
+ if self.isProtected():
+ self.html = self.unlockProtection()
+ self.cleanedHtml = self.removeHtmlCrap(self.html)
+ self.handleErrors()
+
+ # Prepare package name and folder
+ (package_name, folder_name) = self.getPackageInfo()
+
+ # Extract package links
+ for link_source_type in self.links_source_order:
+ package_links.extend(self.handleLinkSource(link_source_type))
+ if package_links: # use only first source which provides links
+ break
+ package_links = set(package_links)
+
+ # Pack and return links
+ if not package_links:
+ self.fail('Could not extract any links')
+ self.packages = [(package_name, package_links, folder_name)]
+
+ def isSingleLink(self):
+ link_type = re.match(self.__pattern__, self.pyfile.url).group('type')
+ return link_type in ("link", "frame")
+
+ def requestFolderHome(self):
+ return self.load(self.pyfile.url, decode=True)
+
+ def removeHtmlCrap(self, content):
+ patterns = (r'(type="hidden".*?(name=".*?")?.*?value=".*?")',
+ r'display:none;">(.*?)</(div|span)>',
+ r'<div\s+class="jdownloader"(.*?)</div>',
+ r'<table class="global">(.*?)</table>',
+ r'<iframe\s+style="display:none(.*?)</iframe>')
+ for pattern in patterns:
+ rexpr = re.compile(pattern, re.DOTALL)
+ content = re.sub(rexpr, "", content)
+ return content
+
+ def isOnline(self):
+ if "Your folder does not exist" in self.cleanedHtml:
+ self.logDebug("File not m")
+ return False
+ return True
+
+ def isProtected(self):
+ form = re.search(r'<form.*?name.*?protected.*?>(.*?)</form>', self.cleanedHtml, re.DOTALL)
+ if form is not None:
+ content = form.group(1)
+ for keyword in ("password", "captcha"):
+ if keyword in content:
+ self.protection_type = keyword
+ self.logDebug("Links are %s protected" % self.protection_type)
+ return True
+ return False
+
+ def getPackageInfo(self):
+ m = re.search(self.NAME_PATTERN, self.html)
+ if m:
+ name = folder = m.group('N').strip()
+ self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+ else:
+ name = self.package.name
+ folder = self.package.folder
+ self.logDebug("Package info not m, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+ return name, folder
+
+ def unlockProtection(self):
+
+ postData = {}
+
+ form = re.search(r'<form name="protected"(.*?)</form>', self.cleanedHtml, re.DOTALL).group(1)
+
+ # Submit package password
+ if "password" in form:
+ password = self.getPassword()
+ self.logDebug("Submitting password [%s] for protected links" % password)
+ postData['password'] = password
+
+ # Resolve anicaptcha
+ if "anicaptcha" in form:
+ self.logDebug("Captcha protected")
+ captchaUri = re.search(r'src="(/temp/anicaptcha/[^"]+)', form).group(1)
+ captcha = self.decryptCaptcha("http://ncrypt.in" + captchaUri)
+ self.logDebug("Captcha resolved [%s]" % captcha)
+ postData['captcha'] = captcha
+
+ # Resolve recaptcha
+ if "recaptcha" in form:
+ self.logDebug("ReCaptcha protected")
+ captcha_key = re.search(r'\?k=(.*?)"', form).group(1)
+ self.logDebug("Resolving ReCaptcha with key [%s]" % captcha_key)
+ recaptcha = ReCaptcha(self)
+ challenge, code = recaptcha.challenge(captcha_key)
+ postData['recaptcha_challenge_field'] = challenge
+ postData['recaptcha_response_field'] = code
+
+ # Resolve circlecaptcha
+ if "circlecaptcha" in form:
+ self.logDebug("CircleCaptcha protected")
+ captcha_img_url = "http://ncrypt.in/classes/captcha/circlecaptcha.php"
+ coords = self.decryptCaptcha(captcha_img_url, forceUser=True, imgtype="png", result_type='positional')
+ self.logDebug("Captcha resolved, coords [%s]" % str(coords))
+ postData['circle.x'] = coords[0]
+ postData['circle.y'] = coords[1]
+
+ # Unlock protection
+ postData['submit_protected'] = 'Continue to folder'
+ return self.load(self.pyfile.url, post=postData, decode=True)
+
+ def handleErrors(self):
+ if self.protection_type == "password":
+ if "This password is invalid!" in self.cleanedHtml:
+ self.logDebug("Incorrect password, please set right password on 'Edit package' form and retry")
+ self.fail("Incorrect password, please set right password on 'Edit package' form and retry")
+
+ if self.protection_type == "captcha":
+ if "The securitycheck was wrong!" in self.cleanedHtml:
+ self.logDebug("Invalid captcha, retrying")
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+ def handleLinkSource(self, link_source_type):
+ # Check for JS engine
+ require_js_engine = link_source_type in ("cnl2", "rsdf", "ccf", "dlc")
+ if require_js_engine and not self.js:
+ self.logDebug("No JS engine available, skip %s links" % link_source_type)
+ return []
+
+ # Select suitable handler
+ if link_source_type == 'single':
+ return self.handleSingleLink()
+ if link_source_type == 'cnl2':
+ return self.handleCNL2()
+ elif link_source_type in ("rsdf", "ccf", "dlc"):
+            return self.handleContainers()
+ elif link_source_type == "web":
+ return self.handleWebLinks()
+ else:
+ self.fail('unknown source type "%s" (this is probably a bug)' % link_source_type)
+
+ def handleSingleLink(self):
+
+ self.logDebug("Handling Single link")
+ package_links = []
+
+ # Decrypt single link
+ decrypted_link = self.decryptLink(self.pyfile.url)
+ if decrypted_link:
+ package_links.append(decrypted_link)
+
+ return package_links
+
+ def handleCNL2(self):
+
+ self.logDebug("Handling CNL2 links")
+ package_links = []
+
+ if 'cnl2_output' in self.cleanedHtml:
+ try:
+ (vcrypted, vjk) = self._getCipherParams()
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.fail("Unable to decrypt CNL2 links")
+
+ return package_links
+
+ def handleContainers(self):
+
+ self.logDebug("Handling Container links")
+ package_links = []
+
+ pattern = r"/container/(rsdf|dlc|ccf)/([a-z0-9]+)"
+ containersLinks = re.findall(pattern, self.html)
+ self.logDebug("Decrypting %d Container links" % len(containersLinks))
+ for containerLink in containersLinks:
+ link = "http://ncrypt.in/container/%s/%s.%s" % (containerLink[0], containerLink[1], containerLink[0])
+ package_links.append(link)
+
+ return package_links
+
+ def handleWebLinks(self):
+
+ self.logDebug("Handling Web links")
+ pattern = r"(http://ncrypt\.in/link-.*?=)"
+ links = re.findall(pattern, self.html)
+
+ package_links = []
+ self.logDebug("Decrypting %d Web links" % len(links))
+ for i, link in enumerate(links):
+ self.logDebug("Decrypting Web link %d, %s" % (i + 1, link))
+            decrypted_link = self.decryptLink(link)
+ if decrypted_link:
+ package_links.append(decrypted_link)
+
+ return package_links
+
+ def decryptLink(self, link):
+ try:
+ url = link.replace("link-", "frame-")
+ link = self.load(url, just_header=True)['location']
+ return link
+ except Exception, detail:
+ self.logDebug("Error decrypting link %s, %s" % (link, detail))
+
+ def _getCipherParams(self):
+
+ pattern = r'<input.*?name="%s".*?value="(.*?)"'
+
+ # Get jk
+ jk_re = pattern % NCryptIn.JK_KEY
+ vjk = re.findall(jk_re, self.html)
+
+ # Get crypted
+ crypted_re = pattern % NCryptIn.CRYPTED_KEY
+ vcrypted = re.findall(crypted_re, self.html)
+
+ # Log and return
+ self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+ return vcrypted, vjk
+
+ def _getLinks(self, crypted, jk):
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
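+        # Click'n'Load 2 uses the same key as both AES-CBC key and IV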
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Block has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/NetfolderIn.py b/pyload/plugins/crypter/NetfolderIn.py
new file mode 100644
index 000000000..858755e5c
--- /dev/null
+++ b/pyload/plugins/crypter/NetfolderIn.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class NetfolderIn(SimpleCrypter):
+ __name__ = "NetfolderIn"
+ __type__ = "crypter"
+ __version__ = "0.6"
+
+ __pattern__ = r'http://(?:www\.)?netfolder.in/((?P<id1>\w+)/\w+|folder.php\?folder_id=(?P<id2>\w+))'
+
+ __description__ = """NetFolder.in decrypter plugin"""
+ __author_name__ = ("RaNaN", "fragonib")
+ __author_mail__ = ("RaNaN@pyload.org", "fragonib[AT]yahoo[DOT]es")
+
+ TITLE_PATTERN = r'<div class="Text">Inhalt des Ordners <span(.*)>(?P<title>.+)</span></div>'
+
+
+ def decrypt(self, pyfile):
+ # Request package
+ self.html = self.load(pyfile.url)
+
+ # Check for password protection
+ if self.isPasswordProtected():
+ self.html = self.submitPassword()
+ if not self.html:
+ self.fail("Incorrect password, please set right password on Add package form and retry")
+
+ # Get package name and folder
+ (package_name, folder_name) = self.getPackageNameAndFolder()
+
+ # Get package links
+ package_links = self.getLinks()
+
+ # Set package
+ self.packages = [(package_name, package_links, folder_name)]
+
+ def isPasswordProtected(self):
+ if '<input type="password" name="password"' in self.html:
+ self.logDebug("Links are password protected")
+ return True
+ return False
+
+ def submitPassword(self):
+ # Gather data
+ try:
+ m = re.match(self.__pattern__, self.pyfile.url)
+ id = max(m.group('id1'), m.group('id2'))
+ except AttributeError:
+ self.logDebug("Unable to get package id from url [%s]" % self.pyfile.url)
+ return
+ url = "http://netfolder.in/folder.php?folder_id=" + id
+ password = self.getPassword()
+
+ # Submit package password
+ post = {'password': password, 'save': 'Absenden'}
+ self.logDebug("Submitting password [%s] for protected links with id [%s]" % (password, id))
+ html = self.load(url, {}, post)
+
+ # Check for invalid password
+ if '<div class="InPage_Error">' in html:
+ self.logDebug("Incorrect password, please set right password on Edit package form and retry")
+ return None
+
+ return html
+
+ def getLinks(self):
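+        # The folder page keeps all file links as a comma-separated list in the hidden "list" input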
+ links = re.search(r'name="list" value="(.*?)"', self.html).group(1).split(",")
+ self.logDebug("Package has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/NosvideoCom.py b/pyload/plugins/crypter/NosvideoCom.py
new file mode 100644
index 000000000..e1c9e2c55
--- /dev/null
+++ b/pyload/plugins/crypter/NosvideoCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class NosvideoCom(SimpleCrypter):
+ __name__ = "NosvideoCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?nosvideo\.com/\?v=\w+'
+
+ __description__ = """Nosvideo.com decrypter plugin"""
+ __author_name__ = "igel"
+ __author_mail__ = "igelkun@myopera.com"
+
+ LINK_PATTERN = r'href="(http://(?:w{3}\.)?nosupload.com/\?d=\w+)"'
+ TITLE_PATTERN = r'<[tT]itle>Watch (?P<title>.+)</[tT]itle>'
diff --git a/pyload/plugins/crypter/OneKhDe.py b/pyload/plugins/crypter/OneKhDe.py
new file mode 100644
index 000000000..320eaf6c6
--- /dev/null
+++ b/pyload/plugins/crypter/OneKhDe.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.unescape import unescape
+from pyload.plugins.Crypter import Crypter
+
+
+class OneKhDe(Crypter):
+ __name__ = "OneKhDe"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?1kh.de/f/'
+
+ __description__ = """1kh.de decrypter plugin"""
+ __author_name__ = "spoob"
+ __author_mail__ = "spoob@pyload.org"
+
+
+ def __init__(self, parent):
+ Crypter.__init__(self, parent)
+ self.parent = parent
+ self.html = None
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ return True
+
+ def proceed(self, url, location):
+ url = self.parent.url
+ self.html = self.req.load(url)
+ link_ids = re.findall(r"<a id=\"DownloadLink_(\d*)\" href=\"http://1kh.de/", self.html)
+ for id in link_ids:
+ new_link = unescape(
+ re.search("width=\"100%\" src=\"(.*)\"></iframe>", self.req.load("http://1kh.de/l/" + id)).group(1))
+ self.urls.append(new_link)
diff --git a/pyload/plugins/crypter/OronComFolder.py b/pyload/plugins/crypter/OronComFolder.py
new file mode 100644
index 000000000..9b5fb3959
--- /dev/null
+++ b/pyload/plugins/crypter/OronComFolder.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class OronComFolder(DeadCrypter):
+ __name__ = "OronComFolder"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?oron.com/folder/\w+'
+
+ __description__ = """Oron.com folder decrypter plugin"""
+ __author_name__ = "DHMH"
+ __author_mail__ = "webmaster@pcProfil.de"
diff --git a/pyload/plugins/crypter/PastebinCom.py b/pyload/plugins/crypter/PastebinCom.py
new file mode 100644
index 000000000..8e394ac3a
--- /dev/null
+++ b/pyload/plugins/crypter/PastebinCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class PastebinCom(SimpleCrypter):
+ __name__ = "PastebinCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?pastebin\.com/\w+'
+
+ __description__ = """Pastebin.com decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ LINK_PATTERN = r'<div class="de\d+">(https?://[^ <]+)(?:[^<]*)</div>'
+ TITLE_PATTERN = r'<div class="paste_box_line1" title="(?P<title>[^"]+)">'
diff --git a/pyload/plugins/crypter/QuickshareCzFolder.py b/pyload/plugins/crypter/QuickshareCzFolder.py
new file mode 100644
index 000000000..5d99cbffd
--- /dev/null
+++ b/pyload/plugins/crypter/QuickshareCzFolder.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Crypter import Crypter
+
+
+class QuickshareCzFolder(Crypter):
+ __name__ = "QuickshareCzFolder"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?quickshare.cz/slozka-\d+.*'
+
+ __description__ = """Quickshare.cz folder decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FOLDER_PATTERN = r'<textarea[^>]*>(.*?)</textarea>'
+ LINK_PATTERN = r'(http://www.quickshare.cz/\S+)'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ m = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
+ if m is None:
+ self.fail("Parse error (FOLDER)")
+ self.urls.extend(re.findall(self.LINK_PATTERN, m.group(1)))
+
+ if not self.urls:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/RSLayerCom.py b/pyload/plugins/crypter/RSLayerCom.py
new file mode 100644
index 000000000..ded550a50
--- /dev/null
+++ b/pyload/plugins/crypter/RSLayerCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class RSLayerCom(DeadCrypter):
+ __name__ = "RSLayerCom"
+ __type__ = "crypter"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?rs-layer.com/directory-'
+
+ __description__ = """RS-Layer.com decrypter plugin"""
+ __author_name__ = "hzpz"
+ __author_mail__ = None
diff --git a/pyload/plugins/crypter/RelinkUs.py b/pyload/plugins/crypter/RelinkUs.py
new file mode 100644
index 000000000..74228d41a
--- /dev/null
+++ b/pyload/plugins/crypter/RelinkUs.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+import os
+
+from Crypto.Cipher import AES
+from pyload.plugins.Crypter import Crypter
+
+
+class RelinkUs(Crypter):
+ __name__ = "RelinkUs"
+ __type__ = "crypter"
+ __version__ = "3.0"
+
+ __pattern__ = r'http://(?:www\.)?relink.us/(f/|((view|go).php\?id=))(?P<id>.+)'
+
+ __description__ = """Relink.us decrypter plugin"""
+ __author_name__ = "fragonib"
+ __author_mail__ = "fragonib[AT]yahoo[DOT]es"
+
+ # Constants
+ PREFERRED_LINK_SOURCES = ["cnl2", "dlc", "web"]
+
+ OFFLINE_TOKEN = r'<title>Tattooside'
+ PASSWORD_TOKEN = r'container_password\.php'
+    PASSWORD_ERROR_TOKEN = r'You have entered an incorrect password'
+ PASSWORD_SUBMIT_URL = r'http://www\.relink\.us/container_password\.php'
+ CAPTCHA_TOKEN = r'container_captcha\.php'
+    CAPTCHA_ERROR_TOKEN = r'You have solved the captcha wrong'
+ CAPTCHA_IMG_URL = r'http://www\.relink\.us/core/captcha/circlecaptcha\.php'
+ CAPTCHA_SUBMIT_URL = r'http://www\.relink\.us/container_captcha\.php'
+ FILE_TITLE_REGEX = r'<th>Title</th><td><i>(.*)</i></td></tr>'
+ FILE_NOTITLE = r'No title'
+
+ CNL2_FORM_REGEX = r'<form id="cnl_form-(.*?)</form>'
+ CNL2_FORMINPUT_REGEX = r'<input.*?name="%s".*?value="(.*?)"'
+ CNL2_JK_KEY = "jk"
+ CNL2_CRYPTED_KEY = "crypted"
+ DLC_LINK_REGEX = r'<a href=".*?" class="dlc_button" target="_blank">'
+ DLC_DOWNLOAD_URL = r'http://www\.relink\.us/download\.php'
+ WEB_FORWARD_REGEX = r"getFile\('(?P<link>.+)'\)"
+ WEB_FORWARD_URL = r'http://www\.relink\.us/frame\.php'
+ WEB_LINK_REGEX = r'<iframe name="Container" height="100%" frameborder="no" width="100%" src="(?P<link>.+)"></iframe>'
+
+
+ def setup(self):
+ self.fileid = None
+ self.package = None
+ self.password = None
+ self.html = None
+ self.captcha = False
+
+ def decrypt(self, pyfile):
+ # Init
+ self.initPackage(pyfile)
+
+ # Request package
+ self.requestPackage()
+
+ # Check for online
+ if not self.isOnline():
+ self.offline()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleErrors()
+
+ # Get package name and folder
+ (package_name, folder_name) = self.getPackageInfo()
+
+ # Extract package links
+ package_links = []
+ for sources in self.PREFERRED_LINK_SOURCES:
+ package_links.extend(self.handleLinkSource(sources))
+ if package_links: # use only first source which provides links
+ break
+ package_links = set(package_links)
+
+ # Pack
+ if package_links:
+ self.packages = [(package_name, package_links, folder_name)]
+ else:
+ self.fail('Could not extract any links')
+
+ def initPackage(self, pyfile):
+ self.fileid = re.match(self.__pattern__, pyfile.url).group('id')
+ self.package = pyfile.package()
+ self.password = self.getPassword()
+
+ def requestPackage(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ def isOnline(self):
+ if self.OFFLINE_TOKEN in self.html:
+ self.logDebug("File not found")
+ return False
+ return True
+
+ def isPasswordProtected(self):
+ if self.PASSWORD_TOKEN in self.html:
+ self.logDebug("Links are password protected")
+            return True
+        return False
+
+ def isCaptchaProtected(self):
+ if self.CAPTCHA_TOKEN in self.html:
+ self.logDebug("Links are captcha protected")
+ return True
+ return False
+
+ def unlockPasswordProtection(self):
+ self.logDebug("Submitting password [%s] for protected links" % self.password)
+ passwd_url = self.PASSWORD_SUBMIT_URL + "?id=%s" % self.fileid
+ passwd_data = {'id': self.fileid, 'password': self.password, 'pw': 'submit'}
+ self.html = self.load(passwd_url, post=passwd_data, decode=True)
+
+ def unlockCaptchaProtection(self):
+ self.logDebug("Request user positional captcha resolving")
+ captcha_img_url = self.CAPTCHA_IMG_URL + "?id=%s" % self.fileid
+ coords = self.decryptCaptcha(captcha_img_url, forceUser=True, imgtype="png", result_type='positional')
+ self.logDebug("Captcha resolved, coords [%s]" % str(coords))
+ captcha_post_url = self.CAPTCHA_SUBMIT_URL + "?id=%s" % self.fileid
+ captcha_post_data = {'button.x': coords[0], 'button.y': coords[1], 'captcha': 'submit'}
+ self.html = self.load(captcha_post_url, post=captcha_post_data, decode=True)
+
+ def getPackageInfo(self):
+ name = folder = None
+
+ # Try to get info from web
+ m = re.search(self.FILE_TITLE_REGEX, self.html)
+ if m is not None:
+ title = m.group(1).strip()
+ if not self.FILE_NOTITLE in title:
+ name = folder = title
+ self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+
+ # Fallback to defaults
+ if not name or not folder:
+ name = self.package.name
+ folder = self.package.folder
+ self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+
+ # Return package info
+ return name, folder
+
+ def handleErrors(self):
+        if self.PASSWORD_ERROR_TOKEN in self.html:
+ msg = "Incorrect password, please set right password on 'Edit package' form and retry"
+ self.logDebug(msg)
+ self.fail(msg)
+
+ if self.captcha:
+            if self.CAPTCHA_ERROR_TOKEN in self.html:
+ self.logDebug("Invalid captcha, retrying")
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+ def handleLinkSource(self, source):
+ if source == 'cnl2':
+ return self.handleCNL2Links()
+ elif source == 'dlc':
+ return self.handleDLCLinks()
+ elif source == 'web':
+ return self.handleWEBLinks()
+ else:
+ self.fail('Unknown source [%s] (this is probably a bug)' % source)
+
+ def handleCNL2Links(self):
+ self.logDebug("Search for CNL2 links")
+ package_links = []
+ m = re.search(self.CNL2_FORM_REGEX, self.html, re.DOTALL)
+ if m is not None:
+ cnl2_form = m.group(1)
+ try:
+ (vcrypted, vjk) = self._getCipherParams(cnl2_form)
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.logDebug("Unable to decrypt CNL2 links")
+ return package_links
+
+ def handleDLCLinks(self):
+ self.logDebug('Search for DLC links')
+ package_links = []
+ m = re.search(self.DLC_LINK_REGEX, self.html)
+ if m is not None:
+ container_url = self.DLC_DOWNLOAD_URL + "?id=%s&dlc=1" % self.fileid
+ self.logDebug("Downloading DLC container link [%s]" % container_url)
+ try:
+ dlc = self.load(container_url)
+ dlc_filename = self.fileid + ".dlc"
+ dlc_filepath = os.path.join(self.config['general']['download_folder'], dlc_filename)
+ f = open(dlc_filepath, "wb")
+ f.write(dlc)
+ f.close()
+ package_links.append(dlc_filepath)
+ except:
+ self.logDebug("Unable to download DLC container")
+ return package_links
+
+ def handleWEBLinks(self):
+ self.logDebug("Search for WEB links")
+ package_links = []
+ fw_params = re.findall(self.WEB_FORWARD_REGEX, self.html)
+ self.logDebug("Decrypting %d Web links" % len(fw_params))
+ for index, fw_param in enumerate(fw_params):
+ try:
+ fw_url = self.WEB_FORWARD_URL + "?%s" % fw_param
+ self.logDebug("Decrypting Web link %d, %s" % (index + 1, fw_url))
+ fw_response = self.load(fw_url, decode=True)
+ dl_link = re.search(self.WEB_LINK_REGEX, fw_response).group('link')
+ package_links.append(dl_link)
+ except Exception, detail:
+ self.logDebug("Error decrypting Web link %s, %s" % (index, detail))
+ self.setWait(4)
+ self.wait()
+ return package_links
+
+ def _getCipherParams(self, cnl2_form):
+ # Get jk
+ jk_re = self.CNL2_FORMINPUT_REGEX % self.CNL2_JK_KEY
+ vjk = re.findall(jk_re, cnl2_form, re.IGNORECASE)
+
+ # Get crypted
+ crypted_re = self.CNL2_FORMINPUT_REGEX % RelinkUs.CNL2_CRYPTED_KEY
+ vcrypted = re.findall(crypted_re, cnl2_form, re.IGNORECASE)
+
+ # Log and return
+ self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+ return vcrypted, vjk
+
+ def _getLinks(self, crypted, jk):
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Package has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/SafelinkingNet.py b/pyload/plugins/crypter/SafelinkingNet.py
new file mode 100644
index 000000000..62dcc6021
--- /dev/null
+++ b/pyload/plugins/crypter/SafelinkingNet.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import FOLLOWLOCATION
+
+from pyload.lib.BeautifulSoup import BeautifulSoup
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Crypter import Crypter
+from pyload.plugins.internal.CaptchaService import SolveMedia
+
+
+class SafelinkingNet(Crypter):
+ __name__ = "SafelinkingNet"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'https?://(?:www\.)?safelinking.net/([pd])/\w+'
+
+ __description__ = """Safelinking.net decrypter plugin"""
+ __author_name__ = "quareevo"
+ __author_mail__ = "quareevo@arcor.de"
+
+ SOLVEMEDIA_PATTERN = "solvemediaApiKey = '([\w\.\-_]+)';"
+
+
+ def decrypt(self, pyfile):
+ url = pyfile.url
+ if re.match(self.__pattern__, url).group(1) == "d":
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+ self.load(url)
+ m = re.search("^Location: (.+)$", self.req.http.header, re.MULTILINE)
+ if m:
+ self.urls = [m.group(1)]
+ else:
+ self.fail("Couldn't find forwarded Link")
+
+ else:
+ password = ""
+ postData = {"post-protect": "1"}
+
+ self.html = self.load(url)
+
+ if "link-password" in self.html:
+ password = pyfile.package().password
+ postData['link-password'] = password
+
+ if "altcaptcha" in self.html:
+ for _ in xrange(5):
+ m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if m:
+ captchaKey = m.group(1)
+ captcha = SolveMedia(self)
+ captchaProvider = "Solvemedia"
+ else:
+ self.fail("Error parsing captcha")
+
+ challenge, response = captcha.challenge(captchaKey)
+ postData['adcopy_challenge'] = challenge
+ postData['adcopy_response'] = response
+
+ self.html = self.load(url, post=postData)
+ if "The password you entered was incorrect" in self.html:
+ self.fail("Incorrect Password")
+ if not "The CAPTCHA code you entered was wrong" in self.html:
+ break
+
+ pyfile.package().password = ""
+ soup = BeautifulSoup(self.html)
+ scripts = soup.findAll("script")
+ for s in scripts:
+ if "d_links" in s.text:
+ break
+ m = re.search('d_links":(\[.*?\])', s.text)
+ if m:
+ linkDict = json_loads(m.group(1))
+ for link in linkDict:
+ if not "http://" in link['full']:
+ self.urls.append("https://safelinking.net/d/" + link['full'])
+ else:
+ self.urls.append(link['full'])
diff --git a/pyload/plugins/crypter/SecuredIn.py b/pyload/plugins/crypter/SecuredIn.py
new file mode 100644
index 000000000..fc2667586
--- /dev/null
+++ b/pyload/plugins/crypter/SecuredIn.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class SecuredIn(DeadCrypter):
+ __name__ = "SecuredIn"
+ __type__ = "crypter"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?secured\.in/download-[\d]+-[\w]{8}\.html'
+
+ __description__ = """Secured.in decrypter plugin"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
diff --git a/pyload/plugins/crypter/SerienjunkiesOrg.py b/pyload/plugins/crypter/SerienjunkiesOrg.py
new file mode 100644
index 000000000..c7f7b1892
--- /dev/null
+++ b/pyload/plugins/crypter/SerienjunkiesOrg.py
@@ -0,0 +1,324 @@
+# -*- coding: utf-8 -*-
+
+import random
+import re
+
+from time import sleep
+
+from pyload.lib.BeautifulSoup import BeautifulSoup
+
+from pyload.plugins.Crypter import Crypter
+from pyload.unescape import unescape
+
+
+class SerienjunkiesOrg(Crypter):
+ __name__ = "SerienjunkiesOrg"
+ __type__ = "crypter"
+ __version__ = "0.39"
+
+ __pattern__ = r'http://(?:www\.)?(serienjunkies.org|dokujunkies.org)/.*?'
+ __config__ = [("changeNameSJ", "Packagename;Show;Season;Format;Episode", "Take SJ.org name", "Show"),
+ ("changeNameDJ", "Packagename;Show;Format;Episode", "Take DJ.org name", "Show"),
+ ("randomPreferred", "bool", "Randomize Preferred-List", False),
+ ("hosterListMode", "OnlyOne;OnlyPreferred(One);OnlyPreferred(All);All",
+ "Use for hosters (if supported)", "All"),
+ ("hosterList", "str", "Preferred Hoster list (comma separated)",
+ "RapidshareCom,UploadedTo,NetloadIn,FilefactoryCom,FreakshareNet,FilebaseTo,HotfileCom,DepositfilesCom,EasyshareCom,KickloadCom"),
+ ("ignoreList", "str", "Ignored Hoster list (comma separated)", "MegauploadCom")]
+
+ __description__ = """Serienjunkies.org decrypter plugin"""
+ __author_name__ = ("mkaay", "godofdream")
+ __author_mail__ = ("mkaay@mkaay.de", "soilfiction@gmail.com")
+
+
+ def setup(self):
+ self.multiDL = False
+
+ def getSJSrc(self, url):
+ src = self.req.load(str(url))
+ if "This website is not available in your country" in src:
+ self.fail("Not available in your country")
+ if not src.find("Enter Serienjunkies") == -1:
+ sleep(1)
+ src = self.req.load(str(url))
+ return src
+
+ def handleShow(self, url):
+ src = self.getSJSrc(url)
+ soup = BeautifulSoup(src)
+ packageName = self.pyfile.package().name
+ if self.getConfig("changeNameSJ") == "Show":
+ found = unescape(soup.find("h2").find("a").string.split(' &#8211;')[0])
+ if found:
+ packageName = found
+
+ nav = soup.find("div", attrs={"id": "scb"})
+
+ package_links = []
+ for a in nav.findAll("a"):
+ if self.getConfig("changeNameSJ") == "Show":
+ package_links.append(a['href'])
+ else:
+ package_links.append(a['href'] + "#hasName")
+ if self.getConfig("changeNameSJ") == "Show":
+ self.packages.append((packageName, package_links, packageName))
+ else:
+ self.core.files.addLinks(package_links, self.pyfile.package().id)
+
+ def handleSeason(self, url):
+ src = self.getSJSrc(url)
+ soup = BeautifulSoup(src)
+ post = soup.find("div", attrs={"class": "post-content"})
+ ps = post.findAll("p")
+
+ seasonName = unescape(soup.find("a", attrs={"rel": "bookmark"}).string).replace("&#8211;", "-")
+ groups = {}
+ gid = -1
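+        # A <p> carrying Sprache/Format info opens a new group; the following Download paragraphs add its episode links per hoster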
+ for p in ps:
+ if re.search("<strong>Sprache|<strong>Format", str(p)):
+ var = p.findAll("strong")
+ opts = {"Sprache": "", "Format": ""}
+ for v in var:
+ n = unescape(v.string).strip()
+ n = re.sub(r"^([:]?)(.*?)([:]?)$", r'\2', n)
+ if n.strip() not in opts:
+ continue
+ val = v.nextSibling
+ if not val:
+ continue
+ val = val.replace("|", "").strip()
+ val = re.sub(r"^([:]?)(.*?)([:]?)$", r'\2', val)
+ opts[n.strip()] = val.strip()
+ gid += 1
+ groups[gid] = {}
+ groups[gid]['ep'] = {}
+ groups[gid]['opts'] = opts
+ elif re.search("<strong>Download:", str(p)):
+ parts = str(p).split("<br />")
+ if re.search("<strong>", parts[0]):
+ ename = re.search('<strong>(.*?)</strong>', parts[0]).group(1).strip().decode("utf-8").replace(
+ "&#8211;", "-")
+ groups[gid]['ep'][ename] = {}
+ parts.remove(parts[0])
+ for part in parts:
+ hostername = re.search(r" \| ([-a-zA-Z0-9]+\.\w+)", part)
+ if hostername:
+ hostername = hostername.group(1)
+ groups[gid]['ep'][ename][hostername] = []
+ links = re.findall('href="(.*?)"', part)
+ for link in links:
+ groups[gid]['ep'][ename][hostername].append(link + "#hasName")
+
+ links = []
+ for g in groups.values():
+ for ename in g['ep']:
+ links.extend(self.getpreferred(g['ep'][ename]))
+ if self.getConfig("changeNameSJ") == "Episode":
+ self.packages.append((ename, links, ename))
+ links = []
+ package = "%s (%s, %s)" % (seasonName, g['opts']['Format'], g['opts']['Sprache'])
+ if self.getConfig("changeNameSJ") == "Format":
+ self.packages.append((package, links, package))
+ links = []
+ if (self.getConfig("changeNameSJ") == "Packagename") or re.search("#hasName", url):
+ self.core.files.addLinks(links, self.pyfile.package().id)
+ elif (self.getConfig("changeNameSJ") == "Season") or not re.search("#hasName", url):
+ self.packages.append((seasonName, links, seasonName))
+
+ def handleEpisode(self, url):
+ src = self.getSJSrc(url)
+ if not src.find(
+ "Du hast das Download-Limit &uuml;berschritten! Bitte versuche es sp&auml;ter nocheinmal.") == -1:
+ self.fail(_("Downloadlimit reached"))
+ else:
+ soup = BeautifulSoup(src)
+ form = soup.find("form")
+ h1 = soup.find("h1")
+
+ if h1.get("class") == "wrap":
+ captchaTag = soup.find(attrs={"src": re.compile("^/secure/")})
+ if not captchaTag:
+ sleep(5)
+ self.retry()
+
+ captchaUrl = "http://download.serienjunkies.org" + captchaTag['src']
+ result = self.decryptCaptcha(str(captchaUrl), imgtype="png")
+ sinp = form.find(attrs={"name": "s"})
+
+ self.req.lastURL = str(url)
+ sj = self.load(str(url), post={'s': sinp['value'], 'c': result, 'action': "Download"})
+
+ soup = BeautifulSoup(sj)
+ rawLinks = soup.findAll(attrs={"action": re.compile("^http://download.serienjunkies.org/")})
+
+ if not len(rawLinks) > 0:
+ sleep(1)
+ self.retry()
+ return
+
+ self.correctCaptcha()
+
+ links = []
+ for link in rawLinks:
+ frameUrl = link['action'].replace("/go-", "/frame/go-")
+ links.append(self.handleFrame(frameUrl))
+ if re.search("#hasName", url) or ((self.getConfig("changeNameSJ") == "Packagename") and
+ (self.getConfig("changeNameDJ") == "Packagename")):
+ self.core.files.addLinks(links, self.pyfile.package().id)
+ else:
+ if h1.text[2] == "_":
+ eName = h1.text[3:]
+ else:
+ eName = h1.text
+ self.packages.append((eName, links, eName))
+
+ def handleOldStyleLink(self, url):
+ sj = self.req.load(str(url))
+ soup = BeautifulSoup(sj)
+ form = soup.find("form", attrs={"action": re.compile("^http://serienjunkies.org")})
+ captchaTag = form.find(attrs={"src": re.compile("^/safe/secure/")})
+ captchaUrl = "http://serienjunkies.org" + captchaTag['src']
+ result = self.decryptCaptcha(str(captchaUrl))
+ url = form['action']
+ sinp = form.find(attrs={"name": "s"})
+
+ self.req.load(str(url), post={'s': sinp['value'], 'c': result, 'dl.start': "Download"}, cookies=False,
+ just_header=True)
+ decrypted = self.req.lastEffectiveURL
+ if decrypted == str(url):
+ self.retry()
+ self.core.files.addLinks([decrypted], self.pyfile.package().id)
+
+ def handleFrame(self, url):
+ self.req.load(str(url))
+ return self.req.lastEffectiveURL
+
+ def handleShowDJ(self, url):
+ src = self.getSJSrc(url)
+ soup = BeautifulSoup(src)
+ post = soup.find("div", attrs={"id": "page_post"})
+ ps = post.findAll("p")
+ found = unescape(soup.find("h2").find("a").string.split(' &#8211;')[0])
+ if found:
+ seasonName = found
+
+ groups = {}
+ gid = -1
+ for p in ps:
+ if re.search("<strong>Sprache|<strong>Format", str(p)):
+ var = p.findAll("strong")
+ opts = {"Sprache": "", "Format": ""}
+ for v in var:
+ n = unescape(v.string).strip()
+ n = re.sub(r"^([:]?)(.*?)([:]?)$", r'\2', n)
+ if n.strip() not in opts:
+ continue
+ val = v.nextSibling
+ if not val:
+ continue
+ val = val.replace("|", "").strip()
+ val = re.sub(r"^([:]?)(.*?)([:]?)$", r'\2', val)
+ opts[n.strip()] = val.strip()
+ gid += 1
+ groups[gid] = {}
+ groups[gid]['ep'] = {}
+ groups[gid]['opts'] = opts
+ elif re.search("<strong>Download:", str(p)):
+ parts = str(p).split("<br />")
+ if re.search("<strong>", parts[0]):
+ ename = re.search('<strong>(.*?)</strong>', parts[0]).group(1).strip().decode("utf-8").replace(
+ "&#8211;", "-")
+ groups[gid]['ep'][ename] = {}
+ parts.remove(parts[0])
+ for part in parts:
+ hostername = re.search(r" \| ([-a-zA-Z0-9]+\.\w+)", part)
+ if hostername:
+ hostername = hostername.group(1)
+ groups[gid]['ep'][ename][hostername] = []
+ links = re.findall('href="(.*?)"', part)
+ for link in links:
+ groups[gid]['ep'][ename][hostername].append(link + "#hasName")
+
+ links = []
+ for g in groups.values():
+ for ename in g['ep']:
+ links.extend(self.getpreferred(g['ep'][ename]))
+ if self.getConfig("changeNameDJ") == "Episode":
+ self.packages.append((ename, links, ename))
+ links = []
+ package = "%s (%s, %s)" % (seasonName, g['opts']['Format'], g['opts']['Sprache'])
+ if self.getConfig("changeNameDJ") == "Format":
+ self.packages.append((package, links, package))
+ links = []
+ if (self.getConfig("changeNameDJ") == "Packagename") or re.search("#hasName", url):
+ self.core.files.addLinks(links, self.pyfile.package().id)
+ elif (self.getConfig("changeNameDJ") == "Show") or not re.search("#hasName", url):
+ self.packages.append((seasonName, links, seasonName))
+
+ def handleCategoryDJ(self, url):
+ package_links = []
+ src = self.getSJSrc(url)
+ soup = BeautifulSoup(src)
+ content = soup.find("div", attrs={"id": "content"})
+ for a in content.findAll("a", attrs={"rel": "bookmark"}):
+ package_links.append(a['href'])
+ self.core.files.addLinks(package_links, self.pyfile.package().id)
+
+ def decrypt(self, pyfile):
+ showPattern = re.compile("^http://serienjunkies.org/serie/(.*)/$")
+ seasonPattern = re.compile("^http://serienjunkies.org/.*?/(.*)/$")
+ episodePattern = re.compile("^http://download.serienjunkies.org/f-.*?.html(#hasName)?$")
+ oldStyleLink = re.compile("^http://serienjunkies.org/safe/(.*)$")
+ categoryPatternDJ = re.compile("^http://dokujunkies.org/.*?(.*)$")
+ showPatternDJ = re.compile(r"^http://dokujunkies.org/.*?/(.*)\.html(#hasName)?$")
+ framePattern = re.compile("^http://download.(serienjunkies.org|dokujunkies.org)/frame/go-.*?/$")
+ url = pyfile.url
+ if framePattern.match(url):
+ self.packages.append((pyfile.package().name, [self.handleFrame(url)], pyfile.package().name))
+ elif episodePattern.match(url):
+ self.handleEpisode(url)
+ elif oldStyleLink.match(url):
+ self.handleOldStyleLink(url)
+ elif showPattern.match(url):
+ self.handleShow(url)
+ elif showPatternDJ.match(url):
+ self.handleShowDJ(url)
+ elif seasonPattern.match(url):
+ self.handleSeason(url)
+ elif categoryPatternDJ.match(url):
+ self.handleCategoryDJ(url)
+
+ #selects the preferred hoster, after that selects any hoster (ignoring the one to ignore)
+ def getpreferred(self, hosterlist):
+
+ result = []
+ preferredList = self.getConfig("hosterList").strip().lower().replace(
+ '|', ',').replace('.', '').replace(';', ',').split(',')
+ if (self.getConfig("randomPreferred") is True) and (
+ self.getConfig("hosterListMode") in ["OnlyOne", "OnlyPreferred(One)"]):
+ random.shuffle(preferredList)
+        # work on a copy so hosters already picked as preferred are not added a second time
+ hosterlist2 = hosterlist.copy()
+
+ for preferred in preferredList:
+ for Hoster in hosterlist:
+ if preferred == Hoster.lower().replace('.', ''):
+ for Part in hosterlist[Hoster]:
+ self.logDebug("selected " + Part)
+ result.append(str(Part))
+ del (hosterlist2[Hoster])
+ if self.getConfig("hosterListMode") in ["OnlyOne", "OnlyPreferred(One)"]:
+ return result
+
+ ignorelist = self.getConfig("ignoreList").strip().lower().replace(
+ '|', ',').replace('.', '').replace(';', ',').split(',')
+ if self.getConfig('hosterListMode') in ["OnlyOne", "All"]:
+ for Hoster in hosterlist2:
+ if Hoster.strip().lower().replace('.', '') not in ignorelist:
+ for Part in hosterlist2[Hoster]:
+ self.logDebug("selected2 " + Part)
+ result.append(str(Part))
+
+ if self.getConfig('hosterListMode') == "OnlyOne":
+ return result
+ return result
diff --git a/pyload/plugins/crypter/ShareLinksBiz.py b/pyload/plugins/crypter/ShareLinksBiz.py
new file mode 100644
index 000000000..132d2160b
--- /dev/null
+++ b/pyload/plugins/crypter/ShareLinksBiz.py
@@ -0,0 +1,269 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+from Crypto.Cipher import AES
+from pyload.plugins.Crypter import Crypter
+
+
+class ShareLinksBiz(Crypter):
+ __name__ = "ShareLinksBiz"
+ __type__ = "crypter"
+ __version__ = "1.13"
+
+ __pattern__ = r'http://(?:www\.)?(share-links|s2l)\.biz/(?P<ID>_?\w+)'
+
+ __description__ = """Share-Links.biz decrypter plugin"""
+ __author_name__ = "fragonib"
+ __author_mail__ = "fragonib[AT]yahoo[DOT]es"
+
+
+ def setup(self):
+ self.baseUrl = None
+ self.fileId = None
+ self.package = None
+ self.html = None
+ self.captcha = False
+
+ def decrypt(self, pyfile):
+ # Init
+ self.initFile(pyfile)
+
+ # Request package
+ url = self.baseUrl + '/' + self.fileId
+ self.html = self.load(url, decode=True)
+
+ # Unblock server (load all images)
+ self.unblockServer()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleErrors()
+
+ # Extract package links
+ package_links = []
+ package_links.extend(self.handleWebLinks())
+ package_links.extend(self.handleContainers())
+ package_links.extend(self.handleCNL2())
+ package_links = set(package_links)
+
+ # Get package info
+ package_name, package_folder = self.getPackageInfo()
+
+ # Pack
+ self.packages = [(package_name, package_links, package_folder)]
+
+ def initFile(self, pyfile):
+ url = pyfile.url
+ if 's2l.biz' in url:
+ url = self.load(url, just_header=True)['location']
+ self.baseUrl = "http://www.%s.biz" % re.match(self.__pattern__, url).group(1)
+ self.fileId = re.match(self.__pattern__, url).group('ID')
+ self.package = pyfile.package()
+
+ def isOnline(self):
+ if "No usable content was found" in self.html:
+ self.logDebug("File not found")
+ return False
+ return True
+
+ def isPasswordProtected(self):
+ if re.search(r'''<form.*?id="passwordForm".*?>''', self.html):
+ self.logDebug("Links are protected")
+ return True
+ return False
+
+ def isCaptchaProtected(self):
+ if '<map id="captchamap"' in self.html:
+ self.logDebug("Links are captcha protected")
+ return True
+ return False
+
+ def unblockServer(self):
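+        # Load every template image referenced in the page to "unblock" the server, as a browser would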
+ imgs = re.findall(r"(/template/images/.*?\.gif)", self.html)
+ for img in imgs:
+ self.load(self.baseUrl + img)
+
+ def unlockPasswordProtection(self):
+ password = self.getPassword()
+ self.logDebug("Submitting password [%s] for protected links" % password)
+ post = {"password": password, 'login': 'Submit form'}
+ url = self.baseUrl + '/' + self.fileId
+ self.html = self.load(url, post=post, decode=True)
+
+ def unlockCaptchaProtection(self):
+ # Get captcha map
+ captchaMap = self._getCaptchaMap()
+ self.logDebug("Captcha map with [%d] positions" % len(captchaMap.keys()))
+
+ # Request user for captcha coords
+ m = re.search(r'<img src="/captcha.gif\?d=(.*?)&amp;PHPSESSID=(.*?)&amp;legend=1"', self.html)
+ captchaUrl = self.baseUrl + '/captcha.gif?d=%s&PHPSESSID=%s' % (m.group(1), m.group(2))
+ self.logDebug("Waiting user for correct position")
+ coords = self.decryptCaptcha(captchaUrl, forceUser=True, imgtype="gif", result_type='positional')
+ self.logDebug("Captcha resolved, coords [%s]" % str(coords))
+
+ # Resolve captcha
+ href = self._resolveCoords(coords, captchaMap)
+ if href is None:
+ self.logDebug("Invalid captcha resolving, retrying")
+ self.invalidCaptcha()
+ self.setWait(5, False)
+ self.wait()
+ self.retry()
+ url = self.baseUrl + href
+ self.html = self.load(url, decode=True)
+
+ def _getCaptchaMap(self):
+ mapp = {}
+ for m in re.finditer(r'<area shape="rect" coords="(.*?)" href="(.*?)"', self.html):
+ rect = eval('(' + m.group(1) + ')')
+ href = m.group(2)
+ mapp[rect] = href
+ return mapp
+
+ def _resolveCoords(self, coords, captchaMap):
+ x, y = coords
+ for rect, href in captchaMap.items():
+ x1, y1, x2, y2 = rect
+ if (x >= x1 and x <= x2) and (y >= y1 and y <= y2):
+ return href
+
+ def handleErrors(self):
+ if "The inserted password was wrong" in self.html:
+ self.logDebug("Incorrect password, please set right password on 'Edit package' form and retry")
+ self.fail("Incorrect password, please set right password on 'Edit package' form and retry")
+
+ if self.captcha:
+ if "Your choice was wrong" in self.html:
+ self.logDebug("Invalid captcha, retrying")
+ self.invalidCaptcha()
+ self.setWait(5)
+ self.wait()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+ def getPackageInfo(self):
+ name = folder = None
+
+ # Extract from web package header
+ title_re = r'<h2><img.*?/>(.*)</h2>'
+ m = re.search(title_re, self.html, re.DOTALL)
+ if m is not None:
+ title = m.group(1).strip()
+ if 'unnamed' not in title:
+ name = folder = title
+ self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+
+ # Fallback to defaults
+ if not name or not folder:
+ name = self.package.name
+ folder = self.package.folder
+ self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+
+ # Return package info
+ return name, folder
+
+ def handleWebLinks(self):
+ package_links = []
+ self.logDebug("Handling Web links")
+
+ #@TODO: Gather paginated web links
+ pattern = r"javascript:_get\('(.*?)', \d+, ''\)"
+ ids = re.findall(pattern, self.html)
+ self.logDebug("Decrypting %d Web links" % len(ids))
+ for i, ID in enumerate(ids):
+ try:
+ self.logDebug("Decrypting Web link %d, [%s]" % (i + 1, ID))
+ dwLink = self.baseUrl + "/get/lnk/" + ID
+ response = self.load(dwLink)
+ code = re.search(r'frm/(\d+)', response).group(1)
+ fwLink = self.baseUrl + "/get/frm/" + code
+ response = self.load(fwLink)
+ jscode = re.search(r'<script language="javascript">\s*eval\((.*)\)\s*</script>', response,
+ re.DOTALL).group(1)
+ jscode = self.js.eval("f = %s" % jscode)
+ jslauncher = "window=''; parent={frames:{Main:{location:{href:''}}},location:''}; %s; parent.frames.Main.location.href"
+ dlLink = self.js.eval(jslauncher % jscode)
+ self.logDebug("JsEngine returns value [%s] for redirection link" % dlLink)
+ package_links.append(dlLink)
+ except Exception, detail:
+ self.logDebug("Error decrypting Web link [%s], %s" % (ID, detail))
+ return package_links
+
+ def handleContainers(self):
+ package_links = []
+ self.logDebug("Handling Container links")
+
+ pattern = r"javascript:_get\('(.*?)', 0, '(rsdf|ccf|dlc)'\)"
+ containersLinks = re.findall(pattern, self.html)
+ self.logDebug("Decrypting %d Container links" % len(containersLinks))
+ for containerLink in containersLinks:
+ link = "%s/get/%s/%s" % (self.baseUrl, containerLink[1], containerLink[0])
+ package_links.append(link)
+ return package_links
+
+ def handleCNL2(self):
+ package_links = []
+ self.logDebug("Handling CNL2 links")
+
+ if '/lib/cnl2/ClicknLoad.swf' in self.html:
+ try:
+ (crypted, jk) = self._getCipherParams()
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.fail("Unable to decrypt CNL2 links")
+ return package_links
+
+ def _getCipherParams(self):
+ # Request CNL2
+ code = re.search(r'ClicknLoad.swf\?code=(.*?)"', self.html).group(1)
+ url = "%s/get/cnl2/%s" % (self.baseUrl, code)
+ response = self.load(url)
+ params = response.split(";;")
+
+ # Get jk
+ strlist = list(base64.standard_b64decode(params[1]))
+ strlist.reverse()
+ jk = ''.join(strlist)
+
+ # Get crypted
+ strlist = list(base64.standard_b64decode(params[2]))
+ strlist.reverse()
+ crypted = ''.join(strlist)
+
+ # Log and return
+ return crypted, jk
+
+ def _getLinks(self, crypted, jk):
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Block has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/ShareRapidComFolder.py b/pyload/plugins/crypter/ShareRapidComFolder.py
new file mode 100644
index 000000000..c8e95be1c
--- /dev/null
+++ b/pyload/plugins/crypter/ShareRapidComFolder.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class ShareRapidComFolder(SimpleCrypter):
+ __name__ = "ShareRapidComFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?((share(-?rapid\.(biz|com|cz|info|eu|net|org|pl|sk)|-(central|credit|free|net)\.cz|-ms\.net)|(s-?rapid|rapids)\.(cz|sk))|(e-stahuj|mediatack|premium-rapidshare|rapidshare-premium|qiuck)\.cz|kadzet\.com|stahuj-zdarma\.eu|strelci\.net|universal-share\.com)/(slozka/.+)'
+
+ __description__ = """Share-Rapid.com folder decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ LINK_PATTERN = r'<td class="soubor"[^>]*><a href="([^"]+)">'
diff --git a/pyload/plugins/crypter/SpeedLoadOrgFolder.py b/pyload/plugins/crypter/SpeedLoadOrgFolder.py
new file mode 100644
index 000000000..fff119a93
--- /dev/null
+++ b/pyload/plugins/crypter/SpeedLoadOrgFolder.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class SpeedLoadOrgFolder(DeadCrypter):
+ __name__ = "SpeedLoadOrgFolder"
+ __type__ = "crypter"
+ __version__ = "0.3"
+
+ __pattern__ = r'http://(?:www\.)?speedload\.org/(\d+~f$|folder/\d+/)'
+
+ __description__ = """Speedload decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
diff --git a/pyload/plugins/crypter/StealthTo.py b/pyload/plugins/crypter/StealthTo.py
new file mode 100644
index 000000000..24489a1b3
--- /dev/null
+++ b/pyload/plugins/crypter/StealthTo.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class StealthTo(DeadCrypter):
+ __name__ = "StealthTo"
+ __type__ = "crypter"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?stealth\.to/folder/.+'
+
+ __description__ = """Stealth.to decrypter plugin"""
+ __author_name__ = "spoob"
+ __author_mail__ = "spoob@pyload.org"
diff --git a/pyload/plugins/crypter/TnyCz.py b/pyload/plugins/crypter/TnyCz.py
new file mode 100644
index 000000000..879941ba4
--- /dev/null
+++ b/pyload/plugins/crypter/TnyCz.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+import re
+
+
+class TnyCz(SimpleCrypter):
+ __name__ = "TnyCz"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?tny\.cz/\w+'
+
+ __description__ = """Tny.cz decrypter plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ TITLE_PATTERN = r'<title>(?P<title>.+) - .+</title>'
+
+
+ def getLinks(self):
+ m = re.search(r'<a id=\'save_paste\' href="(.+save\.php\?hash=.+)">', self.html)
+ return re.findall(".+", self.load(m.group(1), decode=True)) if m else None
diff --git a/pyload/plugins/crypter/TrailerzoneInfo.py b/pyload/plugins/crypter/TrailerzoneInfo.py
new file mode 100644
index 000000000..7be3beef0
--- /dev/null
+++ b/pyload/plugins/crypter/TrailerzoneInfo.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class TrailerzoneInfo(DeadCrypter):
+ __name__ = "TrailerzoneInfo"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?trailerzone.info/.*?'
+
+ __description__ = """TrailerZone.info decrypter plugin"""
+ __author_name__ = "godofdream"
+ __author_mail__ = "soilfiction@gmail.com"
diff --git a/pyload/plugins/crypter/TurbobitNetFolder.py b/pyload/plugins/crypter/TurbobitNetFolder.py
new file mode 100644
index 000000000..48b28c28a
--- /dev/null
+++ b/pyload/plugins/crypter/TurbobitNetFolder.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+from pyload.common.json_layer import json_loads
+
+
+class TurbobitNetFolder(SimpleCrypter):
+ __name__ = "TurbobitNetFolder"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?turbobit\.net/download/folder/(?P<ID>\w+)'
+
+ __description__ = """Turbobit.net folder decrypter plugin"""
+ __author_name__ = ("stickell", "Walter Purcaro")
+ __author_mail__ = ("l.stickell@yahoo.it", "vuolter@gmail.com")
+
+ TITLE_PATTERN = r"src='/js/lib/grid/icon/folder.png'> <span>(?P<title>.+?)</span>"
+
+
+ def _getLinks(self, id, page=1):
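+        # Page through the folder's gridFile API (200 rows per request) and yield the contained file ids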
+ gridFile = self.load("http://turbobit.net/downloadfolder/gridFile",
+ get={"rootId": id, "rows": 200, "page": page}, decode=True)
+ grid = json_loads(gridFile)
+
+ if grid['rows']:
+ for i in grid['rows']:
+ yield i['id']
+ for id in self._getLinks(id, page + 1):
+ yield id
+ else:
+ return
+
+ def getLinks(self):
+ id = re.match(self.__pattern__, self.pyfile.url).group("ID")
+ fixurl = lambda id: "http://turbobit.net/%s.html" % id
+ return map(fixurl, self._getLinks(id))
diff --git a/pyload/plugins/crypter/TusfilesNetFolder.py b/pyload/plugins/crypter/TusfilesNetFolder.py
new file mode 100644
index 000000000..f4f1c7723
--- /dev/null
+++ b/pyload/plugins/crypter/TusfilesNetFolder.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import math
+import re
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class TusfilesNetFolder(SimpleCrypter):
+ __name__ = "TusfilesNetFolder"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?tusfiles\.net/go/(?P<ID>\w+)/?'
+
+ __description__ = """Tusfiles.net folder decrypter plugin"""
+ __author_name__ = ("Walter Purcaro", "stickell")
+ __author_mail__ = ("vuolter@gmail.com", "l.stickell@yahoo.it")
+
+ LINK_PATTERN = r'<TD align=left><a href="(.*?)">'
+ TITLE_PATTERN = r'<Title>.*?\: (?P<title>.+) folder</Title>'
+ PAGES_PATTERN = r'>\((?P<pages>\d+) \w+\)<'
+
+ URL_REPLACEMENTS = [(__pattern__, r'https://www.tusfiles.net/go/\g<ID>/')]
+
+
+ def loadPage(self, page_n):
+ return self.load(urljoin(self.pyfile.url, str(page_n)), decode=True)
+
+ def handleMultiPages(self):
+ pages = re.search(self.PAGES_PATTERN, self.html)
+ if pages:
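+ # the listing shows 25 entries per page, so derive the page count from the reported total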
+ pages = int(math.ceil(int(pages.group('pages')) / 25.0))
+ else:
+ return
+
+ for p in xrange(2, pages + 1):
+ self.html = self.loadPage(p)
+ self.package_links += self.getLinks()
diff --git a/pyload/plugins/crypter/UlozToFolder.py b/pyload/plugins/crypter/UlozToFolder.py
new file mode 100644
index 000000000..2cc440a5d
--- /dev/null
+++ b/pyload/plugins/crypter/UlozToFolder.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.Crypter import Crypter
+
+
+class UlozToFolder(Crypter):
+ __name__ = "UlozToFolder"
+ __type__ = "crypter"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?(uloz\.to|ulozto\.(cz|sk|net)|bagruj\.cz|zachowajto\.pl)/(m|soubory)/.*'
+
+ __description__ = """Uloz.to folder decrypter plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FOLDER_PATTERN = r'<ul class="profile_files">(.*?)</ul>'
+ LINK_PATTERN = r'<br /><a href="/([^"]+)">[^<]+</a>'
+ NEXT_PAGE_PATTERN = r'<a class="next " href="/([^"]+)">&nbsp;</a>'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ new_links = []
+ for i in xrange(1, 100):
+ self.logInfo("Fetching links from page %i" % i)
+ m = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
+ if m is None:
+ self.fail("Parse error (FOLDER)")
+
+ new_links.extend(re.findall(self.LINK_PATTERN, m.group(1)))
+ m = re.search(self.NEXT_PAGE_PATTERN, html)
+ if m:
+ html = self.load("http://ulozto.net/" + m.group(1))
+ else:
+ break
+ else:
+ self.logInfo("Limit of 99 pages reached, aborting")
+
+ if new_links:
+ self.urls = map(lambda s: "http://ulozto.net/%s" % s, new_links)
+ else:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/UploadableChFolder.py b/pyload/plugins/crypter/UploadableChFolder.py
new file mode 100644
index 000000000..3be8b0167
--- /dev/null
+++ b/pyload/plugins/crypter/UploadableChFolder.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class UploadableChFolder(SimpleCrypter):
+ __name__ = "UploadableChFolder"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?uploadable\.ch/list/\w+'
+
+ __description__ = """ Uploadable.ch folder decrypter plugin """
+ __author_name__ = ("guidobelix", "Walter Purcaro")
+ __author_mail__ = ("guidobelix@hotmail.it", "vuolter@gmail.com")
+
+
+ LINK_PATTERN = r'"(.+?)" class="icon_zipfile">'
+ TITLE_PATTERN = r'<div class="folder"><span>&nbsp;</span>(?P<title>.+?)</div>'
+ OFFLINE_PATTERN = r'We are sorry... The URL you entered cannot be found on the server.'
+ TEMP_OFFLINE_PATTERN = r'<div class="icon_err">'
diff --git a/pyload/plugins/crypter/UploadedToFolder.py b/pyload/plugins/crypter/UploadedToFolder.py
new file mode 100644
index 000000000..31977409d
--- /dev/null
+++ b/pyload/plugins/crypter/UploadedToFolder.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class UploadedToFolder(SimpleCrypter):
+ __name__ = "UploadedToFolder"
+ __type__ = "crypter"
+ __version__ = "0.3"
+
+ __pattern__ = r'http://(?:www\.)?(uploaded|ul)\.(to|net)/(f|folder|list)/(?P<id>\w+)'
+
+ __description__ = """UploadedTo decrypter plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ PLAIN_PATTERN = r'<small class="date"><a href="(?P<plain>[\w/]+)" onclick='
+ TITLE_PATTERN = r'<title>(?P<title>[^<]+)</title>'
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url)
+
+ package_name, folder_name = self.getPackageNameAndFolder()
+
+ m = re.search(self.PLAIN_PATTERN, self.html)
+ if m:
+ plain_link = 'http://uploaded.net/' + m.group('plain')
+ else:
+ self.fail('Parse error - Unable to find plain url list')
+
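+ # the plain view lists one download link per line; the trailing empty line is dropped below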
+ self.html = self.load(plain_link)
+ package_links = self.html.split('\n')[:-1]
+ self.logDebug('Package has %d links' % len(package_links))
+
+ self.packages = [(package_name, package_links, folder_name)]
diff --git a/pyload/plugins/crypter/WiiReloadedOrg.py b/pyload/plugins/crypter/WiiReloadedOrg.py
new file mode 100644
index 000000000..7dfe574ab
--- /dev/null
+++ b/pyload/plugins/crypter/WiiReloadedOrg.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class WiiReloadedOrg(DeadCrypter):
+ __name__ = "WiiReloadedOrg"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?wii-reloaded\.org/protect/get\.php\?i=.+'
+
+ __description__ = """Wii-Reloaded.org decrypter plugin"""
+ __author_name__ = "hzpz"
+ __author_mail__ = None
diff --git a/pyload/plugins/crypter/XupPl.py b/pyload/plugins/crypter/XupPl.py
new file mode 100644
index 000000000..8d09e28a3
--- /dev/null
+++ b/pyload/plugins/crypter/XupPl.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Crypter import Crypter
+
+
+class XupPl(Crypter):
+ __name__ = "XupPl"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?xup\.pl/.*'
+
+ __description__ = """Xup.pl decrypter plugin"""
+ __author_name__ = "z00nx"
+ __author_mail__ = "z00nx0@gmail.com"
+
+
+ def decrypt(self, pyfile):
+ header = self.load(pyfile.url, just_header=True)
+ if 'location' in header:
+ self.urls = [header['location']]
+ else:
+ self.fail('Unable to find link')
diff --git a/pyload/plugins/crypter/YoutubeBatch.py b/pyload/plugins/crypter/YoutubeBatch.py
new file mode 100644
index 000000000..5b7cb6530
--- /dev/null
+++ b/pyload/plugins/crypter/YoutubeBatch.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Crypter import Crypter
+from pyload.utils import safe_join
+
+API_URL = "AIzaSyCKnWLNlkX-L4oD1aEzqqhRw1zczeD6_k0"
+
+
+class YoutubeBatch(Crypter):
+ __name__ = "YoutubeBatch"
+ __type__ = "crypter"
+ __version__ = "1.00"
+
+ __pattern__ = r'https?://(?:www\.|m\.)?youtube\.com/(?P<TYPE>user|playlist|view_play_list)(/|.*?[?&](?:list|p)=)(?P<ID>[\w-]+)'
+ __config__ = [("likes", "bool", "Grab user (channel) liked videos", False),
+ ("favorites", "bool", "Grab user (channel) favorite videos", False),
+ ("uploads", "bool", "Grab channel unplaylisted videos", True)]
+
+ __description__ = """Youtube.com channel & playlist decrypter plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+
+ def api_response(self, ref, req):
+ req.update({"key": API_KEY})
+ url = urljoin("https://www.googleapis.com/youtube/v3/", ref)
+ page = self.load(url, get=req)
+ return json_loads(page)
+
+ def getChannel(self, user):
+ channels = self.api_response("channels", {"part": "id,snippet,contentDetails", "forUsername": user, "maxResults": "50"})
+ if channels['items']:
+ channel = channels['items'][0]
+ return {"id": channel['id'],
+ "title": channel['snippet']['title'],
+ "relatedPlaylists": channel['contentDetails']['relatedPlaylists'],
+ "user": user} # One lone channel for user?
+
+ def getPlaylist(self, p_id):
+ playlists = self.api_response("playlists", {"part": "snippet", "id": p_id})
+ if playlists['items']:
+ playlist = playlists['items'][0]
+ return {"id": p_id,
+ "title": playlist['snippet']['title'],
+ "channelId": playlist['snippet']['channelId'],
+ "channelTitle": playlist['snippet']['channelTitle']}
+
+ def _getPlaylists(self, id, token=None):
+ req = {"part": "id", "maxResults": "50", "channelId": id}
+ if token:
+ req.update({"pageToken": token})
+
+ playlists = self.api_response("playlists", req)
+
+ for playlist in playlists['items']:
+ yield playlist['id']
+
+ if "nextPageToken" in playlists:
+ for item in self._getPlaylists(id, playlists['nextPageToken']):
+ yield item
+
+ def getPlaylists(self, ch_id):
+ return map(self.getPlaylist, self._getPlaylists(ch_id))
+
+ def _getVideosId(self, id, token=None):
+ req = {"part": "contentDetails", "maxResults": "50", "playlistId": id}
+ if token:
+ req.update({"pageToken": token})
+
+ playlist = self.api_response("playlistItems", req)
+
+ for item in playlist['items']:
+ yield item['contentDetails']['videoId']
+
+ if "nextPageToken" in playlist:
+ for item in self._getVideosId(id, playlist['nextPageToken']):
+ yield item
+
+ def getVideosId(self, p_id):
+ return list(self._getVideosId(p_id))
+
+ def decrypt(self, pyfile):
+ m = re.match(self.__pattern__, pyfile.url)
+ m_id = m.group("ID")
+ m_type = m.group("TYPE")
+
+ if m_type == "user":
+ self.logDebug("Url recognized as Channel")
+ user = m_id
+ channel = self.getChannel(user)
+
+ if channel:
+ playlists = self.getPlaylists(channel['id'])
+ self.logDebug("%s playlist\s found on channel \"%s\"" % (len(playlists), channel['title']))
+
+ relatedplaylist = {p_name: self.getPlaylist(p_id) for p_name, p_id in channel['relatedPlaylists'].iteritems()}
+ self.logDebug("Channel's related playlists found = %s" % relatedplaylist.keys())
+
+ relatedplaylist['uploads']['title'] = "Unplaylisted videos"
+ relatedplaylist['uploads']['checkDups'] = True #: checkDups flag
+
+ for p_name, p_data in relatedplaylist.iteritems():
+ if self.getConfig(p_name):
+ p_data['title'] += " of " + user
+ playlists.append(p_data)
+ else:
+ playlists = []
+ else:
+ self.logDebug("Url recognized as Playlist")
+ playlists = [self.getPlaylist(m_id)]
+
+ if not playlists:
+ self.fail("No playlist available")
+
+ addedvideos = []
+ urlize = lambda x: "https://www.youtube.com/watch?v=" + x
+ for p in playlists:
+ p_name = p['title']
+ p_videos = self.getVideosId(p['id'])
+ p_folder = safe_join(self.config['general']['download_folder'], p['channelTitle'], p_name)
+ self.logDebug("%s video\s found on playlist \"%s\"" % (len(p_videos), p_name))
+
+ if not p_videos:
+ continue
+ elif "checkDups" in p:
+ p_urls = [urlize(v_id) for v_id in p_videos if v_id not in addedvideos]
+ self.logDebug("%s video\s available on playlist \"%s\" after duplicates cleanup" % (len(p_urls), p_name))
+ else:
+ p_urls = map(urlize, p_videos)
+
+ self.packages.append((p_name, p_urls, p_folder)) #: folder is NOT recognized by pyload 0.4.9!
+
+ addedvideos.extend(p_videos)
diff --git a/pyload/plugins/crypter/__init__.py b/pyload/plugins/crypter/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/crypter/__init__.py
diff --git a/pyload/plugins/hooks/AlldebridCom.py b/pyload/plugins/hooks/AlldebridCom.py
new file mode 100644
index 000000000..8eade2941
--- /dev/null
+++ b/pyload/plugins/hooks/AlldebridCom.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class AlldebridCom(MultiHoster):
+ __name__ = "AlldebridCom"
+ __type__ = "hook"
+ __version__ = "0.13"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("https", "bool", "Enable HTTPS", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to stanard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Alldebrid.com hook plugin"""
+ __author_name__ = "Andy Voigt"
+ __author_mail__ = "spamsales@online.de"
+
+
+ def getHoster(self):
+ https = "https" if self.getConfig("https") else "http"
+ page = getURL(https + "://www.alldebrid.com/api.php?action=get_host").replace("\"", "").strip()
+
+ return [x.strip() for x in page.split(",") if x.strip()]
diff --git a/pyload/plugins/hooks/BypassCaptcha.py b/pyload/plugins/hooks/BypassCaptcha.py
new file mode 100644
index 000000000..9558ba4c4
--- /dev/null
+++ b/pyload/plugins/hooks/BypassCaptcha.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+
+from pycurl import FORM_FILE, LOW_SPEED_TIME
+from thread import start_new_thread
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getURL, getRequest
+from pyload.plugins.Hook import Hook
+
+
+class BypassCaptchaException(Exception):
+
+ def __init__(self, err):
+ self.err = err
+
+ def getCode(self):
+ return self.err
+
+ def __str__(self):
+ return "<BypassCaptchaException %s>" % self.err
+
+ def __repr__(self):
+ return "<BypassCaptchaException %s>" % self.err
+
+
+class BypassCaptcha(Hook):
+ __name__ = "BypassCaptcha"
+ __type__ = "hook"
+ __version__ = "0.04"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("force", "bool", "Force BC even if client is connected", False),
+ ("passkey", "password", "Passkey", "")]
+
+ __description__ = """Send captchas to BypassCaptcha.com"""
+ __author_name__ = ("RaNaN", "Godofdream", "zoidberg")
+ __author_mail__ = ("RaNaN@pyload.org", "soilfcition@gmail.com", "zoidberg@mujmail.cz")
+
+ PYLOAD_KEY = "4f771155b640970d5607f919a615bdefc67e7d32"
+
+ SUBMIT_URL = "http://bypasscaptcha.com/upload.php"
+ RESPOND_URL = "http://bypasscaptcha.com/check_value.php"
+ GETCREDITS_URL = "http://bypasscaptcha.com/ex_left.php"
+
+
+ def setup(self):
+ self.info = {}
+
+ def getCredits(self):
+ response = getURL(self.GETCREDITS_URL, post={"key": self.getConfig("passkey")})
+
+ data = dict([x.split(' ', 1) for x in response.splitlines()])
+ return int(data['Left'])
+
+ def submit(self, captcha, captchaType="file", match=None):
+ req = getRequest()
+
+ #raise timeout threshold
+ req.c.setopt(LOW_SPEED_TIME, 80)
+
+ try:
+ response = req.load(self.SUBMIT_URL,
+ post={"vendor_key": self.PYLOAD_KEY,
+ "key": self.getConfig("passkey"),
+ "gen_task_id": "1",
+ "file": (FORM_FILE, captcha)},
+ multipart=True)
+ finally:
+ req.close()
+
+ data = dict([x.split(' ', 1) for x in response.splitlines()])
+ if not data or "Value" not in data:
+ raise BypassCaptchaException(response)
+
+ result = data['Value']
+ ticket = data['TaskId']
+ self.logDebug("result %s : %s" % (ticket, result))
+
+ return ticket, result
+
+ def respond(self, ticket, success):
+ try:
+ response = getURL(self.RESPOND_URL, post={"task_id": ticket, "key": self.getConfig("passkey"),
+ "cv": 1 if success else 0})
+ except BadHeader, e:
+ self.logError("Could not send response.", str(e))
+
+ def newCaptchaTask(self, task):
+ if "service" in task.data:
+ return False
+
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 0:
+ task.handler.append(self)
+ task.data['service'] = self.__name__
+ task.setWaiting(100)
+ start_new_thread(self.processCaptcha, (task,))
+
+ else:
+ self.logInfo("Your %s account has not enough credits" % self.__name__)
+
+ def captchaCorrect(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ self.respond(task.data['ticket'], True)
+
+ def captchaInvalid(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ self.respond(task.data['ticket'], False)
+
+ def processCaptcha(self, task):
+ c = task.captchaFile
+ try:
+ ticket, result = self.submit(c)
+ except BypassCaptchaException, e:
+ task.error = e.getCode()
+ return
+
+ task.data['ticket'] = ticket
+ task.setResult(result)
diff --git a/pyload/plugins/hooks/Captcha9kw.py b/pyload/plugins/hooks/Captcha9kw.py
new file mode 100644
index 000000000..fcb5dd7c1
--- /dev/null
+++ b/pyload/plugins/hooks/Captcha9kw.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import time
+
+from base64 import b64encode
+from thread import start_new_thread
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hook import Hook
+
+
+class Captcha9kw(Hook):
+ __name__ = "Captcha9kw"
+ __type__ = "hook"
+ __version__ = "0.09"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("force", "bool", "Force CT even if client is connected", True),
+ ("https", "bool", "Enable HTTPS", False),
+ ("confirm", "bool", "Confirm Captcha (Cost +6)", False),
+ ("captchaperhour", "int", "Captcha per hour (max. 9999)", 9999),
+ ("prio", "int", "Prio 1-10 (Cost +1-10)", 0),
+ ("selfsolve", "bool",
+ "If enabled and you have a 9kw client active only you will get your captcha to solve it (Selfsolve)",
+ False),
+ ("timeout", "int", "Timeout (max. 300)", 300),
+ ("passkey", "password", "API key", "")]
+
+ __description__ = """Send captchas to 9kw.eu"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+ API_URL = "://www.9kw.eu/index.cgi"
+
+
+ def setup(self):
+ self.API_URL = "https" + self.API_URL if self.getConfig("https") else "http" + self.API_URL
+ self.info = {}
+
+ def getCredits(self):
+ response = getURL(self.API_URL, get={"apikey": self.getConfig("passkey"), "pyload": "1", "source": "pyload",
+ "action": "usercaptchaguthaben"})
+
+ if response.isdigit():
+ self.logInfo(_("%s credits left") % response)
+ self.info['credits'] = credits = int(response)
+ return credits
+ else:
+ self.logError(response)
+ return 0
+
+ def processCaptcha(self, task):
+ result = None
+
+ with open(task.captchaFile, 'rb') as f:
+ data = f.read()
+ data = b64encode(data)
+ self.logDebug("%s : %s" % (task.captchaFile, data))
+ if task.isPositional():
+ mouse = 1
+ else:
+ mouse = 0
+
+ response = getURL(self.API_URL, post={
+ "apikey": self.getConfig("passkey"),
+ "prio": self.getConfig("prio"),
+ "confirm": self.getConfig("confirm"),
+ "captchaperhour": self.getConfig("captchaperhour"),
+ "maxtimeout": self.getConfig("timeout"),
+ "selfsolve": self.getConfig("selfsolve"),
+ "pyload": "1",
+ "source": "pyload",
+ "base64": "1",
+ "mouse": mouse,
+ "file-upload-01": data,
+ "action": "usercaptchaupload"})
+
+ if response.isdigit():
+ self.logInfo(_("New CaptchaID from upload: %s : %s") % (response, task.captchaFile))
+
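+ # poll the API every 3 seconds (up to 99 tries) until the solved text is returned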
+ for _ in xrange(1, 100, 1):
+ response2 = getURL(self.API_URL, get={"apikey": self.getConfig("passkey"), "id": response,
+ "pyload": "1", "source": "pyload",
+ "action": "usercaptchacorrectdata"})
+
+ if response2 != "":
+ break
+
+ time.sleep(3)
+
+ result = response2
+ task.data['ticket'] = response
+ self.logInfo("result %s : %s" % (response, result))
+ task.setResult(result)
+ else:
+ self.logError("Bad upload: %s" % response)
+ return False
+
+ def newCaptchaTask(self, task):
+ if not task.isTextual() and not task.isPositional():
+ return False
+
+ if not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 0:
+ task.handler.append(self)
+ task.setWaiting(self.getConfig("timeout"))
+ start_new_thread(self.processCaptcha, (task,))
+
+ else:
+ self.logError(_("Your Captcha 9kw.eu Account has not enough credits"))
+
+ def captchaCorrect(self, task):
+ if "ticket" in task.data:
+
+ try:
+ response = getURL(self.API_URL,
+ post={"action": "usercaptchacorrectback",
+ "apikey": self.getConfig("passkey"),
+ "api_key": self.getConfig("passkey"),
+ "correct": "1",
+ "pyload": "1",
+ "source": "pyload",
+ "id": task.data['ticket']})
+ self.logInfo("Request correct: %s" % response)
+
+ except BadHeader, e:
+ self.logError("Could not send correct request.", str(e))
+ else:
+ self.logError("No CaptchaID for correct request (task %s) found." % task)
+
+ def captchaInvalid(self, task):
+ if "ticket" in task.data:
+
+ try:
+ response = getURL(self.API_URL,
+ post={"action": "usercaptchacorrectback",
+ "apikey": self.getConfig("passkey"),
+ "api_key": self.getConfig("passkey"),
+ "correct": "2",
+ "pyload": "1",
+ "source": "pyload",
+ "id": task.data['ticket']})
+ self.logInfo("Request refund: %s" % response)
+
+ except BadHeader, e:
+ self.logError("Could not send refund request.", str(e))
+ else:
+ self.logError("No CaptchaID for not correct request (task %s) found." % task)
diff --git a/pyload/plugins/hooks/CaptchaBrotherhood.py b/pyload/plugins/hooks/CaptchaBrotherhood.py
new file mode 100644
index 000000000..81325be92
--- /dev/null
+++ b/pyload/plugins/hooks/CaptchaBrotherhood.py
@@ -0,0 +1,157 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import StringIO
+import pycurl
+
+from PIL import Image
+from thread import start_new_thread
+from time import sleep
+from urllib import urlencode
+
+from pyload.network.RequestFactory import getURL, getRequest
+from pyload.plugins.Hook import Hook
+
+
+class CaptchaBrotherhoodException(Exception):
+
+ def __init__(self, err):
+ self.err = err
+
+ def getCode(self):
+ return self.err
+
+ def __str__(self):
+ return "<CaptchaBrotherhoodException %s>" % self.err
+
+ def __repr__(self):
+ return "<CaptchaBrotherhoodException %s>" % self.err
+
+
+class CaptchaBrotherhood(Hook):
+ __name__ = "CaptchaBrotherhood"
+ __type__ = "hook"
+ __version__ = "0.05"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("username", "str", "Username", ""),
+ ("force", "bool", "Force CT even if client is connected", False),
+ ("passkey", "password", "Password", "")]
+
+ __description__ = """Send captchas to CaptchaBrotherhood.com"""
+ __author_name__ = ("RaNaN", "zoidberg")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")
+
+ API_URL = "http://www.captchabrotherhood.com/"
+
+
+ def setup(self):
+ self.info = {}
+
+ def getCredits(self):
+ response = getURL(self.API_URL + "askCredits.aspx",
+ get={"username": self.getConfig("username"), "password": self.getConfig("passkey")})
+ if not response.startswith("OK"):
+ raise CaptchaBrotherhoodException(response)
+ else:
+ credits = int(response[3:])
+ self.logInfo(_("%d credits left") % credits)
+ self.info['credits'] = credits
+ return credits
+
+ def submit(self, captcha, captchaType="file", match=None):
+ try:
+ img = Image.open(captcha)
+ output = StringIO.StringIO()
+ self.logDebug("CAPTCHA IMAGE", img, img.format, img.mode)
+ if img.format in ("GIF", "JPEG"):
+ img.save(output, img.format)
+ else:
+ if img.mode != "RGB":
+ img = img.convert("RGB")
+ img.save(output, "JPEG")
+ data = output.getvalue()
+ output.close()
+ except Exception, e:
+ raise CaptchaBrotherhoodException("Reading or converting captcha image failed: %s" % e)
+
+ req = getRequest()
+
+ url = "%ssendNewCaptcha.aspx?%s" % (self.API_URL,
+ urlencode({"username": self.getConfig("username"),
+ "password": self.getConfig("passkey"),
+ "captchaSource": "pyLoad",
+ "timeout": "80"}))
+
+ req.c.setopt(pycurl.URL, url)
+ req.c.setopt(pycurl.POST, 1)
+ req.c.setopt(pycurl.POSTFIELDS, data)
+ req.c.setopt(pycurl.HTTPHEADER, ["Content-Type: text/html"])
+
+ try:
+ req.c.perform()
+ response = req.getResponse()
+ except Exception, e:
+ raise CaptchaBrotherhoodException("Submit captcha image failed")
+
+ req.close()
+
+ if not response.startswith("OK"):
+ raise CaptchaBrotherhoodException(response)
+
+ ticket = response[3:]
+
+ for _ in xrange(15):
+ sleep(5)
+ response = self.get_api("askCaptchaResult", ticket)
+ if response.startswith("OK-answered"):
+ return ticket, response[12:]
+
+ raise CaptchaBrotherhoodException("No solution received in time")
+
+ def get_api(self, api, ticket):
+ response = getURL("%s%s.aspx" % (self.API_URL, api),
+ get={"username": self.getConfig("username"),
+ "password": self.getConfig("passkey"),
+ "captchaID": ticket})
+ if not response.startswith("OK"):
+ raise CaptchaBrotherhoodException("Unknown response: %s" % response)
+
+ return response
+
+ def newCaptchaTask(self, task):
+ if "service" in task.data:
+ return False
+
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("username") or not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 10:
+ task.handler.append(self)
+ task.data['service'] = self.__name__
+ task.setWaiting(100)
+ start_new_thread(self.processCaptcha, (task,))
+ else:
+ self.logInfo("Your CaptchaBrotherhood Account has not enough credits")
+
+ def captchaInvalid(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ response = self.get_api("complainCaptcha", task.data['ticket'])
+
+ def processCaptcha(self, task):
+ c = task.captchaFile
+ try:
+ ticket, result = self.submit(c)
+ except CaptchaBrotherhoodException, e:
+ task.error = e.getCode()
+ return
+
+ task.data['ticket'] = ticket
+ task.setResult(result)
diff --git a/pyload/plugins/hooks/Checksum.py b/pyload/plugins/hooks/Checksum.py
new file mode 100644
index 000000000..75ebcdc4c
--- /dev/null
+++ b/pyload/plugins/hooks/Checksum.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import hashlib
+import re
+import zlib
+
+from os import remove
+from os.path import getsize, isfile, splitext
+
+from pyload.plugins.Hook import Hook
+from pyload.utils import safe_join, fs_encode
+
+
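+# compute the digest with hashlib, or a rolling crc32/adler32 via zlib, reading the file in chunks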
+def computeChecksum(local_file, algorithm):
+ if algorithm in getattr(hashlib, "algorithms", ("md5", "sha1", "sha224", "sha256", "sha384", "sha512")):
+ h = getattr(hashlib, algorithm)()
+
+ with open(local_file, 'rb') as f:
+ for chunk in iter(lambda: f.read(128 * h.block_size), b''):
+ h.update(chunk)
+
+ return h.hexdigest()
+
+ elif algorithm in ("adler32", "crc32"):
+ hf = getattr(zlib, algorithm)
+ last = 0
+
+ with open(local_file, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), b''):
+ last = hf(chunk, last)
+
+ return "%x" % last
+
+ else:
+ return None
+
+
+class Checksum(Hook):
+ __name__ = "Checksum"
+ __type__ = "hook"
+ __version__ = "0.13"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("check_checksum", "bool", "Check checksum? (If False only size will be verified)", True),
+ ("check_action", "fail;retry;nothing", "What to do if check fails?", "retry"),
+ ("max_tries", "int", "Number of retries", 2),
+ ("retry_action", "fail;nothing", "What to do if all retries fail?", "fail"),
+ ("wait_time", "int", "Time to wait before each retry (seconds)", 1)]
+
+ __description__ = """Verify downloaded file size and checksum"""
+ __author_name__ = ("zoidberg", "Walter Purcaro", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "vuolter@gmail.com", "l.stickell@yahoo.it")
+
+ methods = {'sfv': 'crc32', 'crc': 'crc32', 'hash': 'md5'}
+ regexps = {'sfv': r'^(?P<name>[^;].+)\s+(?P<hash>[0-9A-Fa-f]{8})$',
+ 'md5': r'^(?P<name>[0-9A-Fa-f]{32}) (?P<file>.+)$',
+ 'crc': r'filename=(?P<name>.+)\nsize=(?P<size>\d+)\ncrc32=(?P<hash>[0-9A-Fa-f]{8})$',
+ 'default': r'^(?P<hash>[0-9A-Fa-f]+)\s+\*?(?P<name>.+)$'}
+
+
+ def coreReady(self):
+ if not self.getConfig("check_checksum"):
+ self.logInfo("Checksum validation is disabled in plugin configuration")
+
+ def setup(self):
+ self.algorithms = sorted(
+ getattr(hashlib, "algorithms", ("md5", "sha1", "sha224", "sha256", "sha384", "sha512")), reverse=True)
+ self.algorithms.extend(["crc32", "adler32"])
+ self.formats = self.algorithms + ["sfv", "crc", "hash"]
+
+ def downloadFinished(self, pyfile):
+ """
+ Compute checksum for the downloaded file and compare it with the hash provided by the hoster.
+ pyfile.plugin.check_data should be a dictionary which can contain:
+ a) if known, the exact filesize in bytes (e.g. "size": 123456789)
+ b) hexadecimal hash string with algorithm name as key (e.g. "md5": "d76505d0869f9f928a17d42d66326307")
+ """
+ if hasattr(pyfile.plugin, "check_data") and (isinstance(pyfile.plugin.check_data, dict)):
+ data = pyfile.plugin.check_data.copy()
+ elif hasattr(pyfile.plugin, "api_data") and (isinstance(pyfile.plugin.api_data, dict)):
+ data = pyfile.plugin.api_data.copy()
+ else:
+ return
+
+ self.logDebug(data)
+
+ if not pyfile.plugin.lastDownload:
+ self.checkFailed(pyfile, None, "No file downloaded")
+
+ local_file = fs_encode(pyfile.plugin.lastDownload)
+ #download_folder = self.config['general']['download_folder']
+ #local_file = fs_encode(safe_join(download_folder, pyfile.package().folder, pyfile.name))
+
+ if not isfile(local_file):
+ self.checkFailed(pyfile, None, "File does not exist")
+
+ # validate file size
+ if "size" in data:
+ api_size = int(data['size'])
+ file_size = getsize(local_file)
+ if api_size != file_size:
+ self.logWarning("File %s has incorrect size: %d B (%d expected)" % (pyfile.name, file_size, api_size))
+ self.checkFailed(pyfile, local_file, "Incorrect file size")
+ del data['size']
+
+ # validate checksum
+ if data and self.getConfig("check_checksum"):
+ if "checksum" in data:
+ data['md5'] = data['checksum']
+
+ for key in self.algorithms:
+ if key in data:
+ checksum = computeChecksum(local_file, key.replace("-", "").lower())
+ if checksum:
+ if checksum == data[key].lower():
+ self.logInfo('File integrity of "%s" verified by %s checksum (%s).' %
+ (pyfile.name, key.upper(), checksum))
+ break
+ else:
+ self.logWarning("%s checksum for file %s does not match (%s != %s)" %
+ (key.upper(), pyfile.name, checksum, data[key]))
+ self.checkFailed(pyfile, local_file, "Checksums do not match")
+ else:
+ self.logWarning("Unsupported hashing algorithm: %s" % key.upper())
+ else:
+ self.logWarning("Unable to validate checksum for file %s" % pyfile.name)
+
+ def checkFailed(self, pyfile, local_file, msg):
+ check_action = self.getConfig("check_action")
+ if check_action == "retry":
+ max_tries = self.getConfig("max_tries")
+ retry_action = self.getConfig("retry_action")
+ if pyfile.plugin.retries < max_tries:
+ if local_file:
+ remove(local_file)
+ pyfile.plugin.retry(max_tries=max_tries, wait_time=self.getConfig("wait_time"), reason=msg)
+ elif retry_action == "nothing":
+ return
+ elif check_action == "nothing":
+ return
+ pyfile.plugin.fail(reason=msg)
+
+ def packageFinished(self, pypack):
+ download_folder = safe_join(self.config['general']['download_folder'], pypack.folder, "")
+
+ for link in pypack.getChildren().itervalues():
+ file_type = splitext(link['name'])[1][1:].lower()
+ #self.logDebug(link, file_type)
+
+ if file_type not in self.formats:
+ continue
+
+ hash_file = fs_encode(safe_join(download_folder, link['name']))
+ if not isfile(hash_file):
+ self.logWarning("File not found: %s" % link['name'])
+ continue
+
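+ # parse the checksum file and verify every file it references against the computed digest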
+ with open(hash_file) as f:
+ text = f.read()
+
+ for m in re.finditer(self.regexps.get(file_type, self.regexps['default']), text):
+ data = m.groupdict()
+ self.logDebug(link['name'], data)
+
+ local_file = fs_encode(safe_join(download_folder, data['name']))
+ algorithm = self.methods.get(file_type, file_type)
+ checksum = computeChecksum(local_file, algorithm)
+ if checksum == data['hash']:
+ self.logInfo('File integrity of "%s" verified by %s checksum (%s).' %
+ (data['name'], algorithm, checksum))
+ else:
+ self.logWarning("%s checksum for file %s does not match (%s != %s)" %
+ (algorithm, data['name'], checksum, data['hash']))
diff --git a/pyload/plugins/hooks/ClickAndLoad.py b/pyload/plugins/hooks/ClickAndLoad.py
new file mode 100644
index 000000000..47163ceef
--- /dev/null
+++ b/pyload/plugins/hooks/ClickAndLoad.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+import socket
+import thread
+
+from pyload.plugins.Hook import Hook
+
+
+class ClickAndLoad(Hook):
+ __name__ = "ClickAndLoad"
+ __type__ = "hook"
+ __version__ = "0.22"
+
+ __config__ = [("activated", "bool", "Activated", True),
+ ("extern", "bool", "Allow external link adding", False)]
+
+ __description__ = """Gives abillity to use jd's click and load. depends on webinterface"""
+ __author_name__ = ("RaNaN", "mkaay")
+ __author_mail__ = ("RaNaN@pyload.de", "mkaay@mkaay.de")
+
+
+ def coreReady(self):
+ self.port = int(self.config['webinterface']['port'])
+ if self.config['webinterface']['activated']:
+ try:
+ if self.getConfig("extern"):
+ ip = "0.0.0.0"
+ else:
+ ip = "127.0.0.1"
+
+ thread.start_new_thread(proxy, (self, ip, self.port, 9666))
+ except:
+ self.logError("ClickAndLoad port already in use.")
+
+
+def proxy(self, *settings):
+ thread.start_new_thread(server, (self,) + settings)
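+ # acquiring the lock twice blocks this thread forever while the server thread keeps running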
+ lock = thread.allocate_lock()
+ lock.acquire()
+ lock.acquire()
+
+
+def server(self, *settings):
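+ # settings = (ip, webinterface_port, 9666): accept Click'N'Load connections and pipe them to the webinterface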
+ try:
+ dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ dock_socket.bind((settings[0], settings[2]))
+ dock_socket.listen(5)
+ while True:
+ client_socket = dock_socket.accept()[0]
+ server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ server_socket.connect(("127.0.0.1", settings[1]))
+ thread.start_new_thread(forward, (client_socket, server_socket))
+ thread.start_new_thread(forward, (server_socket, client_socket))
+ except socket.error, e:
+ if hasattr(e, "errno"):
+ errno = e.errno
+ else:
+ errno = e.args[0]
+
+ if errno == 98:
+ self.logWarning(_("Click'N'Load: Port 9666 already in use"))
+ return
+ thread.start_new_thread(server, (self,) + settings)
+ except:
+ thread.start_new_thread(server, (self,) + settings)
+
+
+def forward(source, destination):
+ string = ' '
+ while string:
+ string = source.recv(1024)
+ if string:
+ destination.sendall(string)
+ else:
+ #source.shutdown(socket.SHUT_RD)
+ destination.shutdown(socket.SHUT_WR)
diff --git a/pyload/plugins/hooks/DeathByCaptcha.py b/pyload/plugins/hooks/DeathByCaptcha.py
new file mode 100644
index 000000000..57bf9031f
--- /dev/null
+++ b/pyload/plugins/hooks/DeathByCaptcha.py
@@ -0,0 +1,202 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import re
+
+from base64 import b64encode
+from pycurl import FORM_FILE, HTTPHEADER
+from thread import start_new_thread
+from time import sleep
+
+from pyload.common.json_layer import json_loads
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getRequest
+from pyload.plugins.Hook import Hook
+
+
+class DeathByCaptchaException(Exception):
+ DBC_ERRORS = {'not-logged-in': 'Access denied, check your credentials',
+ 'invalid-credentials': 'Access denied, check your credentials',
+ 'banned': 'Access denied, account is suspended',
+ 'insufficient-funds': 'Insufficient account balance to decrypt CAPTCHA',
+ 'invalid-captcha': 'CAPTCHA is not a valid image',
+ 'service-overload': 'CAPTCHA was rejected due to service overload, try again later',
+ 'invalid-request': 'Invalid request',
+ 'timed-out': 'No CAPTCHA solution received in time'}
+
+ def __init__(self, err):
+ self.err = err
+
+ def getCode(self):
+ return self.err
+
+ def getDesc(self):
+ if self.err in self.DBC_ERRORS.keys():
+ return self.DBC_ERRORS[self.err]
+ else:
+ return self.err
+
+ def __str__(self):
+ return "<DeathByCaptchaException %s>" % self.err
+
+ def __repr__(self):
+ return "<DeathByCaptchaException %s>" % self.err
+
+
+class DeathByCaptcha(Hook):
+ __name__ = "DeathByCaptcha"
+ __type__ = "hook"
+ __version__ = "0.03"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("username", "str", "Username", ""),
+ ("passkey", "password", "Password", ""),
+ ("force", "bool", "Force DBC even if client is connected", False)]
+
+ __description__ = """Send captchas to DeathByCaptcha.com"""
+ __author_name__ = ("RaNaN", "zoidberg")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")
+
+ API_URL = "http://api.dbcapi.me/api/"
+
+
+ def setup(self):
+ self.info = {}
+
+ def call_api(self, api="captcha", post=False, multipart=False):
+ req = getRequest()
+ req.c.setopt(HTTPHEADER, ["Accept: application/json", "User-Agent: pyLoad %s" % self.core.version])
+
+ if post:
+ if not isinstance(post, dict):
+ post = {}
+ post.update({"username": self.getConfig("username"),
+ "password": self.getConfig("passkey")})
+
+ response = None
+ try:
+ json = req.load("%s%s" % (self.API_URL, api),
+ post=post,
+ multipart=multipart)
+ self.logDebug(json)
+ response = json_loads(json)
+
+ if "error" in response:
+ raise DeathByCaptchaException(response['error'])
+ elif "status" not in response:
+ raise DeathByCaptchaException(str(response))
+
+ except BadHeader, e:
+ if 403 == e.code:
+ raise DeathByCaptchaException('not-logged-in')
+ elif 413 == e.code:
+ raise DeathByCaptchaException('invalid-captcha')
+ elif 503 == e.code:
+ raise DeathByCaptchaException('service-overload')
+ elif e.code in (400, 405):
+ raise DeathByCaptchaException('invalid-request')
+ else:
+ raise
+
+ finally:
+ req.close()
+
+ return response
+
+ def getCredits(self):
+ response = self.call_api("user", True)
+
+ if 'is_banned' in response and response['is_banned']:
+ raise DeathByCaptchaException('banned')
+ elif 'balance' in response and 'rate' in response:
+ self.info.update(response)
+ else:
+ raise DeathByCaptchaException(response)
+
+ def getStatus(self):
+ response = self.call_api("status", False)
+
+ if 'is_service_overloaded' in response and response['is_service_overloaded']:
+ raise DeathByCaptchaException('service-overload')
+
+ def submit(self, captcha, captchaType="file", match=None):
+ #workaround multipart-post bug in HTTPRequest.py
+ if re.match("^[A-Za-z0-9]*$", self.getConfig("passkey")):
+ multipart = True
+ data = (FORM_FILE, captcha)
+ else:
+ multipart = False
+ with open(captcha, 'rb') as f:
+ data = f.read()
+ data = "base64:" + b64encode(data)
+
+ response = self.call_api("captcha", {"captchafile": data}, multipart)
+
+ if "captcha" not in response:
+ raise DeathByCaptchaException(response)
+ ticket = response['captcha']
+
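+ # poll the ticket every 5 seconds for up to two minutes until a correct solution arrives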
+ for _ in xrange(24):
+ sleep(5)
+ response = self.call_api("captcha/%d" % ticket, False)
+ if response['text'] and response['is_correct']:
+ break
+ else:
+ raise DeathByCaptchaException('timed-out')
+
+ result = response['text']
+ self.logDebug("result %s : %s" % (ticket, result))
+
+ return ticket, result
+
+ def newCaptchaTask(self, task):
+ if "service" in task.data:
+ return False
+
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("username") or not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ try:
+ self.getStatus()
+ self.getCredits()
+ except DeathByCaptchaException, e:
+ self.logError(e.getDesc())
+ return False
+
+ balance, rate = self.info['balance'], self.info['rate']
+ self.logInfo("Account balance: US$%.3f (%d captchas left at %.2f cents each)" % (balance / 100,
+ balance // rate, rate))
+
+ if balance > rate:
+ task.handler.append(self)
+ task.data['service'] = self.__name__
+ task.setWaiting(180)
+ start_new_thread(self.processCaptcha, (task,))
+
+ def captchaInvalid(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ try:
+ response = self.call_api("captcha/%d/report" % task.data['ticket'], True)
+ except DeathByCaptchaException, e:
+ self.logError(e.getDesc())
+ except Exception, e:
+ self.logError(e)
+
+ def processCaptcha(self, task):
+ c = task.captchaFile
+ try:
+ ticket, result = self.submit(c)
+ except DeathByCaptchaException, e:
+ task.error = e.getCode()
+ self.logError(e.getDesc())
+ return
+
+ task.data['ticket'] = ticket
+ task.setResult(result)
diff --git a/pyload/plugins/hooks/DebridItaliaCom.py b/pyload/plugins/hooks/DebridItaliaCom.py
new file mode 100644
index 000000000..4272b758f
--- /dev/null
+++ b/pyload/plugins/hooks/DebridItaliaCom.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class DebridItaliaCom(MultiHoster):
+ __name__ = "DebridItaliaCom"
+ __type__ = "hook"
+ __version__ = "0.07"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Debriditalia.com hook plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def getHoster(self):
+ return ["netload.in", "hotfile.com", "rapidshare.com", "multiupload.com",
+ "uploading.com", "megashares.com", "crocko.com", "filepost.com",
+ "bitshare.com", "share-links.biz", "putlocker.com", "uploaded.to",
+ "speedload.org", "rapidgator.net", "likeupload.net", "cyberlocker.ch",
+ "depositfiles.com", "extabit.com", "filefactory.com", "sharefiles.co",
+ "ryushare.com", "tusfiles.net", "nowvideo.co", "cloudzer.net", "letitbit.net",
+ "easybytez.com", "uptobox.com", "ddlstorage.com"]
diff --git a/pyload/plugins/hooks/DeleteFinished.py b/pyload/plugins/hooks/DeleteFinished.py
new file mode 100644
index 000000000..99aa040bf
--- /dev/null
+++ b/pyload/plugins/hooks/DeleteFinished.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+from pyload.database import style
+from pyload.plugins.Hook import Hook
+
+
+class DeleteFinished(Hook):
+ __name__ = "DeleteFinished"
+ __type__ = "hook"
+ __version__ = "1.09"
+
+ __config__ = [('activated', 'bool', 'Activated', 'False'),
+ ('interval', 'int', 'Delete every (hours)', '72'),
+ ('deloffline', 'bool', 'Delete packages with offline links', 'False')]
+
+ __description__ = """Automatically delete all finished packages from queue"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+
+ ## overwritten methods ##
+ def periodical(self):
+ if not self.info['sleep']:
+ deloffline = self.getConfig('deloffline')
+ mode = '0,1,4' if deloffline else '0,4'
+ msg = 'Deleting all finished packages from the queue (%s packages with offline links)'
+ self.logInfo(msg % ('including' if deloffline else 'excluding'))
+ self.deleteFinished(mode)
+ self.info['sleep'] = True
+ self.addEvent('packageFinished', self.wakeup)
+
+ def pluginConfigChanged(self, plugin, name, value):
+ if name == 'interval' and value != self.interval:
+ self.interval = value * 3600
+ self.initPeriodical()
+
+ def unload(self):
+ self.removeEvent('packageFinished', self.wakeup)
+
+ def coreReady(self):
+ self.info = {'sleep': True}
+ interval = self.getConfig('interval')
+ self.pluginConfigChanged('DeleteFinished', 'interval', interval)
+ self.addEvent('packageFinished', self.wakeup)
+
+ ## own methods ##
+ @style.queue
+ def deleteFinished(self, mode):
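+ # drop packages whose links all have a status in the given set, then remove orphaned links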
+ self.c.execute('DELETE FROM packages WHERE NOT EXISTS(SELECT 1 FROM links WHERE package=packages.id AND status NOT IN (%s))' % mode)
+ self.c.execute('DELETE FROM links WHERE NOT EXISTS(SELECT 1 FROM packages WHERE id=links.package)')
+
+ def wakeup(self, pypack):
+ self.removeEvent('packageFinished', self.wakeup)
+ self.info['sleep'] = False
+
+ ## event managing ##
+ def addEvent(self, event, func):
+ """Adds an event listener for event name"""
+ if event in self.m.events:
+ if func in self.m.events[event]:
+ self.logDebug('Function already registered %s' % func)
+ else:
+ self.m.events[event].append(func)
+ else:
+ self.m.events[event] = [func]
+
+ def setup(self):
+ self.m = self.manager
+ self.removeEvent = self.m.removeEvent
diff --git a/pyload/plugins/hooks/DownloadScheduler.py b/pyload/plugins/hooks/DownloadScheduler.py
new file mode 100644
index 000000000..fc2e10aac
--- /dev/null
+++ b/pyload/plugins/hooks/DownloadScheduler.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import localtime
+
+from pyload.plugins.Hook import Hook
+
+
+class DownloadScheduler(Hook):
+ __name__ = "DownloadScheduler"
+ __type__ = "hook"
+ __version__ = "0.21"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("timetable", "str", "List time periods as hh:mm full or number(kB/s)",
+ "0:00 full, 7:00 250, 10:00 0, 17:00 150"),
+ ("abort", "bool", "Abort active downloads when start period with speed 0", False)]
+
+ __description__ = """Download Scheduler"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+
+ def setup(self):
+ self.cb = None # callback to scheduler job; will be removed by the hook manager when the hook is unloaded
+
+ def coreReady(self):
+ self.updateSchedule()
+
+ def updateSchedule(self, schedule=None):
+ if schedule is None:
+ schedule = self.getConfig("timetable")
+
+ schedule = re.findall("(\d{1,2}):(\d{2})[\s]*(-?\d+)",
+ schedule.lower().replace("full", "-1").replace("none", "0"))
+ if not schedule:
+ self.logError("Invalid schedule")
+ return
+
+ t0 = localtime()
+ now = (t0.tm_hour, t0.tm_min, t0.tm_sec, "X")
+ schedule = sorted([(int(x[0]), int(x[1]), 0, int(x[2])) for x in schedule] + [now])
+
+ self.logDebug("Schedule", schedule)
+
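+ # the current time was inserted as a sentinel ("X"); the entry right before it defines the active speed period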
+ for i, v in enumerate(schedule):
+ if v[3] == "X":
+ last, next = schedule[i - 1], schedule[(i + 1) % len(schedule)]
+ self.logDebug("Now/Last/Next", now, last, next)
+
+ self.setDownloadSpeed(last[3])
+
+ next_time = (((24 + next[0] - now[0]) * 60 + next[1] - now[1]) * 60 + next[2] - now[2]) % 86400
+ self.core.scheduler.removeJob(self.cb)
+ self.cb = self.core.scheduler.addJob(next_time, self.updateSchedule, threaded=False)
+
+ def setDownloadSpeed(self, speed):
+ if speed == 0:
+ abort = self.getConfig("abort")
+ self.logInfo("Stopping download server. (Running downloads will %sbe aborted.)" % ('' if abort else 'not '))
+ self.core.api.pauseServer()
+ if abort:
+ self.core.api.stopAllDownloads()
+ else:
+ self.core.api.unpauseServer()
+
+ if speed > 0:
+ self.logInfo("Setting download speed to %d kB/s" % speed)
+ self.core.api.setConfigValue("download", "limit_speed", 1)
+ self.core.api.setConfigValue("download", "max_speed", speed)
+ else:
+ self.logInfo("Setting download speed to FULL")
+ self.core.api.setConfigValue("download", "limit_speed", 0)
+ self.core.api.setConfigValue("download", "max_speed", -1)
diff --git a/pyload/plugins/hooks/EasybytezCom.py b/pyload/plugins/hooks/EasybytezCom.py
new file mode 100644
index 000000000..1ec8a98f1
--- /dev/null
+++ b/pyload/plugins/hooks/EasybytezCom.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class EasybytezCom(MultiHoster):
+ __name__ = "EasybytezCom"
+ __type__ = "hook"
+ __version__ = "0.03"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "")]
+
+ __description__ = """EasyBytez.com hook plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def getHoster(self):
+ self.account = self.core.accountManager.getAccountPlugin(self.__name__)
+ user = self.account.selectAccount()[0]
+
+ try:
+ req = self.account.getAccountRequest(user)
+ page = req.load("http://www.easybytez.com")
+
+ m = re.search(r'</textarea>\s*Supported sites:(.*)', page)
+ return m.group(1).split(',')
+ except Exception, e:
+ self.logDebug(e)
+ self.logWarning("Unable to load supported hoster list, using last known")
+ return ["bitshare.com", "crocko.com", "ddlstorage.com", "depositfiles.com", "extabit.com", "hotfile.com",
+ "mediafire.com", "netload.in", "rapidgator.net", "rapidshare.com", "uploading.com", "uload.to",
+ "uploaded.to"]
diff --git a/pyload/plugins/hooks/Ev0InFetcher.py b/pyload/plugins/hooks/Ev0InFetcher.py
new file mode 100644
index 000000000..5c2022bac
--- /dev/null
+++ b/pyload/plugins/hooks/Ev0InFetcher.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, time
+
+from pyload.lib import feedparser
+
+from pyload.plugins.Hook import Hook
+
+
+class Ev0InFetcher(Hook):
+ __name__ = "Ev0InFetcher"
+ __type__ = "hook"
+ __version__ = "0.21"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("interval", "int", "Check interval in minutes", 10),
+ ("queue", "bool", "Move new shows directly to Queue", False),
+ ("shows", "str", "Shows to check for (comma seperated)", ""),
+ ("quality", "xvid;x264;rmvb", "Video Format", "xvid"),
+ ("hoster", "str", "Hoster to use (comma seperated)",
+ "NetloadIn,RapidshareCom,MegauploadCom,HotfileCom")]
+
+ __description__ = """Checks rss feeds for Ev0.in"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+
+ def setup(self):
+ self.interval = self.getConfig("interval") * 60
+
+ def filterLinks(self, links):
+ results = self.core.pluginManager.parseUrls(links)
+ sortedLinks = {}
+
+ for url, hoster in results:
+ if hoster not in sortedLinks:
+ sortedLinks[hoster] = []
+ sortedLinks[hoster].append(url)
+
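+ # return the links of the first configured hoster that appears in the parsed results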
+ for h in self.getConfig("hoster").split(","):
+ try:
+ return sortedLinks[h.strip()]
+ except:
+ continue
+ return []
+
+
+ def periodical(self):
+
+ def normalizefiletitle(filename):
+ filename = filename.replace('.', ' ')
+ filename = filename.replace('_', ' ')
+ filename = filename.lower()
+ return filename
+
+ shows = [s.strip() for s in self.getConfig("shows").split(",")]
+
+ feed = feedparser.parse("http://feeds.feedburner.com/ev0in/%s?format=xml" % self.getConfig("quality"))
+
+ showStorage = {}
+ for show in shows:
+ showStorage[show] = int(self.getStorage("show_%s_lastfound" % show, 0))
+
+ found = False
+ for item in feed['items']:
+ for show, lastfound in showStorage.iteritems():
+ if show.lower() in normalizefiletitle(item['title']) and lastfound < int(mktime(item.date_parsed)):
+ links = self.filterLinks(item['description'].split("<br />"))
+ packagename = item['title'].encode("utf-8")
+ self.logInfo("Ev0InFetcher: new episode '%s' (matched '%s')" % (packagename, show))
+ self.core.api.addPackage(packagename, links, 1 if self.getConfig("queue") else 0)
+ self.setStorage("show_%s_lastfound" % show, int(mktime(item.date_parsed)))
+ found = True
+ if not found:
+ #self.logDebug("Ev0InFetcher: no new episodes found")
+ pass
+
+ for show, lastfound in self.getStorage().iteritems():
+ if int(lastfound) > 0 and int(lastfound) + (3600 * 24 * 30) < int(time()):
+ self.delStorage("show_%s_lastfound" % show)
+ self.logDebug("Ev0InFetcher: cleaned '%s' record" % show)
diff --git a/pyload/plugins/hooks/ExpertDecoders.py b/pyload/plugins/hooks/ExpertDecoders.py
new file mode 100644
index 000000000..ef5409b76
--- /dev/null
+++ b/pyload/plugins/hooks/ExpertDecoders.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+from base64 import b64encode
+from pycurl import LOW_SPEED_TIME
+from thread import start_new_thread
+from uuid import uuid4
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getURL, getRequest
+from pyload.plugins.Hook import Hook
+
+
+class ExpertDecoders(Hook):
+ __name__ = "ExpertDecoders"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("force", "bool", "Force CT even if client is connected", False),
+ ("passkey", "password", "Access key", "")]
+
+ __description__ = """Send captchas to expertdecoders.com"""
+ __author_name__ = ("RaNaN", "zoidberg")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")
+
+ API_URL = "http://www.fasttypers.org/imagepost.ashx"
+
+
+ def setup(self):
+ self.info = {}
+
+ def getCredits(self):
+ response = getURL(self.API_URL, post={"key": self.getConfig("passkey"), "action": "balance"})
+
+ if response.isdigit():
+ self.logInfo(_("%s credits left") % response)
+ self.info['credits'] = credits = int(response)
+ return credits
+ else:
+ self.logError(response)
+ return 0
+
+ def processCaptcha(self, task):
+ task.data['ticket'] = ticket = uuid4()
+ result = None
+
+ with open(task.captchaFile, 'rb') as f:
+ data = f.read()
+ data = b64encode(data)
+ #self.logDebug("%s: %s : %s" % (ticket, task.captchaFile, data))
+
+ req = getRequest()
+ #raise timeout threshold
+ req.c.setopt(LOW_SPEED_TIME, 80)
+
+ try:
+ result = req.load(self.API_URL, post={"action": "upload", "key": self.getConfig("passkey"),
+ "file": data, "gen_task_id": ticket})
+ finally:
+ req.close()
+
+ self.logDebug("result %s : %s" % (ticket, result))
+ task.setResult(result)
+
+ def newCaptchaTask(self, task):
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 0:
+ task.handler.append(self)
+ task.setWaiting(100)
+ start_new_thread(self.processCaptcha, (task,))
+
+ else:
+ self.logInfo(_("Your ExpertDecoders Account has not enough credits"))
+
+ def captchaInvalid(self, task):
+ if "ticket" in task.data:
+
+ try:
+ response = getURL(self.API_URL, post={"action": "refund", "key": self.getConfig("passkey"),
+ "gen_task_id": task.data['ticket']})
+ self.logInfo("Request refund: %s" % response)
+
+ except BadHeader, e:
+ self.logError("Could not send refund request.", str(e))
diff --git a/pyload/plugins/hooks/ExternalScripts.py b/pyload/plugins/hooks/ExternalScripts.py
new file mode 100644
index 000000000..372035e82
--- /dev/null
+++ b/pyload/plugins/hooks/ExternalScripts.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+
+import subprocess
+
+from os import listdir, access, X_OK, makedirs
+from os.path import join, exists, basename, abspath
+
+from pyload.plugins.Hook import Hook
+from pyload.utils import safe_join
+
+
+class ExternalScripts(Hook):
+ __name__ = "ExternalScripts"
+ __type__ = "hook"
+ __version__ = "0.23"
+
+ __config__ = [("activated", "bool", "Activated", True)]
+
+ __description__ = """Run external scripts"""
+ __author_name__ = ("mkaay", "RaNaN", "spoob")
+ __author_mail__ = ("mkaay@mkaay.de", "ranan@pyload.org", "spoob@pyload.org")
+
+ event_list = ["unrarFinished", "allDownloadsFinished", "allDownloadsProcessed"]
+
+
+ def setup(self):
+ self.scripts = {}
+
+ folders = ["download_preparing", "download_finished", "package_finished",
+ "before_reconnect", "after_reconnect", "unrar_finished",
+ "all_dls_finished", "all_dls_processed"]
+
+ for folder in folders:
+ self.scripts[folder] = []
+
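+ # look for scripts both under the pyload installation (pypath) and in a local "scripts" directory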
+ self.initPluginType(folder, join(pypath, 'scripts', folder))
+ self.initPluginType(folder, join('scripts', folder))
+
+ for script_type, names in self.scripts.iteritems():
+ if names:
+ self.logInfo((_("Installed scripts for %s: ") % script_type) + ", ".join([basename(x) for x in names]))
+
+ def initPluginType(self, folder, path):
+ if not exists(path):
+ try:
+ makedirs(path)
+ except:
+ self.logDebug("Script folder %s not created" % folder)
+ return
+
+ for f in listdir(path):
+ if f.startswith("#") or f.startswith(".") or f.startswith("_") or f.endswith("~") or f.endswith(".swp"):
+ continue
+
+ if not access(join(path, f), X_OK):
+ self.logWarning(_("Script not executable:") + " %s/%s" % (folder, f))
+
+ self.scripts[folder].append(join(path, f))
+
+ def callScript(self, script, *args):
+ try:
+ cmd = [script] + [str(x) if not isinstance(x, basestring) else x for x in args]
+ self.logDebug("Executing %(script)s: %(cmd)s" % {"script": abspath(script), "cmd": " ".join(cmd)})
+ #output goes to pyload
+ subprocess.Popen(cmd, bufsize=-1)
+ except Exception, e:
+ self.logError(_("Error in %(script)s: %(error)s") % {"script": basename(script), "error": str(e)})
+
+ def downloadPreparing(self, pyfile):
+ for script in self.scripts['download_preparing']:
+ self.callScript(script, pyfile.pluginname, pyfile.url, pyfile.id)
+
+ def downloadFinished(self, pyfile):
+ for script in self.scripts['download_finished']:
+ self.callScript(script, pyfile.pluginname, pyfile.url, pyfile.name,
+ safe_join(self.config['general']['download_folder'],
+ pyfile.package().folder, pyfile.name), pyfile.id)
+
+ def packageFinished(self, pypack):
+ for script in self.scripts['package_finished']:
+ folder = self.config['general']['download_folder']
+ folder = safe_join(folder, pypack.folder)
+
+ self.callScript(script, pypack.name, folder, pypack.password, pypack.id)
+
+ def beforeReconnecting(self, ip):
+ for script in self.scripts['before_reconnect']:
+ self.callScript(script, ip)
+
+ def afterReconnecting(self, ip):
+ for script in self.scripts['after_reconnect']:
+ self.callScript(script, ip)
+
+ def unrarFinished(self, folder, fname):
+ for script in self.scripts['unrar_finished']:
+ self.callScript(script, folder, fname)
+
+ def allDownloadsFinished(self):
+ for script in self.scripts['all_dls_finished']:
+ self.callScript(script)
+
+ def allDownloadsProcessed(self):
+ for script in self.scripts['all_dls_processed']:
+ self.callScript(script)
diff --git a/pyload/plugins/hooks/ExtractArchive.py b/pyload/plugins/hooks/ExtractArchive.py
new file mode 100644
index 000000000..1a2da53ad
--- /dev/null
+++ b/pyload/plugins/hooks/ExtractArchive.py
@@ -0,0 +1,320 @@
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+
+from copy import copy
+from os import remove, chmod, makedirs
+from os.path import exists, basename, isfile, isdir
+from traceback import print_exc
+
+# monkey patch bug in python 2.6 and lower
+# http://bugs.python.org/issue6122 , http://bugs.python.org/issue1236 , http://bugs.python.org/issue1731717
+if sys.version_info < (2, 7) and os.name != "nt":
+ import errno
+ from subprocess import Popen
+
+ def _eintr_retry_call(func, *args):
+ while True:
+ try:
+ return func(*args)
+ except OSError, e:
+ if e.errno == errno.EINTR:
+ continue
+ raise
+
+    # unused timeout argument, kept for signature compatibility with newer Python versions
+ def wait(self, timeout=0):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if self.returncode is None:
+ try:
+ pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
+ except OSError, e:
+ if e.errno != errno.ECHILD:
+ raise
+ # This happens if SIGCLD is set to be ignored or waiting
+ # for child processes has otherwise been disabled for our
+ # process. This child is dead, we can't get the status.
+ sts = 0
+ self._handle_exitstatus(sts)
+ return self.returncode
+
+ Popen.wait = wait
+
+if os.name != "nt":
+ from grp import getgrnam
+ from os import chown
+ from pwd import getpwnam
+
+from pyload.plugins.Hook import Hook, threaded, Expose
+from pyload.plugins.internal.AbstractExtractor import ArchiveError, CRCError, WrongPassword
+from pyload.utils import safe_join, fs_encode
+
+
+class ExtractArchive(Hook):
+ """
+ Provides: unrarFinished (folder, filename)
+ """
+ __name__ = "ExtractArchive"
+ __type__ = "hook"
+ __version__ = "0.16"
+
+ __config__ = [("activated", "bool", "Activated", True),
+ ("fullpath", "bool", "Extract full path", True),
+ ("overwrite", "bool", "Overwrite files", True),
+ ("passwordfile", "file", "password file", "unrar_passwords.txt"),
+ ("deletearchive", "bool", "Delete archives when done", False),
+ ("subfolder", "bool", "Create subfolder for each package", False),
+ ("destination", "folder", "Extract files to", ""),
+                  ("excludefiles", "str", "Exclude files from unpacking (separated by ;)", ""),
+                  ("recursive", "bool", "Extract archives within archives", True),
+ ("queue", "bool", "Wait for all downloads to be finished", True),
+ ("renice", "int", "CPU Priority", 0)]
+
+    __description__ = """Extract different kinds of archives"""
+ __author_name__ = ("pyLoad Team", "AndroKev")
+ __author_mail__ = ("admin@pyload.org", "@pyloadforum")
+
+ event_list = ["allDownloadsProcessed"]
+
+
+ def setup(self):
+ self.plugins = []
+ self.passwords = []
+ names = []
+
+ for p in ("UnRar", "UnZip"):
+ try:
+ module = self.core.pluginManager.loadModule("internal", p)
+ klass = getattr(module, p)
+ if klass.checkDeps():
+ names.append(p)
+ self.plugins.append(klass)
+
+ except OSError, e:
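+                # errno 2 (ENOENT): the extractor binary is not installed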
+ if e.errno == 2:
+ self.logInfo(_("No %s installed") % p)
+ else:
+ self.logWarning(_("Could not activate %s") % p, str(e))
+ if self.core.debug:
+ print_exc()
+
+ except Exception, e:
+ self.logWarning(_("Could not activate %s") % p, str(e))
+ if self.core.debug:
+ print_exc()
+
+ if names:
+ self.logInfo(_("Activated") + " " + " ".join(names))
+ else:
+ self.logInfo(_("No Extract plugins activated"))
+
+ # queue with package ids
+ self.queue = []
+
+ @Expose
+ def extractPackage(self, id):
+ """ Extract package with given id"""
+ self.manager.startThread(self.extract, [id])
+
+ def packageFinished(self, pypack):
+ if self.getConfig("queue"):
+ self.logInfo(_("Package %s queued for later extracting") % pypack.name)
+ self.queue.append(pypack.id)
+ else:
+ self.manager.startThread(self.extract, [pypack.id])
+
+ @threaded
+ def allDownloadsProcessed(self, thread):
+ local = copy(self.queue)
+ del self.queue[:]
+ self.extract(local, thread)
+
+ def extract(self, ids, thread=None):
+ # reload from txt file
+ self.reloadPasswords()
+
+ # dl folder
+ dl = self.config['general']['download_folder']
+
+ extracted = []
+
+ #iterate packages -> plugins -> targets
+ for pid in ids:
+ p = self.core.files.getPackage(pid)
+            if not p:
+                continue
+            self.logInfo(_("Check package %s") % p.name)
+
+ # determine output folder
+ out = safe_join(dl, p.folder, "")
+ # force trailing slash
+
+ if self.getConfig("destination") and self.getConfig("destination").lower() != "none":
+
+ out = safe_join(dl, p.folder, self.getConfig("destination"), "")
+ #relative to package folder if destination is relative, otherwise absolute path overwrites them
+
+ if self.getConfig("subfolder"):
+ out = safe_join(out, fs_encode(p.folder))
+
+ if not exists(out):
+ makedirs(out)
+
+ files_ids = [(safe_join(dl, p.folder, x['name']), x['id']) for x in p.getChildren().itervalues()]
+ matched = False
+
+ # check as long there are unseen files
+ while files_ids:
+ new_files_ids = []
+
+ for plugin in self.plugins:
+ targets = plugin.getTargets(files_ids)
+ if targets:
+ self.logDebug("Targets for %s: %s" % (plugin.__name__, targets))
+ matched = True
+ for target, fid in targets:
+ if target in extracted:
+ self.logDebug(basename(target), "skipped")
+ continue
+ extracted.append(target) # prevent extracting same file twice
+
+ klass = plugin(self, target, out, self.getConfig("fullpath"), self.getConfig("overwrite"), self.getConfig("excludefiles"),
+ self.getConfig("renice"))
+ klass.init()
+
+ self.logInfo(basename(target), _("Extract to %s") % out)
+ new_files = self.startExtracting(klass, fid, p.password.strip().splitlines(), thread)
+ self.logDebug("Extracted: %s" % new_files)
+ self.setPermissions(new_files)
+
+ for file in new_files:
+ if not exists(file):
+                                    self.logDebug("new file %s does not exist" % file)
+ continue
+ if self.getConfig("recursive") and isfile(file):
+ new_files_ids.append((file, fid)) # append as new target
+
+ files_ids = new_files_ids # also check extracted files
+
+ if not matched:
+ self.logInfo(_("No files found to extract"))
+
+ def startExtracting(self, plugin, fid, passwords, thread):
+ pyfile = self.core.files.getFile(fid)
+ if not pyfile:
+ return []
+
+ pyfile.setCustomStatus(_("extracting"))
+ thread.addActive(pyfile) # keep this file until everything is done
+
+ try:
+ progress = lambda x: pyfile.setProgress(x)
+ success = False
+
+ if not plugin.checkArchive():
+ plugin.extract(progress)
+ success = True
+ else:
+ self.logInfo(basename(plugin.file), _("Password protected"))
+ self.logDebug("Passwords: %s" % str(passwords))
+
+ pwlist = copy(self.getPasswords())
+ #remove already supplied pws from list (only local)
+ for pw in passwords:
+ if pw in pwlist:
+ pwlist.remove(pw)
+
+ for pw in passwords + pwlist:
+ try:
+ self.logDebug("Try password: %s" % pw)
+ if plugin.checkPassword(pw):
+ plugin.extract(progress, pw)
+ self.addPassword(pw)
+ success = True
+ break
+ except WrongPassword:
+ self.logDebug("Password was wrong")
+
+ if not success:
+ self.logError(basename(plugin.file), _("Wrong password"))
+ return []
+
+ if self.core.debug:
+ self.logDebug("Would delete: %s" % ", ".join(plugin.getDeleteFiles()))
+
+ if self.getConfig("deletearchive"):
+ files = plugin.getDeleteFiles()
+ self.logInfo(_("Deleting %s files") % len(files))
+ for f in files:
+ if exists(f):
+ remove(f)
+ else:
+                        self.logDebug("%s does not exist" % f)
+
+ self.logInfo(basename(plugin.file), _("Extracting finished"))
+ self.manager.dispatchEvent("unrarFinished", plugin.out, plugin.file)
+
+ return plugin.getExtractedFiles()
+
+ except ArchiveError, e:
+ self.logError(basename(plugin.file), _("Archive Error"), str(e))
+ except CRCError:
+ self.logError(basename(plugin.file), _("CRC Mismatch"))
+ except Exception, e:
+ if self.core.debug:
+ print_exc()
+ self.logError(basename(plugin.file), _("Unknown Error"), str(e))
+
+ return []
+
+ @Expose
+ def getPasswords(self):
+ """ List of saved passwords """
+ return self.passwords
+
+ def reloadPasswords(self):
+ pwfile = self.getConfig("passwordfile")
+ if not exists(pwfile):
+ open(pwfile, "wb").close()
+
+ passwords = []
+ f = open(pwfile, "rb")
+ for pw in f.read().splitlines():
+ passwords.append(pw)
+ f.close()
+
+ self.passwords = passwords
+
+ @Expose
+ def addPassword(self, pw):
+ """ Adds a password to saved list"""
+ pwfile = self.getConfig("passwordfile")
+
+ if pw in self.passwords:
+ self.passwords.remove(pw)
+ self.passwords.insert(0, pw)
+
+ f = open(pwfile, "wb")
+ for pw in self.passwords:
+ f.write(pw + "\n")
+ f.close()
+
+ def setPermissions(self, files):
+ for f in files:
+ if not exists(f):
+ continue
+ try:
+ if self.config['permission']['change_file']:
+ if isfile(f):
+ chmod(f, int(self.config['permission']['file'], 8))
+ elif isdir(f):
+ chmod(f, int(self.config['permission']['folder'], 8))
+
+ if self.config['permission']['change_dl'] and os.name != "nt":
+ uid = getpwnam(self.config['permission']['user'])[2]
+ gid = getgrnam(self.config['permission']['group'])[2]
+ chown(f, uid, gid)
+ except Exception, e:
+ self.logWarning(_("Setting User and Group failed"), e)
diff --git a/pyload/plugins/hooks/FastixRu.py b/pyload/plugins/hooks/FastixRu.py
new file mode 100644
index 000000000..966bc6bd3
--- /dev/null
+++ b/pyload/plugins/hooks/FastixRu.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class FastixRu(MultiHoster):
+ __name__ = "FastixRu"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Fastix.ru hook plugin"""
+ __author_name__ = "Massimo Rosamilia"
+ __author_mail__ = "max@spiritix.eu"
+
+
+ def getHoster(self):
+ page = getURL(
+ "http://fastix.ru/api_v2/?apikey=5182964c3f8f9a7f0b00000a_kelmFB4n1IrnCDYuIFn2y&sub=allowed_sources")
+ host_list = json_loads(page)
+ host_list = host_list['allow']
+ return host_list
diff --git a/pyload/plugins/hooks/FreeWayMe.py b/pyload/plugins/hooks/FreeWayMe.py
new file mode 100644
index 000000000..35b275067
--- /dev/null
+++ b/pyload/plugins/hooks/FreeWayMe.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class FreeWayMe(MultiHoster):
+ __name__ = "FreeWayMe"
+ __type__ = "hook"
+ __version__ = "0.11"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """FreeWay.me hook plugin"""
+ __author_name__ = "Nicolas Giese"
+ __author_mail__ = "james@free-way.me"
+
+
+ def getHoster(self):
+ hostis = getURL("https://www.free-way.me/ajax/jd.php", get={"id": 3}).replace("\"", "").strip()
+ self.logDebug("hosters: %s" % hostis)
+ return [x.strip() for x in hostis.split(",") if x.strip()]
diff --git a/pyload/plugins/hooks/HotFolder.py b/pyload/plugins/hooks/HotFolder.py
new file mode 100644
index 000000000..f76e95af4
--- /dev/null
+++ b/pyload/plugins/hooks/HotFolder.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+
+import time
+
+from os import listdir, makedirs
+from os.path import exists, isfile, join
+from shutil import move
+
+from pyload.plugins.Hook import Hook
+
+
+class HotFolder(Hook):
+ __name__ = "HotFolder"
+ __type__ = "hook"
+ __version__ = "0.11"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("folder", "str", "Folder to observe", "container"),
+ ("watch_file", "bool", "Observe link file", False),
+ ("keep", "bool", "Keep added containers", True),
+ ("file", "str", "Link file", "links.txt")]
+
+    __description__ = """Observe a folder and a link file for changes and add containers and links"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.de"
+
+
+ def setup(self):
+ self.interval = 10
+
+ def periodical(self):
+ if not exists(join(self.getConfig("folder"), "finished")):
+ makedirs(join(self.getConfig("folder"), "finished"))
+
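+        # watch_file mode: read the link file, truncate it, archive its content under "finished" and add it as a package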
+ if self.getConfig("watch_file"):
+
+ if not exists(self.getConfig("file")):
+ f = open(self.getConfig("file"), "wb")
+ f.close()
+
+ f = open(self.getConfig("file"), "rb")
+ content = f.read().strip()
+ f.close()
+ f = open(self.getConfig("file"), "wb")
+ f.close()
+ if content:
+ name = "%s_%s.txt" % (self.getConfig("file"), time.strftime("%H-%M-%S_%d%b%Y"))
+
+ f = open(join(self.getConfig("folder"), "finished", name), "wb")
+ f.write(content)
+ f.close()
+
+ self.core.api.addPackage(f.name, [f.name], 1)
+
+ for f in listdir(self.getConfig("folder")):
+ path = join(self.getConfig("folder"), f)
+
+ if not isfile(path) or f.endswith("~") or f.startswith("#") or f.startswith("."):
+ continue
+
+ newpath = join(self.getConfig("folder"), "finished", f if self.getConfig("keep") else "tmp_" + f)
+ move(path, newpath)
+
+ self.logInfo(_("Added %s from HotFolder") % f)
+ self.core.api.addPackage(f, [newpath], 1)
diff --git a/pyload/plugins/hooks/IRCInterface.py b/pyload/plugins/hooks/IRCInterface.py
new file mode 100644
index 000000000..af8d8fa69
--- /dev/null
+++ b/pyload/plugins/hooks/IRCInterface.py
@@ -0,0 +1,404 @@
+# -*- coding: utf-8 -*-
+
+import re
+import socket
+import time
+
+from pycurl import FORM_FILE
+from select import select
+from threading import Thread
+from time import sleep
+from traceback import print_exc
+
+from pyload.Api import PackageDoesNotExists, FileDoesNotExists
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hook import Hook
+from pyload.utils import formatSize
+
+
+class IRCInterface(Thread, Hook):
+ __name__ = "IRCInterface"
+ __type__ = "hook"
+ __version__ = "0.11"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("host", "str", "IRC-Server Address", "Enter your server here!"),
+ ("port", "int", "IRC-Server Port", 6667),
+                  ("ident", "str", "Client's ident", "pyload-irc"),
+ ("realname", "str", "Realname", "pyload-irc"),
+ ("nick", "str", "Nickname the Client will take", "pyLoad-IRC"),
+ ("owner", "str", "Nickname the Client will accept commands from", "Enter your nick here!"),
+ ("info_file", "bool", "Inform about every file finished", False),
+ ("info_pack", "bool", "Inform about every package finished", True),
+ ("captcha", "bool", "Send captcha requests", True)]
+
+    __description__ = """Connect to IRC and let the owner perform different tasks"""
+ __author_name__ = "Jeix"
+ __author_mail__ = "Jeix@hasnomail.com"
+
+
+ def __init__(self, core, manager):
+ Thread.__init__(self)
+ Hook.__init__(self, core, manager)
+ self.setDaemon(True)
+ # self.sm = core.server_methods
+ self.api = core.api # todo, only use api
+
+ def coreReady(self):
+ self.abort = False
+ self.more = []
+ self.new_package = {}
+
+ self.start()
+
+ def packageFinished(self, pypack):
+ try:
+ if self.getConfig("info_pack"):
+ self.response(_("Package finished: %s") % pypack.name)
+ except:
+ pass
+
+ def downloadFinished(self, pyfile):
+ try:
+ if self.getConfig("info_file"):
+ self.response(
+ _("Download finished: %(name)s @ %(plugin)s ") % {"name": pyfile.name, "plugin": pyfile.pluginname})
+ except:
+ pass
+
+ def newCaptchaTask(self, task):
+ if self.getConfig("captcha") and task.isTextual():
+ task.handler.append(self)
+ task.setWaiting(60)
+
+ page = getURL("http://www.freeimagehosting.net/upload.php",
+ post={"attached": (FORM_FILE, task.captchaFile)}, multipart=True)
+
+ url = re.search(r"\[img\]([^\[]+)\[/img\]\[/url\]", page).group(1)
+ self.response(_("New Captcha Request: %s") % url)
+ self.response(_("Answer with 'c %s text on the captcha'") % task.id)
+
+ def run(self):
+ # connect to IRC etc.
+ self.sock = socket.socket()
+ host = self.getConfig("host")
+ self.sock.connect((host, self.getConfig("port")))
+ nick = self.getConfig("nick")
+ self.sock.send("NICK %s\r\n" % nick)
+ self.sock.send("USER %s %s bla :%s\r\n" % (nick, host, nick))
+ for t in self.getConfig("owner").split():
+ if t.strip().startswith("#"):
+ self.sock.send("JOIN %s\r\n" % t.strip())
+ self.logInfo("pyLoad IRC: Connected to %s!" % host)
+ self.logInfo("pyLoad IRC: Switching to listening mode!")
+ try:
+ self.main_loop()
+
+ except IRCError, ex:
+ self.sock.send("QUIT :byebye\r\n")
+ print_exc()
+ self.sock.close()
+
+ def main_loop(self):
+ readbuffer = ""
+ while True:
+ sleep(1)
+ fdset = select([self.sock], [], [], 0)
+ if self.sock not in fdset[0]:
+ continue
+
+ if self.abort:
+ raise IRCError("quit")
+
+ readbuffer += self.sock.recv(1024)
+ temp = readbuffer.split("\n")
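+            # the last element may be an incomplete line; keep it in the buffer for the next recv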
+ readbuffer = temp.pop()
+
+ for line in temp:
+ line = line.rstrip()
+ first = line.split()
+
+ if first[0] == "PING":
+ self.sock.send("PONG %s\r\n" % first[1])
+
+ if first[0] == "ERROR":
+ raise IRCError(line)
+
+ msg = line.split(None, 3)
+ if len(msg) < 4:
+ continue
+
+ msg = {
+ "origin": msg[0][1:],
+ "action": msg[1],
+ "target": msg[2],
+ "text": msg[3][1:]
+ }
+
+ self.handle_events(msg)
+
+ def handle_events(self, msg):
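+        # only react to private messages (PRIVMSG) sent by the configured owner to our nick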
+ if not msg['origin'].split("!", 1)[0] in self.getConfig("owner").split():
+ return
+
+ if msg['target'].split("!", 1)[0] != self.getConfig("nick"):
+ return
+
+ if msg['action'] != "PRIVMSG":
+ return
+
+ # HANDLE CTCP ANTI FLOOD/BOT PROTECTION
+ if msg['text'] == "\x01VERSION\x01":
+ self.logDebug("Sending CTCP VERSION.")
+ self.sock.send("NOTICE %s :%s\r\n" % (msg['origin'], "pyLoad! IRC Interface"))
+ return
+ elif msg['text'] == "\x01TIME\x01":
+ self.logDebug("Sending CTCP TIME.")
+ self.sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
+ return
+ elif msg['text'] == "\x01LAG\x01":
+ self.logDebug("Received CTCP LAG.") # don't know how to answer
+ return
+
+ trigger = "pass"
+ args = None
+
+ try:
+ temp = msg['text'].split()
+ trigger = temp[0]
+ if len(temp) > 1:
+ args = temp[1:]
+ except:
+ pass
+
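+        # dispatch to the matching event_<trigger> method, falling back to event_pass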
+ handler = getattr(self, "event_%s" % trigger, self.event_pass)
+ try:
+ res = handler(args)
+ for line in res:
+ self.response(line, msg['origin'])
+ except Exception, e:
+ self.logError("pyLoad IRC: " + repr(e))
+
+ def response(self, msg, origin=""):
+ if origin == "":
+ for t in self.getConfig("owner").split():
+ self.sock.send("PRIVMSG %s :%s\r\n" % (t.strip(), msg))
+ else:
+ self.sock.send("PRIVMSG %s :%s\r\n" % (origin.split("!", 1)[0], msg))
+
+ #### Events
+
+ def event_pass(self, args):
+ return []
+
+ def event_status(self, args):
+ downloads = self.api.statusDownloads()
+ if not downloads:
+ return ["INFO: There are no active downloads currently."]
+
+ temp_progress = ""
+ lines = ["ID - Name - Status - Speed - ETA - Progress"]
+ for data in downloads:
+
+ if data.status == 5:
+ temp_progress = data.format_wait
+ else:
+ temp_progress = "%d%% (%s)" % (data.percent, data.format_size)
+
+ lines.append("#%d - %s - %s - %s - %s - %s" %
+ (
+ data.fid,
+ data.name,
+ data.statusmsg,
+ "%s/s" % formatSize(data.speed),
+ "%s" % data.format_eta,
+ temp_progress
+ ))
+ return lines
+
+ def event_queue(self, args):
+ ps = self.api.getQueueData()
+
+ if not ps:
+ return ["INFO: There are no packages in queue."]
+
+ lines = []
+ for pack in ps:
+ lines.append('PACKAGE #%s: "%s" with %d links.' % (pack.pid, pack.name, len(pack.links)))
+
+ return lines
+
+ def event_collector(self, args):
+ ps = self.api.getCollectorData()
+ if not ps:
+ return ["INFO: No packages in collector!"]
+
+ lines = []
+ for pack in ps:
+ lines.append('PACKAGE #%s: "%s" with %d links.' % (pack.pid, pack.name, len(pack.links)))
+
+ return lines
+
+ def event_info(self, args):
+ if not args:
+ return ["ERROR: Use info like this: info <id>"]
+
+ info = None
+ try:
+ info = self.api.getFileData(int(args[0]))
+
+ except FileDoesNotExists:
+            return ["ERROR: Link doesn't exist."]
+
+ return ['LINK #%s: %s (%s) [%s][%s]' % (info.fid, info.name, info.format_size, info.statusmsg, info.plugin)]
+
+ def event_packinfo(self, args):
+ if not args:
+ return ["ERROR: Use packinfo like this: packinfo <id>"]
+
+ lines = []
+ pack = None
+ try:
+ pack = self.api.getPackageData(int(args[0]))
+
+ except PackageDoesNotExists:
+            return ["ERROR: Package doesn't exist."]
+
+ id = args[0]
+
+ self.more = []
+
+ lines.append('PACKAGE #%s: "%s" with %d links' % (id, pack.name, len(pack.links)))
+ for pyfile in pack.links:
+ self.more.append('LINK #%s: %s (%s) [%s][%s]' % (pyfile.fid, pyfile.name, pyfile.format_size,
+ pyfile.statusmsg, pyfile.plugin))
+
+ if len(self.more) < 6:
+ lines.extend(self.more)
+ self.more = []
+ else:
+ lines.extend(self.more[:6])
+ self.more = self.more[6:]
+            lines.append("%d more links to display." % len(self.more))
+
+ return lines
+
+ def event_more(self, args):
+ if not self.more:
+ return ["No more information to display."]
+
+ lines = self.more[:6]
+ self.more = self.more[6:]
+        lines.append("%d more links to display." % len(self.more))
+
+ return lines
+
+ def event_start(self, args):
+ self.api.unpauseServer()
+ return ["INFO: Starting downloads."]
+
+ def event_stop(self, args):
+ self.api.pauseServer()
+ return ["INFO: No new downloads will be started."]
+
+ def event_add(self, args):
+ if len(args) < 2:
+ return ['ERROR: Add links like this: "add <packagename|id> links". ',
+                    "This will add the links to the package <package> / the package with id <id>!"]
+
+ pack = args[0].strip()
+ links = [x.strip() for x in args[1:]]
+
+ count_added = 0
+ count_failed = 0
+ try:
+ id = int(pack)
+ pack = self.api.getPackageData(id)
+ if not pack:
+                return ["ERROR: Package doesn't exist."]
+
+ #TODO add links
+
+ return ["INFO: Added %d links to Package %s [#%d]" % (len(links), pack['name'], id)]
+
+ except:
+ # create new package
+ id = self.api.addPackage(pack, links, 1)
+ return ["INFO: Created new Package %s [#%d] with %d links." % (pack, id, len(links))]
+
+ def event_del(self, args):
+ if len(args) < 2:
+ return ["ERROR: Use del command like this: del -p|-l <id> [...] (-p indicates that the ids are from packages, -l indicates that the ids are from links)"]
+
+ if args[0] == "-p":
+ ret = self.api.deletePackages(map(int, args[1:]))
+ return ["INFO: Deleted %d packages!" % len(args[1:])]
+
+ elif args[0] == "-l":
+ ret = self.api.delLinks(map(int, args[1:]))
+ return ["INFO: Deleted %d links!" % len(args[1:])]
+
+ else:
+ return ["ERROR: Use del command like this: del <-p|-l> <id> [...] (-p indicates that the ids are from packages, -l indicates that the ids are from links)"]
+
+ def event_push(self, args):
+ if not args:
+ return ["ERROR: Push package to queue like this: push <package id>"]
+
+ id = int(args[0])
+ try:
+ info = self.api.getPackageInfo(id)
+ except PackageDoesNotExists:
+ return ["ERROR: Package #%d does not exist." % id]
+
+ self.api.pushToQueue(id)
+ return ["INFO: Pushed package #%d to queue." % id]
+
+ def event_pull(self, args):
+ if not args:
+ return ["ERROR: Pull package from queue like this: pull <package id>."]
+
+ id = int(args[0])
+ if not self.api.getPackageData(id):
+ return ["ERROR: Package #%d does not exist." % id]
+
+ self.api.pullFromQueue(id)
+ return ["INFO: Pulled package #%d from queue to collector." % id]
+
+ def event_c(self, args):
+ """ captcha answer """
+ if not args:
+ return ["ERROR: Captcha ID missing."]
+
+ task = self.core.captchaManager.getTaskByID(args[0])
+ if not task:
+            return ["ERROR: Captcha Task with ID %s does not exist." % args[0]]
+
+ task.setResult(" ".join(args[1:]))
+ return ["INFO: Result %s saved." % " ".join(args[1:])]
+
+ def event_help(self, args):
+ lines = ["The following commands are available:",
+ "add <package|packid> <links> [...] Adds link to package. (creates new package if it does not exist)",
+ "queue Shows all packages in the queue",
+ "collector Shows all packages in collector",
+ "del -p|-l <id> [...] Deletes all packages|links with the ids specified",
+ "info <id> Shows info of the link with id <id>",
+ "packinfo <id> Shows info of the package with id <id>",
+ "more Shows more info when the result was truncated",
+ "start Starts all downloads",
+ "stop Stops the download (but not abort active downloads)",
+ "push <id> Push package to queue",
+ "pull <id> Pull package from queue",
+ "status Show general download status",
+ "help Shows this help message"]
+ return lines
+
+
+class IRCError(Exception):
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return repr(self.value)
diff --git a/pyload/plugins/hooks/ImageTyperz.py b/pyload/plugins/hooks/ImageTyperz.py
new file mode 100644
index 000000000..2591a1c78
--- /dev/null
+++ b/pyload/plugins/hooks/ImageTyperz.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import re
+
+from base64 import b64encode
+from pycurl import FORM_FILE, LOW_SPEED_TIME
+from thread import start_new_thread
+
+from pyload.network.RequestFactory import getURL, getRequest
+from pyload.plugins.Hook import Hook
+
+
+class ImageTyperzException(Exception):
+
+ def __init__(self, err):
+ self.err = err
+
+ def getCode(self):
+ return self.err
+
+ def __str__(self):
+ return "<ImageTyperzException %s>" % self.err
+
+ def __repr__(self):
+ return "<ImageTyperzException %s>" % self.err
+
+
+class ImageTyperz(Hook):
+ __name__ = "ImageTyperz"
+ __type__ = "hook"
+ __version__ = "0.04"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("username", "str", "Username", ""),
+ ("passkey", "password", "Password", ""),
+ ("force", "bool", "Force IT even if client is connected", False)]
+
+ __description__ = """Send captchas to ImageTyperz.com"""
+ __author_name__ = ("RaNaN", "zoidberg")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")
+
+ SUBMIT_URL = "http://captchatypers.com/Forms/UploadFileAndGetTextNEW.ashx"
+ RESPOND_URL = "http://captchatypers.com/Forms/SetBadImage.ashx"
+ GETCREDITS_URL = "http://captchatypers.com/Forms/RequestBalance.ashx"
+
+
+ def setup(self):
+ self.info = {}
+
+ def getCredits(self):
+ response = getURL(self.GETCREDITS_URL, post={"action": "REQUESTBALANCE", "username": self.getConfig("username"),
+ "password": self.getConfig("passkey")})
+
+ if response.startswith('ERROR'):
+ raise ImageTyperzException(response)
+
+ try:
+ balance = float(response)
+ except:
+ raise ImageTyperzException("invalid response")
+
+ self.logInfo("Account balance: $%s left" % response)
+ return balance
+
+ def submit(self, captcha, captchaType="file", match=None):
+ req = getRequest()
+ #raise timeout threshold
+ req.c.setopt(LOW_SPEED_TIME, 80)
+
+ try:
+ #workaround multipart-post bug in HTTPRequest.py
+ if re.match("^[A-Za-z0-9]*$", self.getConfig("passkey")):
+ multipart = True
+ data = (FORM_FILE, captcha)
+ else:
+ multipart = False
+ with open(captcha, 'rb') as f:
+ data = f.read()
+ data = b64encode(data)
+
+ response = req.load(self.SUBMIT_URL, post={"action": "UPLOADCAPTCHA",
+ "username": self.getConfig("username"),
+ "password": self.getConfig("passkey"), "file": data},
+ multipart=multipart)
+ finally:
+ req.close()
+
+ if response.startswith("ERROR"):
+ raise ImageTyperzException(response)
+ else:
+ data = response.split('|')
+ if len(data) == 2:
+ ticket, result = data
+ else:
+ raise ImageTyperzException("Unknown response %s" % response)
+
+ return ticket, result
+
+ def newCaptchaTask(self, task):
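+        # handle only textual captchas, and only if credentials are set and no client is connected (unless "force" is enabled)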
+ if "service" in task.data:
+ return False
+
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("username") or not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 0:
+ task.handler.append(self)
+ task.data['service'] = self.__name__
+ task.setWaiting(100)
+ start_new_thread(self.processCaptcha, (task,))
+
+ else:
+            self.logInfo("Your %s account does not have enough credits" % self.__name__)
+
+ def captchaInvalid(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ response = getURL(self.RESPOND_URL, post={"action": "SETBADIMAGE", "username": self.getConfig("username"),
+ "password": self.getConfig("passkey"),
+ "imageid": task.data['ticket']})
+
+ if response == "SUCCESS":
+ self.logInfo("Bad captcha solution received, requested refund")
+ else:
+ self.logError("Bad captcha solution received, refund request failed", response)
+
+ def processCaptcha(self, task):
+ c = task.captchaFile
+ try:
+ ticket, result = self.submit(c)
+ except ImageTyperzException, e:
+ task.error = e.getCode()
+ return
+
+ task.data['ticket'] = ticket
+ task.setResult(result)
diff --git a/pyload/plugins/hooks/LinkdecrypterCom.py b/pyload/plugins/hooks/LinkdecrypterCom.py
new file mode 100644
index 000000000..34517761a
--- /dev/null
+++ b/pyload/plugins/hooks/LinkdecrypterCom.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hook import Hook
+from pyload.utils import remove_chars
+
+
+class LinkdecrypterCom(Hook):
+ __name__ = "LinkdecrypterCom"
+ __type__ = "hook"
+ __version__ = "0.19"
+
+ __config__ = [("activated", "bool", "Activated", False)]
+
+ __description__ = """Linkdecrypter.com hook plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def coreReady(self):
+ try:
+ self.loadPatterns()
+ except Exception, e:
+ self.logError(e)
+
+ def loadPatterns(self):
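+        # scrape the crypters supported by linkdecrypter.com, drop those already
+        # handled by built-in plugins and register the rest as this plugin's URL pattern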
+ page = getURL("http://linkdecrypter.com/")
+ m = re.search(r'<b>Supported\(\d+\)</b>: <i>([^+<]*)', page)
+ if m is None:
+ self.logError(_("Crypter list not found"))
+ return
+
+ builtin = [name.lower() for name in self.core.pluginManager.crypterPlugins.keys()]
+ builtin.append("downloadserienjunkiesorg")
+
+        crypter_pattern = re.compile(r"(\w[\w.-]+)")
+ online = []
+ for crypter in m.group(1).split(', '):
+ m = re.match(crypter_pattern, crypter)
+ if m and remove_chars(m.group(1), "-.") not in builtin:
+ online.append(m.group(1).replace(".", "\\."))
+
+ if not online:
+ self.logError(_("Crypter list is empty"))
+ return
+
+ regexp = r"https?://([^.]+\.)*?(%s)/.*" % "|".join(online)
+
+        plugin_dict = self.core.pluginManager.crypterPlugins[self.__name__]
+        plugin_dict['pattern'] = regexp
+        plugin_dict['re'] = re.compile(regexp)
+
+ self.logDebug("REGEXP: " + regexp)
diff --git a/pyload/plugins/hooks/LinksnappyCom.py b/pyload/plugins/hooks/LinksnappyCom.py
new file mode 100644
index 000000000..f662ae4e9
--- /dev/null
+++ b/pyload/plugins/hooks/LinksnappyCom.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class LinksnappyCom(MultiHoster):
+ __name__ = "LinksnappyCom"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Linksnappy.com hook plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def getHoster(self):
+ json_data = getURL('http://gen.linksnappy.com/lseAPI.php?act=FILEHOSTS')
+ json_data = json_loads(json_data)
+
+ return json_data['return'].keys()
diff --git a/pyload/plugins/hooks/MegaDebridEu.py b/pyload/plugins/hooks/MegaDebridEu.py
new file mode 100644
index 000000000..da151f9aa
--- /dev/null
+++ b/pyload/plugins/hooks/MegaDebridEu.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class MegaDebridEu(MultiHoster):
+ __name__ = "MegaDebridEu"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False)]
+
+ __description__ = """mega-debrid.eu hook plugin"""
+ __author_name__ = "D.Ducatel"
+ __author_mail__ = "dducatel@je-geek.fr"
+
+
+ def getHoster(self):
+        response = getURL('http://www.mega-debrid.eu/api.php?action=getHosters')
+        json_data = json_loads(response)
+
+ if json_data['response_code'] == "ok":
+ host_list = [element[0] for element in json_data['hosters']]
+ else:
+ self.logError("Unable to retrieve hoster list")
+ host_list = list()
+
+ return host_list
diff --git a/pyload/plugins/hooks/MergeFiles.py b/pyload/plugins/hooks/MergeFiles.py
new file mode 100644
index 000000000..5761a5990
--- /dev/null
+++ b/pyload/plugins/hooks/MergeFiles.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+import os
+import re
+import traceback
+
+from pyload.plugins.Hook import Hook, threaded
+from pyload.utils import safe_join, fs_encode
+
+
+class MergeFiles(Hook):
+ __name__ = "MergeFiles"
+ __type__ = "hook"
+ __version__ = "0.12"
+
+ __config__ = [("activated", "bool", "Activated", False)]
+
+    __description__ = """Merges parts split with hjsplit"""
+ __author_name__ = "and9000"
+ __author_mail__ = "me@has-no-mail.com"
+
+ BUFFER_SIZE = 4096
+
+
+ def setup(self):
+ # nothing to do
+ pass
+
+ @threaded
+ def packageFinished(self, pack):
+ files = {}
+ fid_dict = {}
+ for fid, data in pack.getChildren().iteritems():
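+            # group hjsplit parts (names ending in .001, .002, ...) by base name and remember their file ids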
+            if re.search(r"\.[0-9]{3}$", data['name']):
+ if data['name'][:-4] not in files:
+ files[data['name'][:-4]] = []
+ files[data['name'][:-4]].append(data['name'])
+ files[data['name'][:-4]].sort()
+ fid_dict[data['name']] = fid
+
+ download_folder = self.config['general']['download_folder']
+
+ if self.config['general']['folder_per_package']:
+ download_folder = safe_join(download_folder, pack.folder)
+
+ for name, file_list in files.iteritems():
+ self.logInfo("Starting merging of %s" % name)
+ final_file = open(safe_join(download_folder, name), "wb")
+
+ for splitted_file in file_list:
+ self.logDebug("Merging part %s" % splitted_file)
+ pyfile = self.core.files.getFile(fid_dict[splitted_file])
+ pyfile.setStatus("processing")
+ try:
+ s_file = open(os.path.join(download_folder, splitted_file), "rb")
+ size_written = 0
+ s_file_size = int(os.path.getsize(os.path.join(download_folder, splitted_file)))
+ while True:
+ f_buffer = s_file.read(self.BUFFER_SIZE)
+ if f_buffer:
+ final_file.write(f_buffer)
+                            size_written += len(f_buffer)
+ pyfile.setProgress((size_written * 100) / s_file_size)
+ else:
+ break
+ s_file.close()
+ self.logDebug("Finished merging part %s" % splitted_file)
+ except Exception, e:
+                    traceback.print_exc()
+ finally:
+ pyfile.setProgress(100)
+ pyfile.setStatus("finished")
+ pyfile.release()
+
+ final_file.close()
+ self.logInfo("Finished merging of %s" % name)
diff --git a/pyload/plugins/hooks/MultiDebridCom.py b/pyload/plugins/hooks/MultiDebridCom.py
new file mode 100644
index 000000000..7d9b6526a
--- /dev/null
+++ b/pyload/plugins/hooks/MultiDebridCom.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class MultiDebridCom(MultiHoster):
+ __name__ = "MultiDebridCom"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Multi-debrid.com hook plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def getHoster(self):
+ json_data = getURL('http://multi-debrid.com/api.php?hosts', decode=True)
+ self.logDebug('JSON data: ' + json_data)
+ json_data = json_loads(json_data)
+
+ return json_data['hosts']
diff --git a/pyload/plugins/hooks/MultiHome.py b/pyload/plugins/hooks/MultiHome.py
new file mode 100644
index 000000000..61fbdd230
--- /dev/null
+++ b/pyload/plugins/hooks/MultiHome.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+from time import time
+
+from pyload.plugins.Hook import Hook
+
+
+class MultiHome(Hook):
+ __name__ = "MultiHome"
+ __type__ = "hook"
+ __version__ = "0.11"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("interfaces", "str", "Interfaces", "None")]
+
+    __description__ = """IP address changer"""
+ __author_name__ = "mkaay"
+ __author_mail__ = "mkaay@mkaay.de"
+
+
+ def setup(self):
+ self.register = {}
+ self.interfaces = []
+ self.parseInterfaces(self.getConfig("interfaces").split(";"))
+ if not self.interfaces:
+ self.parseInterfaces([self.config['download']['interface']])
+ self.setConfig("interfaces", self.toConfig())
+
+ def toConfig(self):
+        return ";".join([i.address for i in self.interfaces])
+
+ def parseInterfaces(self, interfaces):
+ for interface in interfaces:
+ if not interface or str(interface).lower() == "none":
+ continue
+ self.interfaces.append(Interface(interface))
+
+ def coreReady(self):
+ requestFactory = self.core.requestFactory
+ oldGetRequest = requestFactory.getRequest
+
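+        # wrap the factory's getRequest so each (plugin, account) pair is bound to the least recently used interface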
+ def getRequest(pluginName, account=None):
+ iface = self.bestInterface(pluginName, account)
+ if iface:
+ iface.useFor(pluginName, account)
+                requestFactory.iface = lambda: iface.address
+                self.logDebug("Multihome: using address: " + iface.address)
+ return oldGetRequest(pluginName, account)
+
+ requestFactory.getRequest = getRequest
+
+ def bestInterface(self, pluginName, account):
+ best = None
+ for interface in self.interfaces:
+ if not best or interface.lastPluginAccess(pluginName, account) < best.lastPluginAccess(pluginName, account):
+ best = interface
+ return best
+
+
+class Interface(object):
+
+    def __init__(self, address):
+        self.address = address
+ self.history = {}
+
+ def lastPluginAccess(self, pluginName, account):
+ if (pluginName, account) in self.history:
+ return self.history[(pluginName, account)]
+ return 0
+
+ def useFor(self, pluginName, account):
+ self.history[(pluginName, account)] = time()
+
+ def __repr__(self):
+        return "<Interface - %s>" % self.address
diff --git a/pyload/plugins/hooks/MultishareCz.py b/pyload/plugins/hooks/MultishareCz.py
new file mode 100644
index 000000000..9e1bd50a4
--- /dev/null
+++ b/pyload/plugins/hooks/MultishareCz.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class MultishareCz(MultiHoster):
+ __name__ = "MultishareCz"
+ __type__ = "hook"
+ __version__ = "0.04"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "uloz.to")]
+
+ __description__ = """MultiShare.cz hook plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ HOSTER_PATTERN = r'<img class="logo-shareserveru"[^>]*?alt="([^"]+)"></td>\s*<td class="stav">[^>]*?alt="OK"'
+
+
+ def getHoster(self):
+ page = getURL("http://www.multishare.cz/monitoring/")
+ return re.findall(self.HOSTER_PATTERN, page)
diff --git a/pyload/plugins/hooks/OverLoadMe.py b/pyload/plugins/hooks/OverLoadMe.py
new file mode 100644
index 000000000..a57c7c2b4
--- /dev/null
+++ b/pyload/plugins/hooks/OverLoadMe.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class OverLoadMe(MultiHoster):
+ __name__ = "OverLoadMe"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("https", "bool", "Enable HTTPS", True),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 12)]
+
+ __description__ = """Over-Load.me hook plugin"""
+ __author_name__ = "marley"
+ __author_mail__ = "marley@over-load.me"
+
+
+ def getHoster(self):
+ https = "https" if self.getConfig("https") else "http"
+ page = getURL(https + "://api.over-load.me/hoster.php",
+ get={"auth": "0001-cb1f24dadb3aa487bda5afd3b76298935329be7700cd7-5329be77-00cf-1ca0135f"}
+ ).replace("\"", "").strip()
+ self.logDebug("Hosterlist: %s" % page)
+
+ return [x.strip() for x in page.split(",") if x.strip()]
diff --git a/pyload/plugins/hooks/Premium4Me.py b/pyload/plugins/hooks/Premium4Me.py
new file mode 100644
index 000000000..6841dfa90
--- /dev/null
+++ b/pyload/plugins/hooks/Premium4Me.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class Premium4Me(MultiHoster):
+ __name__ = "Premium4Me"
+ __type__ = "hook"
+ __version__ = "0.03"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for downloads from supported hosters:", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "")]
+
+ __description__ = """Premium.to hook plugin"""
+ __author_name__ = ("RaNaN", "zoidberg", "stickell")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+
+ def getHoster(self):
+ page = getURL("http://premium.to/api/hosters.php?authcode=%s" % self.account.authcode)
+ return [x.strip() for x in page.replace("\"", "").split(";")]
+
+ def coreReady(self):
+ self.account = self.core.accountManager.getAccountPlugin("Premium4Me")
+
+ user = self.account.selectAccount()[0]
+
+ if not user:
+ self.logError(_("Please add your premium.to account first and restart pyLoad"))
+ return
+
+ return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/hooks/PremiumizeMe.py b/pyload/plugins/hooks/PremiumizeMe.py
new file mode 100644
index 000000000..70bc4a0f2
--- /dev/null
+++ b/pyload/plugins/hooks/PremiumizeMe.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class PremiumizeMe(MultiHoster):
+ __name__ = "PremiumizeMe"
+ __type__ = "hook"
+ __version__ = "0.12"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Premiumize.me hook plugin"""
+ __author_name__ = "Florian Franzen"
+ __author_mail__ = "FlorianFranzen@gmail.com"
+
+
+ def getHoster(self):
+ # If no accounts are available there will be no hosters available
+ if not self.account or not self.account.canUse():
+ return []
+
+ # Get account data
+ (user, data) = self.account.selectAccount()
+
+ # Get supported hosters list from premiumize.me using the
+ # json API v1 (see https://secure.premiumize.me/?show=api)
+ answer = getURL("https://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s" % (
+ user, data['password']))
+ data = json_loads(answer)
+
+        # If the account is not valid there are no hosters available
+ if data['status'] != 200:
+ return []
+
+ # Extract hosters from json file
+ return data['result']['hosterlist']
+
+ def coreReady(self):
+ # Get account plugin and check if there is a valid account available
+ self.account = self.core.accountManager.getAccountPlugin("PremiumizeMe")
+ if not self.account.canUse():
+ self.account = None
+ self.logError(_("Please add a valid premiumize.me account first and restart pyLoad."))
+ return
+
+        # Run the overridden coreReady which actually enables the multihoster hook
+ return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/hooks/RPNetBiz.py b/pyload/plugins/hooks/RPNetBiz.py
new file mode 100644
index 000000000..e119e6451
--- /dev/null
+++ b/pyload/plugins/hooks/RPNetBiz.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class RPNetBiz(MultiHoster):
+ __name__ = "RPNetBiz"
+ __type__ = "hook"
+ __version__ = "0.1"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """RPNet.biz hook plugin"""
+ __author_name__ = "Dman"
+ __author_mail__ = "dmanugm@gmail.com"
+
+
+ def getHoster(self):
+ # No hosts supported if no account
+ if not self.account or not self.account.canUse():
+ return []
+
+ # Get account data
+ (user, data) = self.account.selectAccount()
+
+ response = getURL("https://premium.rpnet.biz/client_api.php",
+ get={"username": user, "password": data['password'], "action": "showHosterList"})
+ hoster_list = json_loads(response)
+
+        # If the account is not valid there are no hosters available
+ if 'error' in hoster_list:
+ return []
+
+ # Extract hosters from json file
+ return hoster_list['hosters']
+
+ def coreReady(self):
+ # Get account plugin and check if there is a valid account available
+ self.account = self.core.accountManager.getAccountPlugin("RPNetBiz")
+ if not self.account.canUse():
+ self.account = None
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "rpnet")
+ return
+
+        # Run the overridden coreReady which actually enables the multihoster hook
+ return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/hooks/RealdebridCom.py b/pyload/plugins/hooks/RealdebridCom.py
new file mode 100644
index 000000000..c1c519ace
--- /dev/null
+++ b/pyload/plugins/hooks/RealdebridCom.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class RealdebridCom(MultiHoster):
+ __name__ = "RealdebridCom"
+ __type__ = "hook"
+ __version__ = "0.43"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("https", "bool", "Enable HTTPS", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Real-Debrid.com hook plugin"""
+ __author_name__ = "Devirex Hazzard"
+ __author_mail__ = "naibaf_11@yahoo.de"
+
+
+ def getHoster(self):
+ https = "https" if self.getConfig("https") else "http"
+ page = getURL(https + "://real-debrid.com/api/hosters.php").replace("\"", "").strip()
+
+ return [x.strip() for x in page.split(",") if x.strip()]
diff --git a/pyload/plugins/hooks/RehostTo.py b/pyload/plugins/hooks/RehostTo.py
new file mode 100644
index 000000000..097ebc646
--- /dev/null
+++ b/pyload/plugins/hooks/RehostTo.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class RehostTo(MultiHoster):
+ __name__ = "RehostTo"
+ __type__ = "hook"
+ __version__ = "0.43"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Rehost.to hook plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ def getHoster(self):
+ page = getURL("http://rehost.to/api.php?cmd=get_supported_och_dl&long_ses=%s" % self.long_ses)
+ return [x.strip() for x in page.replace("\"", "").split(",")]
+
+ def coreReady(self):
+ self.account = self.core.accountManager.getAccountPlugin("RehostTo")
+
+ user = self.account.selectAccount()[0]
+
+ if not user:
+ self.logError("Rehost.to: " + _("Please add your rehost.to account first and restart pyLoad"))
+ return
+
+ data = self.account.getAccountInfo(user)
+ self.ses = data['ses']
+ self.long_ses = data['long_ses']
+
+ return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/hooks/RestartFailed.py b/pyload/plugins/hooks/RestartFailed.py
new file mode 100644
index 000000000..a50ab60a4
--- /dev/null
+++ b/pyload/plugins/hooks/RestartFailed.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Hook import Hook
+
+
+class RestartFailed(Hook):
+ __name__ = "RestartFailed"
+ __type__ = "hook"
+ __version__ = "1.55"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("interval", "int", "Check interval in minutes", 90)]
+
+ __description__ = """Periodically restart all failed downloads in queue"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ MIN_INTERVAL = 15 * 60 #: 15m minimum check interval (value is in seconds)
+
+ event_list = ["pluginConfigChanged"]
+
+
+ def pluginConfigChanged(self, plugin, name, value):
+ if name == "interval":
+ interval = value * 60
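+            # apply only if the new interval is at least MIN_INTERVAL and differs from the current one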
+ if self.MIN_INTERVAL <= interval != self.interval:
+ self.core.scheduler.removeJob(self.cb)
+ self.interval = interval
+ self.initPeriodical()
+ else:
+ self.logDebug("Invalid interval value, kept current")
+
+ def periodical(self):
+ self.logInfo("Restart failed downloads")
+ self.api.restartFailed()
+
+ def setup(self):
+ self.api = self.core.api
+ self.interval = self.MIN_INTERVAL
+
+ def coreReady(self):
+ self.pluginConfigChanged(self.__name__, "interval", self.getConfig("interval"))
diff --git a/pyload/plugins/hooks/SimplyPremiumCom.py b/pyload/plugins/hooks/SimplyPremiumCom.py
new file mode 100644
index 000000000..8e9bc5e1e
--- /dev/null
+++ b/pyload/plugins/hooks/SimplyPremiumCom.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class SimplyPremiumCom(MultiHoster):
+ __name__ = "SimplyPremiumCom"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+    __config__ = [("activated", "bool", "Activated", False),
+                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+                  ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+                  ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Simply-Premium.com hook plugin"""
+ __author_name__ = "EvolutionClip"
+ __author_mail__ = "evolutionclip@live.de"
+
+
+ def getHoster(self):
+ json_data = getURL('http://www.simply-premium.com/api/hosts.php?format=json&online=1')
+ json_data = json_loads(json_data)
+
+ host_list = [element['regex'] for element in json_data['result']]
+
+ return host_list
diff --git a/pyload/plugins/hooks/SimplydebridCom.py b/pyload/plugins/hooks/SimplydebridCom.py
new file mode 100644
index 000000000..f7c899a48
--- /dev/null
+++ b/pyload/plugins/hooks/SimplydebridCom.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class SimplydebridCom(MultiHoster):
+ __name__ = "SimplydebridCom"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "")]
+
+ __description__ = """Simply-Debrid.com hook plugin"""
+ __author_name__ = "Kagenoshin"
+ __author_mail__ = "kagenoshin@gmx.ch"
+
+
+ def getHoster(self):
+ page = getURL("http://simply-debrid.com/api.php?list=1")
+ return [x.strip() for x in page.rstrip(';').replace("\"", "").split(";")]
diff --git a/pyload/plugins/hooks/UnSkipOnFail.py b/pyload/plugins/hooks/UnSkipOnFail.py
new file mode 100644
index 000000000..fd3b35a0a
--- /dev/null
+++ b/pyload/plugins/hooks/UnSkipOnFail.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+from os.path import basename
+
+from pyload.PyFile import PyFile
+from pyload.plugins.Hook import Hook
+from pyload.utils import fs_encode
+
+
+class UnSkipOnFail(Hook):
+ __name__ = "UnSkipOnFail"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("activated", "bool", "Activated", True)]
+
+ __description__ = """When a download fails, restart skipped duplicates"""
+ __author_name__ = "hagg"
+ __author_mail__ = None
+
+
+ def downloadFailed(self, pyfile):
+ pyfile_name = basename(pyfile.name)
+ pid = pyfile.package().id
+        msg = 'Looking for skipped duplicates of %s (pid:%s)...'
+ self.logInfo(msg % (pyfile_name, pid))
+ dups = self.findDuplicates(pyfile)
+ for link in dups:
+ # check if link is "skipped"(=4)
+ if link.status == 4:
+ lpid = link.packageID
+ self.logInfo('restart "%s" (pid:%s)...' % (pyfile_name, lpid))
+ self.setLinkStatus(link, "queued")
+
+ def findDuplicates(self, pyfile):
+        """ Search all packages for duplicate links to "pyfile".
+            Duplicates are links that would overwrite "pyfile".
+            To test for duplicates, the package folder and link
+            name (basename(link.name)) of two links are compared.
+            So this method returns a list of all links with the
+            same package folder and filename as "pyfile", except
+            the data for "pyfile" itself.
+            It does NOT check the link's status.
+        """
+ dups = []
+ pyfile_name = fs_encode(basename(pyfile.name))
+ # get packages (w/o files, as most file data is useless here)
+ queue = self.core.api.getQueue()
+ for package in queue:
+ # check if package-folder equals pyfile's package folder
+ if fs_encode(package.folder) == fs_encode(pyfile.package().folder):
+ # now get packaged data w/ files/links
+ pdata = self.core.api.getPackageData(package.pid)
+ if pdata.links:
+ for link in pdata.links:
+ link_name = fs_encode(basename(link.name))
+                        # check if the link name collides with pyfile's name
+ if link_name == pyfile_name:
+ # at last check if it is not pyfile itself
+ if link.fid != pyfile.id:
+ dups.append(link)
+ return dups
+
+ def setLinkStatus(self, link, new_status):
+ """ Change status of "link" to "new_status".
+ "link" has to be a valid FileData object,
+ "new_status" has to be a valid status name
+ (i.e. "queued" for this Plugin)
+ It creates a temporary PyFile object using
+ "link" data, changes its status, and tells
+ the core.files-manager to save its data.
+ """
+ pyfile = PyFile(self.core.files,
+ link.fid,
+ link.url,
+ link.name,
+ link.size,
+ link.status,
+ link.error,
+ link.plugin,
+ link.packageID,
+ link.order)
+ pyfile.setStatus(new_status)
+ self.core.files.save()
+ pyfile.release()
diff --git a/pyload/plugins/hooks/UnrestrictLi.py b/pyload/plugins/hooks/UnrestrictLi.py
new file mode 100644
index 000000000..1562bdf24
--- /dev/null
+++ b/pyload/plugins/hooks/UnrestrictLi.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class UnrestrictLi(MultiHoster):
+ __name__ = "UnrestrictLi"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24),
+ ("history", "bool", "Delete History", False)]
+
+ __description__ = """Unrestrict.li hook plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def getHoster(self):
+ json_data = getURL('http://unrestrict.li/api/jdownloader/hosts.php?format=json')
+ json_data = json_loads(json_data)
+
+ host_list = [element['host'] for element in json_data['result']]
+
+ return host_list
diff --git a/pyload/plugins/hooks/UpdateManager.py b/pyload/plugins/hooks/UpdateManager.py
new file mode 100644
index 000000000..ece7ca610
--- /dev/null
+++ b/pyload/plugins/hooks/UpdateManager.py
@@ -0,0 +1,281 @@
+# -*- coding: utf-8 -*-
+
+import re
+import sys
+
+from operator import itemgetter
+from os import path, remove, stat
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hook import Expose, Hook, threaded
+from pyload.utils import safe_join
+
+
+class UpdateManager(Hook):
+ __name__ = "UpdateManager"
+ __type__ = "hook"
+ __version__ = "0.35"
+
+ __config__ = [("activated", "bool", "Activated", True),
+ ("mode", "pyLoad + plugins;plugins only", "Check updates for", "pyLoad + plugins"),
+ ("interval", "int", "Check interval in hours", 8),
+ ("reloadplugins", "bool", "Monitor plugins for code changes (debug mode only)", True),
+ ("nodebugupdate", "bool", "Don't check for updates in debug mode", True)]
+
+ __description__ = """ Check for updates """
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+
+ event_list = ["pluginConfigChanged"]
+
+ SERVER_URL = "http://updatemanager.pyload.org"
+ MIN_INTERVAL = 3 * 60 * 60 #: 3h minimum check interval (value is in seconds)
+
+
+ def pluginConfigChanged(self, plugin, name, value):
+ if name == "interval":
+ interval = value * 60 * 60
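+ # chained comparison: accept the new value only if it is at least
+ # MIN_INTERVAL *and* differs from the interval currently in use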
+ if self.MIN_INTERVAL <= interval != self.interval:
+ self.core.scheduler.removeJob(self.cb)
+ self.interval = interval
+ self.initPeriodical()
+ else:
+ self.logDebug("Invalid interval value, kept current")
+ elif name == "reloadplugins":
+ if self.cb2:
+ self.core.scheduler.removeJob(self.cb2)
+ if value is True and self.core.debug:
+ self.periodical2()
+
+ def coreReady(self):
+ self.pluginConfigChanged(self.__name__, "interval", self.getConfig("interval"))
+ x = lambda: self.pluginConfigChanged(self.__name__, "reloadplugins", self.getConfig("reloadplugins"))
+ self.core.scheduler.addJob(10, x, threaded=False)
+
+ def unload(self):
+ self.pluginConfigChanged(self.__name__, "reloadplugins", False)
+
+ def setup(self):
+ self.cb2 = None
+ self.interval = self.MIN_INTERVAL
+ self.updating = False
+ self.info = {'pyload': False, 'version': None, 'plugins': False}
+ self.mtimes = {} #: store modification time for each plugin
+
+ def periodical2(self):
+ if not self.updating:
+ self.autoreloadPlugins()
+ self.cb2 = self.core.scheduler.addJob(4, self.periodical2, threaded=False)
+
+ @Expose
+ def autoreloadPlugins(self):
+ """ reload and reindex all modified plugins """
+ modules = filter(
+ lambda m: m and (m.__name__.startswith("pyload.plugins.") or
+ m.__name__.startswith("userplugins.")) and
+ m.__name__.count(".") >= 2, sys.modules.itervalues()
+ )
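+ # keep only plugin modules, e.g. "pyload.plugins.hooks.UpdateManager" or
+ # "userplugins.hoster.Foo" (hypothetical name); the mtime of each module's
+ # source file below decides whether it needs a reload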
+
+ reloads = []
+
+ for m in modules:
+ root, type, name = m.__name__.rsplit(".", 2)
+ id = (type, name)
+ if type in self.core.pluginManager.plugins:
+ f = m.__file__.replace(".pyc", ".py")
+ if not path.isfile(f):
+ continue
+
+ mtime = stat(f).st_mtime
+
+ if id not in self.mtimes:
+ self.mtimes[id] = mtime
+ elif self.mtimes[id] < mtime:
+ reloads.append(id)
+ self.mtimes[id] = mtime
+
+ return True if self.core.pluginManager.reloadPlugins(reloads) else False
+
+ def periodical(self):
+ if not self.info['pyload'] and not (self.getConfig("nodebugupdate") and self.core.debug):
+ self.updateThread()
+
+ def server_request(self):
+ try:
+ return getURL(self.SERVER_URL, get={'v': self.core.api.getServerVersion()}).splitlines()
+ except:
+ self.logWarning(_("Unable to contact server to get updates"))
+
+ @threaded
+ def updateThread(self):
+ self.updating = True
+ status = self.update(onlyplugin=self.getConfig("mode") == "plugins only")
+ if status == 2:
+ self.core.api.restart()
+ else:
+ self.updating = False
+
+ @Expose
+ def updatePlugins(self):
+ """ simple wrapper for calling plugin update quickly """
+ return self.update(onlyplugin=True)
+
+ @Expose
+ def update(self, onlyplugin=False):
+ """ check for updates """
+ data = self.server_request()
+ if not data:
+ exitcode = 0
+ elif data[0] == "None":
+ self.logInfo(_("No new pyLoad version available"))
+ updates = data[1:]
+ exitcode = self._updatePlugins(updates)
+ elif onlyplugin:
+ exitcode = 0
+ else:
+ newversion = data[0]
+ self.logInfo(_("*** New pyLoad Version %s available ***") % newversion)
+ self.logInfo(_("*** Get it here: https://github.com/pyload/pyload/releases ***"))
+ exitcode = 3
+ self.info['pyload'] = True
+ self.info['version'] = newversion
+ return exitcode #: 0 = No plugins updated; 1 = Plugins updated; 2 = Plugins updated, but restart required; 3 = No plugins updated, new pyLoad version available
+
+ def _updatePlugins(self, updates):
+ """ check for plugin updates """
+
+ if self.info['plugins']:
+ return False #: plugins were already updated
+
+ updated = []
+
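+ # As parsed below, the server response presumably looks like:
+ # line 0: url template, line 1: schema (e.g. "type|name|version"),
+ # then one line per updatable plugin, optionally followed by a
+ # "BLACKLIST" marker and the plugins to delete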
+ vre = re.compile(r'__version__.*=.*("|\')([0-9.]+)')
+ url = updates[0]
+ schema = updates[1].split('|')
+ if "BLACKLIST" in updates:
+ blacklist = updates[updates.index('BLACKLIST') + 1:]
+ updates = updates[2:updates.index('BLACKLIST')]
+ else:
+ blacklist = None
+ updates = updates[2:]
+
+ upgradable = sorted(map(lambda x: dict(zip(schema, x.split('|'))), updates), key=itemgetter("type", "name"))
+ for plugin in upgradable:
+ filename = plugin['name']
+ prefix = plugin['type']
+ version = plugin['version']
+
+ if filename.endswith(".pyc"):
+ name = filename[:filename.find("_")]
+ else:
+ name = filename.replace(".py", "")
+
+ #@TODO: obsolete after 0.4.10
+ if prefix.endswith("s"):
+ type = prefix[:-1]
+ else:
+ type = prefix
+
+ plugins = getattr(self.core.pluginManager, "%sPlugins" % type)
+
+ oldver = float(plugins[name]['v']) if name in plugins else None
+ newver = float(version)
+
+ if not oldver:
+ msg = "New [%(type)s] %(name)s (v%(newver)s)"
+ elif newver > oldver:
+ msg = "New version of [%(type)s] %(name)s (v%(oldver)s -> v%(newver)s)"
+ else:
+ continue
+
+ self.logInfo(_(msg) % {
+ 'type': type,
+ 'name': name,
+ 'oldver': oldver,
+ 'newver': newver,
+ })
+
+ try:
+ content = getURL(url % plugin)
+ m = vre.search(content)
+ if m and m.group(2) == version:
+ f = open(safe_join("userplugins", prefix, filename), "wb")
+ f.write(content)
+ f.close()
+ updated.append((prefix, name))
+ else:
+ raise Exception, _("Version mismatch")
+ except Exception, e:
+ self.logError(_("Error updating plugin %s") % filename, str(e))
+
+ if blacklist:
+ blacklisted = sorted(map(lambda x: (x.split('|')[0], x.split('|')[1].rsplit('.', 1)[0]), blacklist))
+
+ # Always protect UpdateManager from self-removing
+ try:
+ blacklisted.remove(("hook", "UpdateManager"))
+ except:
+ pass
+
+ removed = self.removePlugins(blacklisted)
+ for t, n in removed:
+ self.logInfo(_("Removed blacklisted plugin [%(type)s] %(name)s") % {
+ 'type': t,
+ 'name': n,
+ })
+
+ if updated:
+ reloaded = self.core.pluginManager.reloadPlugins(updated)
+ if reloaded:
+ self.logInfo(_("Plugins updated and reloaded"))
+ exitcode = 1
+ else:
+ self.logInfo(_("*** Plugins have been updated, but need a pyLoad restart to be reloaded ***"))
+ self.info['plugins'] = True
+ exitcode = 2
+ else:
+ self.logInfo(_("No plugin updates available"))
+ exitcode = 0
+
+ return exitcode #: 0 = No plugins updated; 1 = Plugins updated; 2 = Plugins updated, but restart required
+
+ @Expose
+ def removePlugins(self, type_plugins):
+ """ delete plugins from disk """
+
+ if not type_plugins:
+ return
+
+ self.logDebug("Request deletion of plugins: %s" % type_plugins)
+
+ removed = []
+
+ for type, name in type_plugins:
+ err = False
+ file = name + ".py"
+
+ for root in ("userplugins", path.join(pypath, "pyload", "plugins")):
+
+ filename = safe_join(root, type, file)
+ try:
+ remove(filename)
+ except Exception, e:
+ self.logDebug("Error deleting \"%s\"" % path.basename(filename), str(e))
+ err = True
+
+ filename += "c"
+ if path.isfile(filename):
+ try:
+ if type == "hook":
+ self.manager.deactivateHook(name)
+ remove(filename)
+ except Exception, e:
+ self.logDebug("Error deleting \"%s\"" % path.basename(filename), str(e))
+ err = True
+
+ if not err:
+ id = (type, name)
+ removed.append(id)
+
+ return removed #: return a list of the plugins successfully removed
diff --git a/pyload/plugins/hooks/WindowsPhoneToastNotify.py b/pyload/plugins/hooks/WindowsPhoneToastNotify.py
new file mode 100644
index 000000000..79812cefa
--- /dev/null
+++ b/pyload/plugins/hooks/WindowsPhoneToastNotify.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+import httplib
+import time
+
+from pyload.plugins.Hook import Hook
+
+
+class WindowsPhoneToastNotify(Hook):
+ __name__ = "WindowsPhoneToastNotify"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("force", "bool", "Force even if client is connected", False),
+ ("pushId", "str", "pushId", ""),
+ ("pushUrl", "str", "pushUrl", ""),
+ ("pushTimeout", "int", "Timeout between notifications in seconds", 0)]
+
+ __description__ = """Send push notifications to Windows Phone"""
+ __author_name__ = "Andy Voigt"
+ __author_mail__ = "phone-support@hotmail.de"
+
+
+ def setup(self):
+ self.info = {}
+
+ def getXmlData(self):
+ myxml = ("<?xml version='1.0' encoding='utf-8'?> <wp:Notification xmlns:wp='WPNotification'> "
+ "<wp:Toast> <wp:Text1>Pyload Mobile</wp:Text1> <wp:Text2>Captcha waiting!</wp:Text2> "
+ "</wp:Toast> </wp:Notification>")
+ return myxml
+
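+ # doRequest() posts the toast payload above to the configured MPNS push URL;
+ # the headers follow the Windows Phone push conventions
+ # (X-NotificationClass "2" apparently requests immediate toast delivery)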
+ def doRequest(self):
+ URL = self.getConfig("pushUrl")
+ request = self.getXmlData()
+ webservice = httplib.HTTP(URL)
+ webservice.putrequest("POST", self.getConfig("pushId"))
+ webservice.putheader("Host", URL)
+ webservice.putheader("Content-type", "text/xml")
+ webservice.putheader("X-NotificationClass", "2")
+ webservice.putheader("X-WindowsPhone-Target", "toast")
+ webservice.putheader("Content-length", "%d" % len(request))
+ webservice.endheaders()
+ webservice.send(request)
+ webservice.close()
+ self.setStorage("LAST_NOTIFY", time.time())
+
+ def newCaptchaTask(self, task):
+ if not self.getConfig("pushId") or not self.getConfig("pushUrl"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if (time.time() - float(self.getStorage("LAST_NOTIFY", 0))) < self.getConf("pushTimeout"):
+ return False
+
+ self.doRequest()
diff --git a/pyload/plugins/hooks/XFileSharingPro.py b/pyload/plugins/hooks/XFileSharingPro.py
new file mode 100644
index 000000000..7478034c6
--- /dev/null
+++ b/pyload/plugins/hooks/XFileSharingPro.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hook import Hook
+
+
+class XFileSharingPro(Hook):
+ __name__ = "XFileSharingPro"
+ __type__ = "hook"
+ __version__ = "0.11"
+
+ __config__ = [("activated", "bool", "Activated", True),
+ ("loadDefault", "bool", "Include default (built-in) hoster list", True),
+ ("includeList", "str", "Include hosters (comma separated)", ""),
+ ("excludeList", "str", "Exclude hosters (comma separated)", "")]
+
+ __description__ = """XFileSharingPro hook plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def coreReady(self):
+ self.loadPattern()
+
+ def loadPattern(self):
+ hosterList = self.getConfigSet('includeList')
+ excludeList = self.getConfigSet('excludeList')
+
+ if self.getConfig('loadDefault'):
+ hosterList |= set((
+ #WORKING HOSTERS:
+ "aieshare.com", "asixfiles.com", "banashare.com", "cyberlocker.ch", "eyesfile.co", "eyesfile.com",
+ "fileband.com", "filedwon.com", "filedownloads.org", "hipfile.com", "kingsupload.com", "mlfat4arab.com",
+ "netuploaded.com", "odsiebie.pl", "q4share.com", "ravishare.com", "uptobox.com", "verzend.be",
+ "xvidstage.com", "thefile.me", "sharesix.com", "hostingbulk.com",
+ #NOT TESTED:
+ "bebasupload.com", "boosterking.com", "divxme.com", "filevelocity.com", "glumbouploads.com",
+ "grupload.com", "heftyfile.com", "host4desi.com", "laoupload.com", "linkzhost.com", "movreel.com",
+ "rockdizfile.com", "limfile.com", "share76.com", "sharebeast.com", "sharehut.com", "sharerun.com",
+ "shareswift.com", "sharingonline.com", "6ybh-upload.com", "skipfile.com", "spaadyshare.com",
+ "space4file.com", "uploadbaz.com", "uploadc.com", "uploaddot.com", "uploadfloor.com", "uploadic.com",
+ "uploadville.com", "vidbull.com", "zalaa.com", "zomgupload.com", "kupload.org", "movbay.org",
+ "multishare.org", "omegave.org", "toucansharing.org", "uflinq.org", "banicrazy.info", "flowhot.info",
+ "upbrasil.info", "shareyourfilez.biz", "bzlink.us", "cloudcache.cc", "fileserver.cc", "farshare.to",
+ "filemaze.ws", "filehost.ws", "filestock.ru", "moidisk.ru", "4up.im", "100shared.com", "sharesix.com",
+ "thefile.me", "filenuke.com", "sharerepo.com", "mightyupload.com",
+ #WRONG FILE NAME:
+ "sendmyway.com", "upchi.co.il",
+ #NOT WORKING:
+ "amonshare.com", "imageporter.com", "file4safe.com",
+ #DOWN OR BROKEN:
+ "ddlanime.com", "fileforth.com", "loombo.com", "goldfile.eu", "putshare.com"
+ ))
+
+ hosterList -= (excludeList)
+ hosterList -= set(('', u''))
+
+ if not hosterList:
+ self.unload()
+ return
+
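+ # build one pattern covering all selected hosters, roughly of the form
+ # http://(?:[^/]*\.)?(host1\.com|host2\.net|...)/\w{12} (hosts illustrative)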
+ regexp = r"http://(?:[^/]*\.)?(%s)/\w{12}" % ("|".join(sorted(hosterList)).replace('.', '\.'))
+ #self.logDebug(regexp)
+
+ dict = self.core.pluginManager.hosterPlugins['XFileSharingPro']
+ dict['pattern'] = regexp
+ dict['re'] = re.compile(regexp)
+ self.logDebug("Pattern loaded - handling %d hosters" % len(hosterList))
+
+ def getConfigSet(self, option):
+ s = self.getConfig(option).lower().replace('|', ',').replace(';', ',')
+ return set([x.strip() for x in s.split(',')])
+
+ def unload(self):
+ dict = self.core.pluginManager.hosterPlugins['XFileSharingPro']
+ dict['pattern'] = r'^unmatchable$'
+ dict['re'] = re.compile(r'^unmatchable$')
diff --git a/pyload/plugins/hooks/XMPPInterface.py b/pyload/plugins/hooks/XMPPInterface.py
new file mode 100644
index 000000000..881e7f5dc
--- /dev/null
+++ b/pyload/plugins/hooks/XMPPInterface.py
@@ -0,0 +1,233 @@
+# -*- coding: utf-8 -*-
+
+from pyxmpp import streamtls
+from pyxmpp.all import JID, Message
+from pyxmpp.interface import implements
+from pyxmpp.interfaces import *
+from pyxmpp.jabber.client import JabberClient
+
+from pyload.plugins.hooks.IRCInterface import IRCInterface
+
+
+class XMPPInterface(IRCInterface, JabberClient):
+ __name__ = "XMPPInterface"
+ __type__ = "hook"
+ __version__ = "0.11"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("jid", "str", "Jabber ID", "user@example-jabber-server.org"),
+ ("pw", "str", "Password", ""),
+ ("tls", "bool", "Use TLS", False),
+ ("owners", "str", "List of JIDs accepting commands from", "me@icq-gateway.org;some@msn-gateway.org"),
+ ("info_file", "bool", "Inform about every file finished", False),
+ ("info_pack", "bool", "Inform about every package finished", True),
+ ("captcha", "bool", "Send captcha requests", True)]
+
+ __description__ = """Connect to jabber and let owner perform different tasks"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ implements(IMessageHandlersProvider)
+
+ def __init__(self, core, manager):
+ IRCInterface.__init__(self, core, manager)
+
+ self.jid = JID(self.getConfig("jid"))
+ password = self.getConfig("pw")
+
+ # if bare JID is provided add a resource -- it is required
+ if not self.jid.resource:
+ self.jid = JID(self.jid.node, self.jid.domain, "pyLoad")
+
+ if self.getConfig("tls"):
+ tls_settings = streamtls.TLSSettings(require=True, verify_peer=False)
+ auth = ("sasl:PLAIN", "sasl:DIGEST-MD5")
+ else:
+ tls_settings = None
+ auth = ("sasl:DIGEST-MD5", "digest")
+
+ # setup client with provided connection information
+ # and identity data
+ JabberClient.__init__(self, self.jid, password,
+ disco_name="pyLoad XMPP Client", disco_type="bot",
+ tls_settings=tls_settings, auth_methods=auth)
+
+ self.interface_providers = [
+ VersionHandler(self),
+ self,
+ ]
+
+ def coreReady(self):
+ self.new_package = {}
+
+ self.start()
+
+ def packageFinished(self, pypack):
+ try:
+ if self.getConfig("info_pack"):
+ self.announce(_("Package finished: %s") % pypack.name)
+ except:
+ pass
+
+ def downloadFinished(self, pyfile):
+ try:
+ if self.getConfig("info_file"):
+ self.announce(
+ _("Download finished: %(name)s @ %(plugin)s") % {"name": pyfile.name, "plugin": pyfile.pluginname})
+ except:
+ pass
+
+ def run(self):
+ # connect to IRC etc.
+ self.connect()
+ try:
+ self.loop()
+ except Exception, ex:
+ self.logError("pyLoad XMPP: %s" % str(ex))
+
+ def stream_state_changed(self, state, arg):
+ """This one is called when the state of the stream connecting the component
+ to a server changes. This will usually be used to let the user
+ know what is going on."""
+ self.logDebug("pyLoad XMPP: *** State changed: %s %r ***" % (state, arg))
+
+ def disconnected(self):
+ self.logDebug("pyLoad XMPP: Client was disconnected")
+
+ def stream_closed(self, stream):
+ self.logDebug("pyLoad XMPP: Stream was closed | %s" % stream)
+
+ def stream_error(self, err):
+ self.logDebug("pyLoad XMPP: Stream Error: %s" % err)
+
+ def get_message_handlers(self):
+ """Return list of (message_type, message_handler) tuples.
+
+ The handlers returned will be called when matching message is received
+ in a client session."""
+ return [("normal", self.message)]
+
+ def message(self, stanza):
+ """Message handler for the component."""
+ subject = stanza.get_subject()
+ body = stanza.get_body()
+ t = stanza.get_type()
+ self.logDebug(u'pyLoad XMPP: Message from %s received.' % (unicode(stanza.get_from(),)))
+ self.logDebug(u'pyLoad XMPP: Body: %s Subject: %s Type: %s' % (body, subject, t))
+
+ if t == "headline":
+ # 'headline' messages should never be replied to
+ return True
+ if subject:
+ subject = u"Re: " + subject
+
+ to_jid = stanza.get_from()
+ from_jid = stanza.get_to()
+
+ #j = JID()
+ to_name = to_jid.as_utf8()
+ from_name = from_jid.as_utf8()
+
+ names = self.getConfig("owners").split(";")
+
+ if to_name in names or to_jid.node + "@" + to_jid.domain in names:
+ messages = []
+
+ trigger = "pass"
+ args = None
+
+ try:
+ temp = body.split()
+ trigger = temp[0]
+ if len(temp) > 1:
+ args = temp[1:]
+ except:
+ pass
+
+ handler = getattr(self, "event_%s" % trigger, self.event_pass)
+ try:
+ res = handler(args)
+ for line in res:
+ m = Message(
+ to_jid=to_jid,
+ from_jid=from_jid,
+ stanza_type=stanza.get_type(),
+ subject=subject,
+ body=line)
+
+ messages.append(m)
+ except Exception, e:
+ self.logError("pyLoad XMPP: " + repr(e))
+
+ return messages
+
+ else:
+ return True
+
+ def response(self, msg, origin=""):
+ return self.announce(msg)
+
+ def announce(self, message):
+ """ send message to all owners"""
+ for user in self.getConfig("owners").split(";"):
+ self.logDebug("pyLoad XMPP: Send message to %s" % user)
+
+ to_jid = JID(user)
+
+ m = Message(from_jid=self.jid,
+ to_jid=to_jid,
+ stanza_type="chat",
+ body=message)
+
+ stream = self.get_stream()
+ if not stream:
+ self.connect()
+ stream = self.get_stream()
+
+ stream.send(m)
+
+ def beforeReconnecting(self, ip):
+ self.disconnect()
+
+ def afterReconnecting(self, ip):
+ self.connect()
+
+
+class VersionHandler(object):
+ """Provides handler for a version query.
+
+ This class will answer version query and announce 'jabber:iq:version' namespace
+ in the client's disco#info results."""
+
+ implements(IIqHandlersProvider, IFeaturesProvider)
+
+ def __init__(self, client):
+ """Just remember who created this."""
+ self.client = client
+
+ def get_features(self):
+ """Return namespace which should the client include in its reply to a
+ disco#info query."""
+ return ["jabber:iq:version"]
+
+ def get_iq_get_handlers(self):
+ """Return list of tuples (element_name, namespace, handler) describing
+ handlers of <iq type='get'/> stanzas"""
+ return [("query", "jabber:iq:version", self.get_version)]
+
+ def get_iq_set_handlers(self):
+ """Return empty list, as this class provides no <iq type='set'/> stanza handler."""
+ return []
+
+ def get_version(self, iq):
+ """Handler for jabber:iq:version queries.
+
+ jabber:iq:version queries are not supported directly by PyXMPP, so the
+ XML node is accessed directly through the libxml2 API. This should be
+ used very carefully!"""
+ iq = iq.make_result_response()
+ q = iq.new_query("jabber:iq:version")
+ q.newTextChild(q.ns(), "name", "Echo component")
+ q.newTextChild(q.ns(), "version", "1.0")
+ return iq
diff --git a/pyload/plugins/hooks/ZeveraCom.py b/pyload/plugins/hooks/ZeveraCom.py
new file mode 100644
index 000000000..155143f64
--- /dev/null
+++ b/pyload/plugins/hooks/ZeveraCom.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class ZeveraCom(MultiHoster):
+ __name__ = "ZeveraCom"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "")]
+
+ __description__ = """Zevera.com hook plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def getHoster(self):
+ page = getURL("http://www.zevera.com/jDownloader.ashx?cmd=gethosters")
+ return [x.strip() for x in page.replace("\"", "").split(",")]
diff --git a/pyload/plugins/hooks/__init__.py b/pyload/plugins/hooks/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/hooks/__init__.py
diff --git a/pyload/plugins/hoster/AlldebridCom.py b/pyload/plugins/hoster/AlldebridCom.py
new file mode 100644
index 000000000..1b115f19e
--- /dev/null
+++ b/pyload/plugins/hoster/AlldebridCom.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import randrange
+from urllib import unquote
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import parseFileSize
+
+
+class AlldebridCom(Hoster):
+ __name__ = "AlldebridCom"
+ __type__ = "hoster"
+ __version__ = "0.34"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?alldebrid\..*'
+
+ __description__ = """Alldebrid.com hoster plugin"""
+ __author_name__ = "Andy Voigt"
+ __author_mail__ = "spamsales@online.de"
+
+
+ def getFilename(self, url):
+ try:
+ name = unquote(url.rsplit("/", 1)[1])
+ except IndexError:
+ name = "Unknown_Filename..."
+ if name.endswith("..."): # incomplete filename, append random stuff
+ name += "%s.tmp" % randrange(100, 999)
+ return name
+
+ def setup(self):
+ self.chunkLimit = 16
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "AllDebrid")
+ self.fail("No AllDebrid account provided")
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ password = self.getPassword().splitlines()
+ password = "" if not password else password[0]
+
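+ # the service.php API seems to answer with JSON containing at least
+ # "error", "filename", "filesize" and "link", which is all this plugin uses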
+ url = "http://www.alldebrid.com/service.php?link=%s&json=true&pw=%s" % (pyfile.url, password)
+ page = self.load(url)
+ data = json_loads(page)
+
+ self.logDebug("Json data: %s" % str(data))
+
+ if data['error']:
+ if data['error'] == "This link isn't available on the hoster website.":
+ self.offline()
+ else:
+ self.logWarning(data['error'])
+ self.tempOffline()
+ else:
+ if pyfile.name and not pyfile.name.endswith('.tmp'):
+ pyfile.name = data['filename']
+ pyfile.size = parseFileSize(data['filesize'])
+ new_url = data['link']
+
+ if self.getConfig("https"):
+ new_url = new_url.replace("http://", "https://")
+ else:
+ new_url = new_url.replace("https://", "http://")
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown"):
+ # only use when name wasn't already set
+ pyfile.name = self.getFilename(new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({"error": "<title>An error occured while processing your request</title>",
+ "empty": re.compile(r"^$")})
+
+ if check == "error":
+ self.retry(wait_time=60, reason="An error occurred while generating link.")
+ elif check == "empty":
+ self.retry(wait_time=60, reason="Downloaded File was empty.")
diff --git a/pyload/plugins/hoster/BasePlugin.py b/pyload/plugins/hoster/BasePlugin.py
new file mode 100644
index 000000000..55cdf5b88
--- /dev/null
+++ b/pyload/plugins/hoster/BasePlugin.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+
+from re import match, search
+from urllib import unquote
+from urlparse import urlparse
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import html_unescape, remove_chars
+
+
+class BasePlugin(Hoster):
+ __name__ = "BasePlugin"
+ __type__ = "hoster"
+ __version__ = "0.20"
+
+ __pattern__ = r'^unmatchable$'
+
+ __description__ = """Base plugin used when no other plugin matches"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ """main function"""
+
+ #debug part, for api exerciser
+ if pyfile.url.startswith("DEBUG_API"):
+ self.multiDL = False
+ return
+
+ # self.__name__ = "NetloadIn"
+ # pyfile.name = "test"
+ # self.html = self.load("http://localhost:9000/short")
+ # self.download("http://localhost:9000/short")
+ # self.api = self.load("http://localhost:9000/short")
+ # self.decryptCaptcha("http://localhost:9000/captcha")
+ #
+ # if pyfile.url == "79":
+ # self.core.api.addPackage("test", [str(i) for i in xrange(80)], 1)
+ #
+ # return
+ if pyfile.url.startswith("http"):
+
+ try:
+ self.downloadFile(pyfile)
+ except BadHeader, e:
+ if e.code in (401, 403):
+ self.logDebug("Auth required")
+
+ account = self.core.accountManager.getAccountPlugin('Http')
+ servers = [x['login'] for x in account.getAllAccounts()]
+ server = urlparse(pyfile.url).netloc
+
+ if server in servers:
+ self.logDebug("Logging on to %s" % server)
+ self.req.addAuth(account.accounts[server]['password'])
+ else:
+ for pwd in pyfile.package().password.splitlines():
+ if ":" in pwd:
+ self.req.addAuth(pwd.strip())
+ break
+ else:
+ self.fail(_("Authorization required (username:password)"))
+
+ self.downloadFile(pyfile)
+ else:
+ raise
+
+ else:
+ self.fail("No plugin matched and the URL is not directly downloadable.")
+
+ def downloadFile(self, pyfile):
+ url = pyfile.url
+
+ for _ in xrange(5):
+ header = self.load(url, just_header=True)
+
+ # self.load does not raise a BadHeader on 404 responses, do it here
+ if 'code' in header and header['code'] == 404:
+ raise BadHeader(404)
+
+ if 'location' in header:
+ self.logDebug("Location: " + header['location'])
+ base = match(r'https?://[^/]+', url).group(0)
+ if header['location'].startswith("http"):
+ url = header['location']
+ elif header['location'].startswith("/"):
+ url = base + unquote(header['location'])
+ else:
+ url = '%s/%s' % (base, unquote(header['location']))
+ else:
+ break
+
+ name = html_unescape(unquote(urlparse(url).path.split("/")[-1]))
+
+ if 'content-disposition' in header:
+ self.logDebug("Content-Disposition: " + header['content-disposition'])
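+ # matches both plain 'filename="x"' and RFC 5987 style
+ # "filename*=UTF-8''x" headers; the encoding group is optional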
+ m = search("filename(?P<type>=|\*=(?P<enc>.+)'')(?P<name>.*)", header['content-disposition'])
+ if m:
+ disp = m.groupdict()
+ self.logDebug(disp)
+ if not disp['enc']:
+ disp['enc'] = 'utf-8'
+ name = remove_chars(disp['name'], "\"';").strip()
+ name = unicode(unquote(name), disp['enc'])
+
+ if not name:
+ name = url
+ pyfile.name = name
+ self.logDebug("Filename: %s" % pyfile.name)
+ self.download(url, disposition=True)
diff --git a/pyload/plugins/hoster/BayfilesCom.py b/pyload/plugins/hoster/BayfilesCom.py
new file mode 100644
index 000000000..ea4bd3ca5
--- /dev/null
+++ b/pyload/plugins/hoster/BayfilesCom.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class BayfilesCom(SimpleHoster):
+ __name__ = "BayfilesCom"
+ __type__ = "hoster"
+ __version__ = "0.07"
+
+ __pattern__ = r'https?://(?:www\.)?bayfiles\.(com|net)/file/(?P<ID>[a-zA-Z0-9]+/[a-zA-Z0-9]+/[^/]+)'
+
+ __description__ = """Bayfiles.com hoster plugin"""
+ __author_name__ = ("zoidberg", "Walter Purcaro")
+ __author_mail__ = ("zoidberg@mujmail.cz", "vuolter@gmail.com")
+
+ FILE_INFO_PATTERN = r'<p title="(?P<N>[^"]+)">[^<]*<strong>(?P<S>[0-9., ]+)(?P<U>[kKMG])i?B</strong></p>'
+ OFFLINE_PATTERN = r'(<p>The requested file could not be found.</p>|<title>404 Not Found</title>)'
+
+ WAIT_PATTERN = r'>Your IP [0-9.]* has recently downloaded a file\. Upgrade to premium or wait (\d+) minutes\.<'
+ VARS_PATTERN = r'var vfid = (\d+);\s*var delay = (\d+);'
+ FREE_LINK_PATTERN = r"javascript:window.location.href = '([^']+)';"
+ PREMIUM_LINK_PATTERN = r'(?:<a class="highlighted-btn" href="|(?=http://s\d+\.baycdn\.com/dl/))(.*?)"'
+
+
+ def handleFree(self):
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ self.wait(int(m.group(1)) * 60)
+ self.retry()
+
+ # Get download token
+ m = re.search(self.VARS_PATTERN, self.html)
+ if m is None:
+ self.parseError('VARS')
+ vfid, delay = m.groups()
+
+ response = json_loads(self.load('http://bayfiles.com/ajax_download', get={
+ "_": time() * 1000,
+ "action": "startTimer",
+ "vfid": vfid}, decode=True))
+
+ if not "token" in response or not response['token']:
+ self.fail('No token')
+
+ self.wait(int(delay))
+
+ self.html = self.load('http://bayfiles.com/ajax_download', get={
+ "token": response['token'],
+ "action": "getLink",
+ "vfid": vfid})
+
+ # Get final link and download
+ m = re.search(self.FREE_LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError("Free link")
+ self.startDownload(m.group(1))
+
+ def handlePremium(self):
+ m = re.search(self.PREMIUM_LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError("Premium link")
+ self.startDownload(m.group(1))
+
+ def startDownload(self, url):
+ self.logDebug("%s URL: %s" % ("Premium" if self.premium else "Free", url))
+ self.download(url)
+ # check download
+ check = self.checkDownload({
+ "waitforfreeslots": re.compile(r"<title>BayFiles</title>"),
+ "notfound": re.compile(r"<title>404 Not Found</title>")
+ })
+ if check == "waitforfreeslots":
+ self.retry(30, 5 * 60, "Wait for free slot")
+ elif check == "notfound":
+ self.retry(30, 5 * 60, "404 Not found")
+
+
+getInfo = create_getInfo(BayfilesCom)
diff --git a/pyload/plugins/hoster/BezvadataCz.py b/pyload/plugins/hoster/BezvadataCz.py
new file mode 100644
index 000000000..8b989da67
--- /dev/null
+++ b/pyload/plugins/hoster/BezvadataCz.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class BezvadataCz(SimpleHoster):
+ __name__ = "BezvadataCz"
+ __type__ = "hoster"
+ __version__ = "0.24"
+
+ __pattern__ = r'http://(?:www\.)?bezvadata.cz/stahnout/.*'
+
+ __description__ = """BezvaData.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<p><b>Soubor: (?P<N>[^<]+)</b></p>'
+ FILE_SIZE_PATTERN = r'<li><strong>Velikost:</strong> (?P<S>[^<]+)</li>'
+ OFFLINE_PATTERN = r'<title>BezvaData \| Soubor nenalezen</title>'
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+
+ def handleFree(self):
+ #download button
+ m = re.search(r'<a class="stahnoutSoubor".*?href="(.*?)"', self.html)
+ if m is None:
+ self.parseError("page1 URL")
+ url = "http://bezvadata.cz%s" % m.group(1)
+
+ #captcha form
+ self.html = self.load(url)
+ self.checkErrors()
+ for _ in xrange(5):
+ action, inputs = self.parseHtmlForm('frm-stahnoutFreeForm')
+ if not inputs:
+ self.parseError("FreeForm")
+
+ m = re.search(r'<img src="data:image/png;base64,(.*?)"', self.html)
+ if m is None:
+ self.parseError("captcha img")
+
+ #captcha image is contained in html page as base64encoded data but decryptCaptcha() expects image url
+ self.load, proper_load = self.loadcaptcha, self.load
+ try:
+ inputs['captcha'] = self.decryptCaptcha(m.group(1), imgtype='png')
+ finally:
+ self.load = proper_load
+
+ if '<img src="data:image/png;base64' in self.html:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail("No valid captcha code entered")
+
+ #download url
+ self.html = self.load("http://bezvadata.cz%s" % action, post=inputs)
+ self.checkErrors()
+ m = re.search(r'<a class="stahnoutSoubor2" href="(.*?)">', self.html)
+ if m is None:
+ self.parseError("page2 URL")
+ url = "http://bezvadata.cz%s" % m.group(1)
+ self.logDebug("DL URL %s" % url)
+
+ #countdown
+ m = re.search(r'id="countdown">(\d\d):(\d\d)<', self.html)
+ wait_time = (int(m.group(1)) * 60 + int(m.group(2)) + 1) if m else 120
+ self.wait(wait_time, False)
+
+ self.download(url)
+
+ def checkErrors(self):
+ if 'images/button-download-disable.png' in self.html:
+ self.longWait(5 * 60, 24) # parallel dl limit
+ elif '<div class="infobox' in self.html:
+ self.tempOffline()
+
+ def loadcaptcha(self, data, *args, **kwargs):
+ return data.decode("base64")
+
+
+getInfo = create_getInfo(BezvadataCz)
diff --git a/pyload/plugins/hoster/BillionuploadsCom.py b/pyload/plugins/hoster/BillionuploadsCom.py
new file mode 100644
index 000000000..6c14d103d
--- /dev/null
+++ b/pyload/plugins/hoster/BillionuploadsCom.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class BillionuploadsCom(XFileSharingPro):
+ __name__ = "BillionuploadsCom"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?billionuploads.com/\w{12}'
+
+ __description__ = """Billionuploads.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ HOSTER_NAME = "billionuploads.com"
+
+ FILE_NAME_PATTERN = r'<b>Filename:</b>(?P<N>.*?)<br>'
+ FILE_SIZE_PATTERN = r'<b>Size:</b>(?P<S>.*?)<br>'
+
+
+getInfo = create_getInfo(BillionuploadsCom)
diff --git a/pyload/plugins/hoster/BitshareCom.py b/pyload/plugins/hoster/BitshareCom.py
new file mode 100644
index 000000000..897206f87
--- /dev/null
+++ b/pyload/plugins/hoster/BitshareCom.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class BitshareCom(SimpleHoster):
+ __name__ = "BitshareCom"
+ __type__ = "hoster"
+ __version__ = "0.50"
+
+ __pattern__ = r'http://(?:www\.)?bitshare\.com/(files/(?P<id1>[a-zA-Z0-9]+)(/(?P<name>.*?)\.html)?|\?f=(?P<id2>[a-zA-Z0-9]+))'
+
+ __description__ = """Bitshare.com hoster plugin"""
+ __author_name__ = ("Paul King", "fragonib")
+ __author_mail__ = ("", "fragonib[AT]yahoo[DOT]es")
+
+ FILE_INFO_PATTERN = r'Downloading (?P<N>.+) - (?P<S>[\d.]+) (?P<U>\w+)</h1>'
+ OFFLINE_PATTERN = r'(>We are sorry, but the requested file was not found in our database|>Error - File not available<|The file was deleted either by the uploader, inactivity or due to copyright claim)'
+
+ FILE_AJAXID_PATTERN = r'var ajaxdl = "(.*?)";'
+ CAPTCHA_KEY_PATTERN = r'http://api\.recaptcha\.net/challenge\?k=(.*?) '
+ TRAFFIC_USED_UP = r'Your Traffic is used up for today. Upgrade to premium to continue!'
+
+
+ def setup(self):
+ self.req.cj.setCookie(".bitshare.com", "language_selection", "EN")
+ self.multiDL = self.premium
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ if self.premium:
+ self.account.relogin(self.user)
+
+ self.pyfile = pyfile
+
+ # File id
+ m = re.match(self.__pattern__, pyfile.url)
+ self.file_id = max(m.group('id1'), m.group('id2'))
+ self.logDebug("File id is [%s]" % self.file_id)
+
+ # Load main page
+ self.html = self.load(pyfile.url, ref=False, decode=True)
+
+ # Check offline
+ if re.search(self.OFFLINE_PATTERN, self.html):
+ self.offline()
+
+ # Check Traffic used up
+ if re.search(self.TRAFFIC_USED_UP, self.html):
+ self.logInfo("Your Traffic is used up for today")
+ self.wait(30 * 60, True)
+ self.retry()
+
+ # File name
+ m = re.match(self.__pattern__, pyfile.url)
+ name1 = m.group('name') if m else None
+ m = re.search(self.FILE_INFO_PATTERN, self.html)
+ name2 = m.group('N') if m else None
+ pyfile.name = max(name1, name2)
+
+ # Ajax file id
+ self.ajaxid = re.search(self.FILE_AJAXID_PATTERN, self.html).group(1)
+ self.logDebug("File ajax id is [%s]" % self.ajaxid)
+
+ # This may either download our file or forward us to an error page
+ url = self.getDownloadUrl()
+ self.logDebug("Downloading file with url [%s]" % url)
+ self.download(url)
+
+ check = self.checkDownload({"404": ">404 Not Found<", "error": ">Error occured<"})
+ if check == "404":
+ self.retry(3, 60, 'Error 404')
+ elif check == "error":
+ self.retry(5, 5 * 60, "Bitshare host: Error occurred")
+
+ def getDownloadUrl(self):
+ # Return location if direct download is active
+ if self.premium:
+ header = self.load(self.pyfile.url, cookies=True, just_header=True)
+ if 'location' in header:
+ return header['location']
+
+ # Get download info
+ self.logDebug("Getting download info")
+ response = self.load("http://bitshare.com/files-ajax/" + self.file_id + "/request.html",
+ post={"request": "generateID", "ajaxid": self.ajaxid})
+ self.handleErrors(response, ':')
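+ # the ajax response appears to be colon-separated:
+ # <filetype>:<wait seconds>:<captcha flag>, e.g. "file:30:1" (illustrative)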
+ parts = response.split(":")
+ filetype = parts[0]
+ wait = int(parts[1])
+ captcha = int(parts[2])
+ self.logDebug("Download info [type: '%s', waiting: %d, captcha: %d]" % (filetype, wait, captcha))
+
+ # Waiting
+ if wait > 0:
+ self.logDebug("Waiting %d seconds." % wait)
+ if wait < 120:
+ self.wait(wait, False)
+ else:
+ self.wait(wait - 55, True)
+ self.retry()
+
+ # Resolve captcha
+ if captcha == 1:
+ self.logDebug("File is captcha protected")
+ id = re.search(self.CAPTCHA_KEY_PATTERN, self.html).group(1)
+ # Try up to 3 times
+ for i in xrange(3):
+ self.logDebug("Resolving ReCaptcha with key [%s], round %d" % (id, i + 1))
+ recaptcha = ReCaptcha(self)
+ challenge, code = recaptcha.challenge(id)
+ response = self.load("http://bitshare.com/files-ajax/" + self.file_id + "/request.html",
+ post={"request": "validateCaptcha", "ajaxid": self.ajaxid,
+ "recaptcha_challenge_field": challenge, "recaptcha_response_field": code})
+ if self.handleCaptchaErrors(response):
+ break
+
+ # Get download URL
+ self.logDebug("Getting download url")
+ response = self.load("http://bitshare.com/files-ajax/" + self.file_id + "/request.html",
+ post={"request": "getDownloadURL", "ajaxid": self.ajaxid})
+ self.handleErrors(response, '#')
+ url = response.split("#")[-1]
+
+ return url
+
+ def handleErrors(self, response, separator):
+ self.logDebug("Checking response [%s]" % response)
+ if "ERROR:Session timed out" in response:
+ self.retry()
+ elif "ERROR" in response:
+ msg = response.split(separator)[-1]
+ self.fail(msg)
+
+ def handleCaptchaErrors(self, response):
+ self.logDebug("Result of captcha resolving [%s]" % response)
+ if "SUCCESS" in response:
+ self.correctCaptcha()
+ return True
+ elif "ERROR:SESSION ERROR" in response:
+ self.retry()
+ self.logDebug("Wrong captcha")
+ self.invalidCaptcha()
+
+
+getInfo = create_getInfo(BitshareCom)
diff --git a/pyload/plugins/hoster/BoltsharingCom.py b/pyload/plugins/hoster/BoltsharingCom.py
new file mode 100644
index 000000000..196e801e4
--- /dev/null
+++ b/pyload/plugins/hoster/BoltsharingCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class BoltsharingCom(DeadHoster):
+ __name__ = "BoltsharingCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?boltsharing.com/\w{12}'
+
+ __description__ = """Boltsharing.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+getInfo = create_getInfo(BoltsharingCom)
diff --git a/pyload/plugins/hoster/CatShareNet.py b/pyload/plugins/hoster/CatShareNet.py
new file mode 100644
index 000000000..415ec2379
--- /dev/null
+++ b/pyload/plugins/hoster/CatShareNet.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class CatShareNet(SimpleHoster):
+ __name__ = "CatShareNet"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?catshare.net/\w{16}.*'
+
+ __description__ = """CatShare.net hoster plugin"""
+ __author_name__ = "z00nx"
+ __author_mail__ = "z00nx0@gmail.com"
+
+ FILE_INFO_PATTERN = r'<h3 class="pull-left"[^>]+>(?P<N>.*)</h3>\s+<h3 class="pull-right"[^>]+>(?P<S>.*)</h3>'
+ OFFLINE_PATTERN = r'Podany plik zosta'
+
+ SECONDS_PATTERN = r'var\s+count\s+=\s+(\d+);'
+
+ RECAPTCHA_KEY = "6Lfln9kSAAAAANZ9JtHSOgxUPB9qfDFeLUI_QMEy"
+
+
+ def handleFree(self):
+ m = re.search(self.SECONDS_PATTERN, self.html)
+ seconds = int(m.group(1))
+ self.logDebug("Seconds found", seconds)
+ self.wait(seconds + 1)
+ recaptcha = ReCaptcha(self)
+ challenge, code = recaptcha.challenge(self.RECAPTCHA_KEY)
+ post_data = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": code}
+ self.download(self.pyfile.url, post=post_data)
+ check = self.checkDownload({"html": re.compile("\A<!DOCTYPE html PUBLIC")})
+ if check == "html":
+ self.logDebug("Wrong captcha entered")
+ self.invalidCaptcha()
+ self.retry()
+
+
+getInfo = create_getInfo(CatShareNet)
diff --git a/pyload/plugins/hoster/CloudzerNet.py b/pyload/plugins/hoster/CloudzerNet.py
new file mode 100644
index 000000000..88313acee
--- /dev/null
+++ b/pyload/plugins/hoster/CloudzerNet.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class CloudzerNet(DeadHoster):
+ __name__ = "CloudzerNet"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'https?://(?:www\.)?(cloudzer\.net/file/|clz\.to/(file/)?)\w+'
+
+ __description__ = """Cloudzer.net hoster plugin"""
+ __author_name__ = ("gs", "z00nx", "stickell")
+ __author_mail__ = ("I-_-I-_-I@web.de", "z00nx0@gmail.com", "l.stickell@yahoo.it")
+
+
+getInfo = create_getInfo(CloudzerNet)
diff --git a/pyload/plugins/hoster/CramitIn.py b/pyload/plugins/hoster/CramitIn.py
new file mode 100644
index 000000000..6c5142d96
--- /dev/null
+++ b/pyload/plugins/hoster/CramitIn.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class CramitIn(XFileSharingPro):
+ __name__ = "CramitIn"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?cramit.in/\w{12}'
+
+ __description__ = """Cramit.in hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ HOSTER_NAME = "cramit.in"
+
+ FILE_INFO_PATTERN = r'<span class=t2>\s*(?P<N>.*?)</span>.*?<small>\s*\((?P<S>.*?)\)'
+ LINK_PATTERN = r'href="(http://cramit.in/file_download/.*?)"'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = self.premium
+
+
+getInfo = create_getInfo(CramitIn)
diff --git a/pyload/plugins/hoster/CrockoCom.py b/pyload/plugins/hoster/CrockoCom.py
new file mode 100644
index 000000000..c1e941553
--- /dev/null
+++ b/pyload/plugins/hoster/CrockoCom.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class CrockoCom(SimpleHoster):
+ __name__ = "CrockoCom"
+ __type__ = "hoster"
+ __version__ = "0.16"
+
+ __pattern__ = r'http://(?:www\.)?(crocko|easy-share).com/\w+'
+
+ __description__ = """Crocko hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<span class="fz24">Download:\s*<strong>(?P<N>.*)'
+ FILE_SIZE_PATTERN = r'<span class="tip1"><span class="inner">(?P<S>[^<]+)</span></span>'
+ OFFLINE_PATTERN = r"<h1>Sorry,<br />the page you're looking for <br />isn't here.</h1>|File not found"
+
+ CAPTCHA_URL_PATTERN = re.compile(r"u='(/file_contents/captcha/\w+)';\s*w='(\d+)';")
+ CAPTCHA_KEY_PATTERN = re.compile(r'Recaptcha.create\("([^"]+)"')
+
+ FORM_PATTERN = r'<form method="post" action="([^"]+)">(.*?)</form>'
+ FORM_INPUT_PATTERN = r'<input[^>]* name="?([^" ]+)"? value="?([^" ]+)"?[^>]*>'
+
+ FILE_NAME_REPLACEMENTS = [(r'<[^>]*>', '')]
+
+
+ def handleFree(self):
+ if "You need Premium membership to download this file." in self.html:
+ self.fail("You need Premium membership to download this file.")
+
+ for _ in xrange(5):
+ m = re.search(self.CAPTCHA_URL_PATTERN, self.html)
+ if m:
+ url, wait_time = 'http://crocko.com' + m.group(1), m.group(2)
+ self.wait(wait_time)
+ self.html = self.load(url)
+ else:
+ break
+
+ m = re.search(self.CAPTCHA_KEY_PATTERN, self.html)
+ if m is None:
+ self.parseError('Captcha KEY')
+ captcha_key = m.group(1)
+
+ m = re.search(self.FORM_PATTERN, self.html, re.DOTALL)
+ if m is None:
+ self.parseError('ACTION')
+ action, form = m.groups()
+ inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+
+ recaptcha = ReCaptcha(self)
+
+ for _ in xrange(5):
+ inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(captcha_key)
+ self.download(action, post=inputs)
+
+ check = self.checkDownload({
+ "captcha_err": self.CAPTCHA_KEY_PATTERN
+ })
+
+ if check == "captcha_err":
+ self.invalidCaptcha()
+ else:
+ break
+ else:
+ self.fail('No valid captcha solution received')
+
+
+getInfo = create_getInfo(CrockoCom)
diff --git a/pyload/plugins/hoster/CyberlockerCh.py b/pyload/plugins/hoster/CyberlockerCh.py
new file mode 100644
index 000000000..7c97deedb
--- /dev/null
+++ b/pyload/plugins/hoster/CyberlockerCh.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class CyberlockerCh(DeadHoster):
+ __name__ = "CyberlockerCh"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?cyberlocker\.ch/\w+'
+
+ __description__ = """Cyberlocker.ch hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+getInfo = create_getInfo(CyberlockerCh)
diff --git a/pyload/plugins/hoster/CzshareCom.py b/pyload/plugins/hoster/CzshareCom.py
new file mode 100644
index 000000000..0e6fab15a
--- /dev/null
+++ b/pyload/plugins/hoster/CzshareCom.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://czshare.com/5278880/random.bin
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from pyload.utils import parseFileSize
+
+
+class CzshareCom(SimpleHoster):
+ __name__ = "CzshareCom"
+ __type__ = "hoster"
+ __version__ = "0.94"
+
+ __pattern__ = r'http://(?:www\.)?(czshare|sdilej)\.(com|cz)/(\d+/|download.php\?).*'
+
+ __description__ = """CZshare.com hoster plugin, now Sdilej.cz"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<div class="tab" id="parameters">\s*<p>\s*Cel. n.zev: <a href=[^>]*>(?P<N>[^<]+)</a>'
+ FILE_SIZE_PATTERN = r'<div class="tab" id="category">(?:\s*<p>[^\n]*</p>)*\s*Velikost:\s*(?P<S>[0-9., ]+)(?P<U>[kKMG])i?B\s*</div>'
+ OFFLINE_PATTERN = r'<div class="header clearfix">\s*<h2 class="red">'
+
+ FILE_SIZE_REPLACEMENTS = [(' ', '')]
+ FILE_URL_REPLACEMENTS = [(r'http://[^/]*/download.php\?.*?id=(\w+).*', r'http://sdilej.cz/\1/x/')]
+
+ SH_CHECK_TRAFFIC = True
+
+ FREE_URL_PATTERN = r'<a href="([^"]+)" class="page-download">[^>]*alt="([^"]+)" /></a>'
+ FREE_FORM_PATTERN = r'<form action="download.php" method="post">\s*<img src="captcha.php" id="captcha" />(.*?)</form>'
+ PREMIUM_FORM_PATTERN = r'<form action="/profi_down.php" method="post">(.*?)</form>'
+ FORM_INPUT_PATTERN = r'<input[^>]* name="([^"]+)" value="([^"]+)"[^>]*/>'
+ MULTIDL_PATTERN = r"<p><font color='red'>Z[^<]*PROFI.</font></p>"
+ USER_CREDIT_PATTERN = r'<div class="credit">\s*kredit: <strong>([0-9., ]+)([kKMG]i?B)</strong>\s*</div><!-- .credit -->'
+
+
+ def checkTrafficLeft(self):
+ # check if user logged in
+ m = re.search(self.USER_CREDIT_PATTERN, self.html)
+ if m is None:
+ self.account.relogin(self.user)
+ self.html = self.load(self.pyfile.url, cookies=True, decode=True)
+ m = re.search(self.USER_CREDIT_PATTERN, self.html)
+ if m is None:
+ return False
+
+ # check user credit
+ try:
+ credit = parseFileSize(m.group(1).replace(' ', ''), m.group(2))
+ self.logInfo("Premium download for %i KiB of Credit" % (self.pyfile.size / 1024))
+ self.logInfo("User %s has %i KiB left" % (self.user, credit / 1024))
+ if credit < self.pyfile.size:
+ self.logInfo("Not enough credit to download file %s" % self.pyfile.name)
+ return False
+ except Exception, e:
+ # let's continue and see what happens...
+ self.logError('Parse error (CREDIT): %s' % e)
+
+ return True
+
+ def handlePremium(self):
+ # parse download link
+ try:
+ form = re.search(self.PREMIUM_FORM_PATTERN, self.html, re.DOTALL).group(1)
+ inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+ except Exception, e:
+ self.logError("Parse error (FORM): %s" % e)
+ self.resetAccount()
+
+ # download the file, destination is determined by pyLoad
+ self.download("http://sdilej.cz/profi_down.php", post=inputs, disposition=True)
+ self.checkDownloadedFile()
+
+ def handleFree(self):
+ # get free url
+ m = re.search(self.FREE_URL_PATTERN, self.html)
+ if m is None:
+ self.parseError('Free URL')
+ parsed_url = "http://sdilej.cz" + m.group(1)
+ self.logDebug("PARSED_URL:" + parsed_url)
+
+ # get download ticket and parse html
+ self.html = self.load(parsed_url, cookies=True, decode=True)
+ if re.search(self.MULTIDL_PATTERN, self.html):
+ self.longWait(5 * 60, 12)
+
+ try:
+ form = re.search(self.FREE_FORM_PATTERN, self.html, re.DOTALL).group(1)
+ inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+ self.pyfile.size = int(inputs['size'])
+ except Exception, e:
+ self.logError(e)
+ self.parseError('Form')
+
+ # get and decrypt captcha
+ captcha_url = 'http://sdilej.cz/captcha.php'
+ for _ in xrange(5):
+ inputs['captchastring2'] = self.decryptCaptcha(captcha_url)
+ self.html = self.load(parsed_url, cookies=True, post=inputs, decode=True)
+ if u"<li>Zadaný ověřovací kód nesouhlasí!</li>" in self.html:
+ self.invalidCaptcha()
+ elif re.search(self.MULTIDL_PATTERN, self.html):
+ self.longWait(5 * 60, 12)
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail("No valid captcha code entered")
+
+ m = re.search("countdown_number = (\d+);", self.html)
+ self.setWait(int(m.group(1)) if m else 50)
+
+ # download the file, destination is determined by pyLoad
+ self.logDebug("WAIT URL", self.req.lastEffectiveURL)
+ m = re.search("free_wait.php\?server=(.*?)&(.*)", self.req.lastEffectiveURL)
+ if m is None:
+ self.parseError('Download URL')
+
+ url = "http://%s/download.php?%s" % (m.group(1), m.group(2))
+
+ self.wait()
+ self.download(url)
+ self.checkDownloadedFile()
+
+ def checkDownloadedFile(self):
+ # check download
+ check = self.checkDownload({
+ "tempoffline": re.compile(r"^Soubor je do.*asn.* nedostupn.*$"),
+ "credit": re.compile(r"^Nem.*te dostate.*n.* kredit.$"),
+ "multi_dl": re.compile(self.MULTIDL_PATTERN),
+ "captcha_err": "<li>Zadaný ověřovací kód nesouhlasí!</li>"
+ })
+
+ if check == "tempoffline":
+ self.fail("File not available - try later")
+ if check == "credit":
+ self.resetAccount()
+ elif check == "multi_dl":
+ self.longWait(5 * 60, 12)
+ elif check == "captcha_err":
+ self.invalidCaptcha()
+ self.retry()
+
+
+getInfo = create_getInfo(CzshareCom)
diff --git a/pyload/plugins/hoster/DailymotionCom.py b/pyload/plugins/hoster/DailymotionCom.py
new file mode 100644
index 000000000..0ae4c697b
--- /dev/null
+++ b/pyload/plugins/hoster/DailymotionCom.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.PyFile import statusMap
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+
+
+def getInfo(urls):
+ result = [] #: [ .. (name, size, status, url) .. ]
+ regex = re.compile(DailymotionCom.__pattern__)
+ apiurl = "https://api.dailymotion.com/video/"
+ request = {"fields": "access_error,status,title"}
+ for url in urls:
+ id = regex.search(url).group("ID")
+ page = getURL(apiurl + id, get=request)
+ info = json_loads(page)
+
+ if "title" in info:
+ name = info['title'] + ".mp4"
+ else:
+ name = url
+
+ if "error" in info or info['access_error']:
+ status = "offline"
+ else:
+ status = info['status']
+ if status in ("ready", "published"):
+ status = "online"
+ elif status in ("waiting", "processing"):
+ status = "temp. offline"
+ else:
+ status = "offline"
+
+ result.append((name, 0, statusMap[status], url))
+ return result
+
+
+class DailymotionCom(Hoster):
+ __name__ = "DailymotionCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'https?://(?:www\.)?dailymotion\.com/.*?video/(?P<ID>[\w^_]+)'
+ __config__ = [("quality", "Lowest;LD 144p;LD 240p;SD 384p;HQ 480p;HD 720p;HD 1080p;Highest", "Quality", "Highest")]
+
+ __description__ = """Dailymotion.com hoster plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+ def getStreams(self):
+ streams = []
+ for result in re.finditer(r"\"(?P<URL>http:\\/\\/www.dailymotion.com\\/cdn\\/H264-(?P<QF>.*?)\\.*?)\"",
+ self.html):
+ url = result.group("URL")
+ qf = result.group("QF")
+ link = url.replace("\\", "")
+ quality = tuple(int(x) for x in qf.split("x"))
+ streams.append((quality, link))
+ return sorted(streams, key=lambda x: x[0][::-1])
+
+ def getQuality(self):
+ q = self.getConfig("quality")
+ if q == "Lowest":
+ quality = 0
+ elif q == "Highest":
+ quality = -1
+ else:
+ quality = int(q.rsplit(" ")[1][:-1])
+ return quality
+
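+ # getLink() picks the highest stream whose height does not exceed the
+ # requested quality (streams are sorted ascending by resolution);
+ # quality 0 means the lowest stream, -1 the highest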
+ def getLink(self, streams, quality):
+ if quality > 0:
+ for x, s in reversed([item for item in enumerate(streams)]):
+ qf = s[0][1]
+ if qf <= quality:
+ idx = x
+ break
+ else:
+ idx = 0
+ else:
+ idx = quality
+
+ s = streams[idx]
+ self.logInfo("Download video quality %sx%s" % s[0])
+ return s[1]
+
+ def checkInfo(self, pyfile):
+ pyfile.name, pyfile.size, pyfile.status, pyfile.url = getInfo([pyfile.url])[0]
+ if pyfile.status == 1:
+ self.offline()
+ elif pyfile.status == 6:
+ self.tempOffline()
+
+ def process(self, pyfile):
+ self.checkInfo(pyfile)
+
+ id = re.match(self.__pattern__, pyfile.url).group("ID")
+ self.html = self.load("http://www.dailymotion.com/embed/video/" + id, decode=True)
+
+ streams = self.getStreams()
+ quality = self.getQuality()
+ link = self.getLink(streams, quality)
+
+ self.download(link)
diff --git a/pyload/plugins/hoster/DataHu.py b/pyload/plugins/hoster/DataHu.py
new file mode 100644
index 000000000..68162c203
--- /dev/null
+++ b/pyload/plugins/hoster/DataHu.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://data.hu/get/6381232/random.bin
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DataHu(SimpleHoster):
+ __name__ = "DataHu"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?data.hu/get/\w+'
+
+ __description__ = """Data.hu hoster plugin"""
+ __author_name__ = ("crash", "stickell")
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ FILE_INFO_PATTERN = ur'<title>(?P<N>.*) \((?P<S>[^)]+)\) let\xf6lt\xe9se</title>'
+ OFFLINE_PATTERN = ur'Az adott f\xe1jl nem l\xe9tezik'
+ LINK_PATTERN = r'<div class="download_box_button"><a href="([^"]+)">'
+
+
+ def handleFree(self):
+ self.resumeDownload = True
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ url = m.group(1)
+ self.logDebug('Direct link: ' + url)
+ else:
+ self.parseError('Unable to get direct link')
+
+ self.download(url, disposition=True)
+
+
+getInfo = create_getInfo(DataHu)
diff --git a/pyload/plugins/hoster/DataportCz.py b/pyload/plugins/hoster/DataportCz.py
new file mode 100644
index 000000000..2d87397df
--- /dev/null
+++ b/pyload/plugins/hoster/DataportCz.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DataportCz(SimpleHoster):
+ __name__ = "DataportCz"
+ __type__ = "hoster"
+ __version__ = "0.37"
+
+ __pattern__ = r'http://(?:www\.)?dataport.cz/file/(.*)'
+
+ __description__ = """Dataport.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<span itemprop="name">(?P<N>[^<]+)</span>'
+ FILE_SIZE_PATTERN = r'<td class="fil">Velikost</td>\s*<td>(?P<S>[^<]+)</td>'
+ OFFLINE_PATTERN = r'<h2>Soubor nebyl nalezen</h2>'
+
+ FILE_URL_REPLACEMENTS = [(__pattern__, r'http://www.dataport.cz/file/\1')]
+
+ CAPTCHA_URL_PATTERN = r'<section id="captcha_bg">\s*<img src="(.*?)"'
+ FREE_SLOTS_PATTERN = ur'Počet volných slotů: <span class="darkblue">(\d+)</span><br />'
+
+
+ def handleFree(self):
+ captchas = {"1": "jkeG", "2": "hMJQ", "3": "vmEK", "4": "ePQM", "5": "blBd"}
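+ # dataport.cz apparently rotates a fixed set of five captcha images,
+ # so the answers can be mapped statically by captchaId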
+
+ for _ in xrange(60):
+ action, inputs = self.parseHtmlForm('free_download_form')
+ self.logDebug(action, inputs)
+ if not action or not inputs:
+ self.parseError('free_download_form')
+
+ if "captchaId" in inputs and inputs['captchaId'] in captchas:
+ inputs['captchaCode'] = captchas[inputs['captchaId']]
+ else:
+ self.parseError('captcha')
+
+ self.html = self.download("http://www.dataport.cz%s" % action, post=inputs)
+
+ check = self.checkDownload({"captcha": 'alert("\u0160patn\u011b opsan\u00fd k\u00f3d z obr\u00e1zu");',
+ "slot": 'alert("Je n\u00e1m l\u00edto, ale moment\u00e1ln\u011b nejsou'})
+ if check == "captcha":
+ self.parseError('invalid captcha')
+ elif check == "slot":
+ self.logDebug("No free slots - wait 60s and retry")
+ self.wait(60, False)
+ self.html = self.load(self.pyfile.url, decode=True)
+ continue
+ else:
+ break
+
+
+getInfo = create_getInfo(DataportCz)
diff --git a/pyload/plugins/hoster/DateiTo.py b/pyload/plugins/hoster/DateiTo.py
new file mode 100644
index 000000000..1e8ca3614
--- /dev/null
+++ b/pyload/plugins/hoster/DateiTo.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DateiTo(SimpleHoster):
+ __name__ = "DateiTo"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?datei\.to/datei/(?P<ID>\w+)\.html'
+
+ __description__ = """Datei.to hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'Dateiname:</td>\s*<td colspan="2"><strong>(?P<N>.*?)</'
+ FILE_SIZE_PATTERN = r'Dateigr&ouml;&szlig;e:</td>\s*<td colspan="2">(?P<S>.*?)</'
+ OFFLINE_PATTERN = r'>Datei wurde nicht gefunden<|>Bitte wähle deine Datei aus... <'
+ PARALLEL_PATTERN = r'>Du lädst bereits eine Datei herunter<'
+
+ WAIT_PATTERN = r'countdown\({seconds: (\d+)'
+ DATA_PATTERN = r'url: "(.*?)", data: "(.*?)",'
+ RECAPTCHA_KEY_PATTERN = r'Recaptcha.create\("(.*?)"'
+
+
+ def handleFree(self):
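+ # The free download is a chain of AJAX calls: download.php hands out the next
+ # url/data pair (recaptcha.php for the captcha step) until the final response
+ # body contains the download URL itself.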
+ url = 'http://datei.to/ajax/download.php'
+ data = {'P': 'I', 'ID': self.file_info['ID']}
+
+ recaptcha = ReCaptcha(self)
+
+ for _ in xrange(10):
+ self.logDebug("URL", url, "POST", data)
+ self.html = self.load(url, post=data)
+ self.checkErrors()
+
+ if url.endswith('download.php') and 'P' in data:
+ if data['P'] == 'I':
+ self.doWait()
+
+ elif data['P'] == 'IV':
+ break
+
+ m = re.search(self.DATA_PATTERN, self.html)
+ if m is None:
+ self.parseError('data')
+ url = 'http://datei.to/' + m.group(1)
+ data = dict(x.split('=') for x in m.group(2).split('&'))
+
+ if url.endswith('recaptcha.php'):
+ m = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
+ recaptcha_key = m.group(1) if m else "6LdBbL8SAAAAAI0vKUo58XRwDd5Tu_Ze1DA7qTao"
+
+ data['recaptcha_challenge_field'], data['recaptcha_response_field'] = recaptcha.challenge(recaptcha_key)
+
+ else:
+ self.fail('Unable to retrieve download link')
+
+ download_url = self.html
+ self.logDebug('Download URL', download_url)
+ self.download(download_url)
+
+ def checkErrors(self):
+ m = re.search(self.PARALLEL_PATTERN, self.html)
+ if m:
+ m = re.search(self.WAIT_PATTERN, self.html)
+ wait_time = int(m.group(1)) if m else 30
+ self.wait(wait_time + 1, False)
+ self.retry()
+
+ def doWait(self):
+ m = re.search(self.WAIT_PATTERN, self.html)
+ wait_time = int(m.group(1)) if m else 30
+
+ self.load('http://datei.to/ajax/download.php', post={'P': 'Ads'})
+ self.wait(wait_time + 1, False)
+
+
+getInfo = create_getInfo(DateiTo)
diff --git a/pyload/plugins/hoster/DdlstorageCom.py b/pyload/plugins/hoster/DdlstorageCom.py
new file mode 100644
index 000000000..8b477ade6
--- /dev/null
+++ b/pyload/plugins/hoster/DdlstorageCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class DdlstorageCom(DeadHoster):
+ __name__ = "DdlstorageCom"
+ __type__ = "hoster"
+ __version__ = "1.02"
+
+ __pattern__ = r'https?://(?:www\.)?ddlstorage\.com/\w+'
+
+ __description__ = """DDLStorage.com hoster plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+
+getInfo = create_getInfo(DdlstorageCom)
diff --git a/pyload/plugins/hoster/DebridItaliaCom.py b/pyload/plugins/hoster/DebridItaliaCom.py
new file mode 100644
index 000000000..74879e6e5
--- /dev/null
+++ b/pyload/plugins/hoster/DebridItaliaCom.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+
+
+class DebridItaliaCom(Hoster):
+ __name__ = "DebridItaliaCom"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?debriditalia\.com'
+
+ __description__ = """Debriditalia.com hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+ def process(self, pyfile):
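+ # Links that already point to debriditalia.com are downloaded directly;
+ # any other link is first converted through the linkgen2.php XML API.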
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "DebridItalia")
+ self.fail("No DebridItalia account provided")
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ url = "http://debriditalia.com/linkgen2.php?xjxfun=convertiLink&xjxargs[]=S<![CDATA[%s]]>" % pyfile.url
+ page = self.load(url)
+ self.logDebug("XML data: %s" % page)
+
+ if 'File not available' in page:
+ self.fail('File not available')
+ else:
+ new_url = re.search(r'<a href="(?:[^"]+)">(?P<direct>[^<]+)</a>', page).group('direct')
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({"empty": re.compile(r"^$")})
+
+ if check == "empty":
+ self.retry(5, 2 * 60, "Empty file downloaded")
diff --git a/pyload/plugins/hoster/DepositfilesCom.py b/pyload/plugins/hoster/DepositfilesCom.py
new file mode 100644
index 000000000..9c0348cbd
--- /dev/null
+++ b/pyload/plugins/hoster/DepositfilesCom.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DepositfilesCom(SimpleHoster):
+ __name__ = "DepositfilesCom"
+ __type__ = "hoster"
+ __version__ = "0.48"
+
+ __pattern__ = r'https?://(?:www\.)?(depositfiles\.com|dfiles\.(eu|ru))(/\w{1,3})?/files/(?P<ID>\w+)'
+
+ __description__ = """Depositfiles.com hoster plugin"""
+ __author_name__ = ("spoob", "zoidberg", "Walter Purcaro")
+ __author_mail__ = ("spoob@pyload.org", "zoidberg@mujmail.cz", "vuolter@gmail.com")
+
+ FILE_NAME_PATTERN = r'<script type="text/javascript">eval\( unescape\(\'(?P<N>.*?)\''
+ FILE_SIZE_PATTERN = r': <b>(?P<S>[0-9.]+)&nbsp;(?P<U>[kKMG])i?B</b>'
+ OFFLINE_PATTERN = r'<span class="html_download_api-not_exists"></span>'
+
+ FILE_NAME_REPLACEMENTS = [(r'\%u([0-9A-Fa-f]{4})', lambda m: unichr(int(m.group(1), 16))),
+ (r'.*<b title="(?P<N>[^"]+).*', "\g<N>")]
+ FILE_URL_REPLACEMENTS = [(__pattern__, "https://dfiles.eu/files/\g<ID>")]
+
+ SH_COOKIES = [(".dfiles.eu", "lang_current", "en")]
+
+ RECAPTCHA_PATTERN = r"Recaptcha.create\('([^']+)'"
+
+ FREE_LINK_PATTERN = r'<form id="downloader_file_form" action="(http://.+?\.(dfiles\.eu|depositfiles\.com)/.+?)" method="post"'
+ PREMIUM_LINK_PATTERN = r'class="repeat"><a href="(.+?)"'
+ PREMIUM_MIRROR_PATTERN = r'class="repeat_mirror"><a href="(.+?)"'
+
+
+ def handleFree(self):
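+ # Free download: wait out any limit messages, extract the fid token, then
+ # loop on get_file.php solving the ReCaptcha until the free link appears.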
+ self.html = self.load(self.pyfile.url, post={"gateway_result": "1"}, cookies=True)
+
+ if re.search(r'File is checked, please try again in a minute.', self.html) is not None:
+ self.logInfo("DepositFiles.com: The file is being checked. Waiting 1 minute.")
+ self.wait(61)
+ self.retry()
+
+ wait = re.search(r'html_download_api-limit_interval\">(\d+)</span>', self.html)
+ if wait:
+ wait_time = int(wait.group(1))
+ self.logInfo("%s: Traffic used up. Waiting %d seconds." % (self.__name__, wait_time))
+ self.wait(wait_time, True)
+ self.retry()
+
+ wait = re.search(r'>Try in (\d+) minutes or use GOLD account', self.html)
+ if wait:
+ wait_time = int(wait.group(1))
+ self.logInfo("%s: All free slots occupied. Waiting %d minutes." % (self.__name__, wait_time))
+ self.setWait(wait_time * 60, False)
+
+ wait = re.search(r'Please wait (\d+) sec', self.html)
+ if wait:
+ self.setWait(int(wait.group(1)))
+
+ m = re.search(r"var fid = '(\w+)';", self.html)
+ if m is None:
+ self.retry(wait_time=5)
+ params = {'fid': m.group(1)}
+ self.logDebug("FID: %s" % params['fid'])
+
+ captcha_key = '6LdRTL8SAAAAAE9UOdWZ4d0Ky-aeA7XfSqyWDM2m'
+ m = re.search(self.RECAPTCHA_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ self.logDebug("CAPTCHA_KEY: %s" % captcha_key)
+
+ self.wait()
+ recaptcha = ReCaptcha(self)
+
+ for _ in xrange(5):
+ self.html = self.load("https://dfiles.eu/get_file.php", get=params)
+
+ if '<input type=button value="Continue" onclick="check_recaptcha' in self.html:
+ if not captcha_key:
+ self.parseError('Captcha key')
+ if 'response' in params:
+ self.invalidCaptcha()
+ params['challenge'], params['response'] = recaptcha.challenge(captcha_key)
+ self.logDebug(params)
+ continue
+
+ m = re.search(self.FREE_LINK_PATTERN, self.html)
+ if m:
+ if 'response' in params:
+ self.correctCaptcha()
+ link = unquote(m.group(1))
+ self.logDebug("LINK: %s" % link)
+ break
+ else:
+ self.parseError('Download link')
+ else:
+ self.fail('No valid captcha response received')
+
+ try:
+ self.download(link, disposition=True)
+ except:
+ self.retry(wait_time=60)
+
+ def handlePremium(self):
+ self.html = self.load(self.pyfile.url, cookies=self.SH_COOKIES)
+
+ if '<span class="html_download_api-gold_traffic_limit">' in self.html:
+ self.logWarning("Download limit reached")
+ self.retry(25, 60 * 60, "Download limit reached")
+ elif 'onClick="show_gold_offer' in self.html:
+ self.account.relogin(self.user)
+ self.retry()
+ else:
+ link = re.search(self.PREMIUM_LINK_PATTERN, self.html)
+ mirror = re.search(self.PREMIUM_MIRROR_PATTERN, self.html)
+ if link:
+ dlink = link.group(1)
+ elif mirror:
+ dlink = mirror.group(1)
+ else:
+ self.parseError("No direct download link or mirror found")
+ self.download(dlink, disposition=True)
+
+
+getInfo = create_getInfo(DepositfilesCom)
diff --git a/pyload/plugins/hoster/DlFreeFr.py b/pyload/plugins/hoster/DlFreeFr.py
new file mode 100644
index 000000000..387e11efc
--- /dev/null
+++ b/pyload/plugins/hoster/DlFreeFr.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+
+import pycurl
+import re
+
+from pyload.common.json_layer import json_loads
+from pyload.network.Browser import Browser
+from pyload.network.CookieJar import CookieJar
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns
+
+
+class CustomBrowser(Browser):
+
+ def __init__(self, bucket=None, options={}):
+ Browser.__init__(self, bucket, options)
+
+ def load(self, *args, **kwargs):
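+ # Force the HTTP method explicitly and disable redirect following for POST
+ # requests, so the Location header of the response can be read manually.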
+ post = kwargs.get("post")
+
+ if post is None and len(args) > 2:
+ post = args[2]
+
+ if post:
+ self.http.c.setopt(pycurl.FOLLOWLOCATION, 0)
+ self.http.c.setopt(pycurl.POST, 1)
+ self.http.c.setopt(pycurl.CUSTOMREQUEST, "POST")
+ else:
+ self.http.c.setopt(pycurl.FOLLOWLOCATION, 1)
+ self.http.c.setopt(pycurl.POST, 0)
+ self.http.c.setopt(pycurl.CUSTOMREQUEST, "GET")
+
+ return Browser.load(self, *args, **kwargs)
+
+
+class AdYouLike:
+ """
+ Class to support adyoulike captcha service
+ """
+ ADYOULIKE_INPUT_PATTERN = r'Adyoulike.create\((.*?)\);'
+ ADYOULIKE_CALLBACK = r'Adyoulike.g._jsonp_5579316662423138'
+ ADYOULIKE_CHALLENGE_PATTERN = ADYOULIKE_CALLBACK + r'\((.*?)\)'
+
+ def __init__(self, plugin, engine="adyoulike"):
+ self.plugin = plugin
+ self.engine = engine
+
+ def challenge(self, html):
+ adyoulike_data_string = None
+ m = re.search(self.ADYOULIKE_INPUT_PATTERN, html)
+ if m:
+ adyoulike_data_string = m.group(1)
+ else:
+ self.plugin.fail("Can't read AdYouLike input data")
+
+ # {"adyoulike":{"key":"P~zQ~O0zV0WTiAzC-iw0navWQpCLoYEP"},
+ # "all":{"element_id":"ayl_private_cap_92300","lang":"fr","env":"prod"}}
+ ayl_data = json_loads(adyoulike_data_string)
+
+ res = self.plugin.load(
+ r'http://api-ayl.appspot.com/challenge?key=%(ayl_key)s&env=%(ayl_env)s&callback=%(callback)s' % {
+ "ayl_key": ayl_data[self.engine]['key'], "ayl_env": ayl_data['all']['env'],
+ "callback": self.ADYOULIKE_CALLBACK})
+
+ m = re.search(self.ADYOULIKE_CHALLENGE_PATTERN, res)
+ challenge_string = None
+ if m:
+ challenge_string = m.group(1)
+ else:
+ self.plugin.fail("Invalid AdYouLike challenge")
+ challenge_data = json_loads(challenge_string)
+
+ return ayl_data, challenge_data
+
+ def result(self, ayl, challenge):
+ """
+ Adyoulike.g._jsonp_5579316662423138
+ ({"translations":{"fr":{"instructions_visual":"Recopiez « Soonnight » ci-dessous :"}},
+ "site_under":true,"clickable":true,"pixels":{"VIDEO_050":[],"DISPLAY":[],"VIDEO_000":[],"VIDEO_100":[],
+ "VIDEO_025":[],"VIDEO_075":[]},"medium_type":"image/adyoulike",
+ "iframes":{"big":"<iframe src=\"http://www.soonnight.com/campagn.html\" scrolling=\"no\"
+ height=\"250\" width=\"300\" frameborder=\"0\"></iframe>"},"shares":{},"id":256,
+ "token":"e6QuI4aRSnbIZJg02IsV6cp4JQ9~MjA1","formats":{"small":{"y":300,"x":0,"w":300,"h":60},
+ "big":{"y":0,"x":0,"w":300,"h":250},"hover":{"y":440,"x":0,"w":300,"h":60}},
+ "tid":"SqwuAdxT1EZoi4B5q0T63LN2AkiCJBg5"})
+ """
+ response = None
+ try:
+ instructions_visual = challenge['translations'][ayl['all']['lang']]['instructions_visual']
+ m = re.search(u".*«(.*)».*", instructions_visual)
+ if m:
+ response = m.group(1).strip()
+ else:
+ self.plugin.fail("Can't parse instructions visual")
+ except KeyError:
+ self.plugin.fail("No instructions visual")
+
+ # TODO: Support captcha
+
+ if not response:
+ self.plugin.fail("AdYouLike result failed")
+
+ return {"_ayl_captcha_engine": self.engine,
+ "_ayl_env": ayl['all']['env'],
+ "_ayl_tid": challenge['tid'],
+ "_ayl_token_challenge": challenge['token'],
+ "_ayl_response": response}
+
+
+class DlFreeFr(SimpleHoster):
+ __name__ = "DlFreeFr"
+ __type__ = "hoster"
+ __version__ = "0.25"
+
+ __pattern__ = r'http://(?:www\.)?dl\.free\.fr/([a-zA-Z0-9]+|getfile\.pl\?file=/[a-zA-Z0-9]+)'
+
+ __description__ = """Dl.free.fr hoster plugin"""
+ __author_name__ = ("the-razer", "zoidberg", "Toilal")
+ __author_mail__ = ("daniel_ AT gmx DOT net", "zoidberg@mujmail.cz", "toilal.dev@gmail.com")
+
+ FILE_NAME_PATTERN = r'Fichier:</td>\s*<td[^>]*>(?P<N>[^>]*)</td>'
+ FILE_SIZE_PATTERN = r'Taille:</td>\s*<td[^>]*>(?P<S>[\d.]+[KMG])o'
+ OFFLINE_PATTERN = r"Erreur 404 - Document non trouv|Fichier inexistant|Le fichier demand&eacute; n'a pas &eacute;t&eacute; trouv&eacute;"
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+ self.limitDL = 5
+ self.chunkLimit = 1
+
+ def init(self):
+ factory = self.core.requestFactory
+ self.req = CustomBrowser(factory.bucket, factory.getOptions())
+
+ def process(self, pyfile):
+ self.req.setCookieJar(None)
+
+ pyfile.url = replace_patterns(pyfile.url, self.FILE_URL_REPLACEMENTS)
+ valid_url = pyfile.url
+ headers = self.load(valid_url, just_header=True)
+
+ self.html = None
+ if headers.get('code') == 302:
+ valid_url = headers.get('location')
+ headers = self.load(valid_url, just_header=True)
+
+ if headers.get('code') == 200:
+ content_type = headers.get('content-type')
+ if content_type and content_type.startswith("text/html"):
+ # Indirect access to the requested file: a web page (with captcha) provides it
+ self.html = self.load(valid_url)
+ self.handleFree()
+ else:
+ # Direct access to requested file for users using free.fr as Internet Service Provider.
+ self.download(valid_url, disposition=True)
+ elif headers.get('code') == 404:
+ self.offline()
+ else:
+ self.fail("Invalid return code: " + str(headers.get('code')))
+
+ def handleFree(self):
+ action, inputs = self.parseHtmlForm('action="getfile.pl"')
+
+ adyoulike = AdYouLike(self)
+ ayl, challenge = adyoulike.challenge(self.html)
+ result = adyoulike.result(ayl, challenge)
+ inputs.update(result)
+
+ self.load("http://dl.free.fr/getfile.pl", post=inputs)
+ headers = self.getLastHeaders()
+ if headers.get("code") == 302 and "set-cookie" in headers and "location" in headers:
+ m = re.search("(.*?)=(.*?); path=(.*?); domain=(.*?)", headers.get("set-cookie"))
+ cj = CookieJar(__name__)
+ if m:
+ cj.setCookie(m.group(4), m.group(1), m.group(2), m.group(3))
+ else:
+ self.fail("Cookie error")
+ location = headers.get("location")
+ self.req.setCookieJar(cj)
+ self.download(location, disposition=True)
+ else:
+ self.fail("Invalid response")
+
+ def getLastHeaders(self):
+ # Parse the raw HTTP response header into a dict; repeated keys are collected into lists
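+ # The returned dict looks roughly like (illustrative values only):
+ #   {"code": 302, "location": "http://...", "set-cookie": "name=value; path=/; domain=dl.free.fr"}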
+ header = {"code": self.req.code}
+ for line in self.req.http.header.splitlines():
+ line = line.strip()
+ if not line or ":" not in line:
+ continue
+
+ key, none, value = line.partition(":")
+ key = key.lower().strip()
+ value = value.strip()
+
+ if key in header:
+ if isinstance(header[key], list):
+ header[key].append(value)
+ else:
+ header[key] = [header[key], value]
+ else:
+ header[key] = value
+ return header
+
+
+getInfo = create_getInfo(DlFreeFr)
diff --git a/pyload/plugins/hoster/DuploadOrg.py b/pyload/plugins/hoster/DuploadOrg.py
new file mode 100644
index 000000000..8c2430c87
--- /dev/null
+++ b/pyload/plugins/hoster/DuploadOrg.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class DuploadOrg(XFileSharingPro):
+ __name__ = "DuploadOrg"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?dupload\.org/\w{12}'
+
+ __description__ = """Dupload.grg hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ HOSTER_NAME = "dupload.org"
+
+ FILE_INFO_PATTERN = r'<h3[^>]*>(?P<N>.+) \((?P<S>[\d.]+) (?P<U>\w+)\)</h3>'
+
+
+getInfo = create_getInfo(DuploadOrg)
diff --git a/pyload/plugins/hoster/EasybytezCom.py b/pyload/plugins/hoster/EasybytezCom.py
new file mode 100644
index 000000000..7b1d8881f
--- /dev/null
+++ b/pyload/plugins/hoster/EasybytezCom.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class EasybytezCom(XFileSharingPro):
+ __name__ = "EasybytezCom"
+ __type__ = "hoster"
+ __version__ = "0.18"
+
+ __pattern__ = r'http://(?:www\.)?easybytez.com/(\w+).*'
+
+ __description__ = """Easybytez.com hoster plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ HOSTER_NAME = "easybytez.com"
+
+ FILE_INFO_PATTERN = r'<span class="name">(?P<N>.+)</span><br>\s*<span class="size">(?P<S>[^<]+)</span>'
+ OFFLINE_PATTERN = r'<h1>File not available</h1>'
+
+ LINK_PATTERN = r'(http://(\w+\.(easyload|easybytez|zingload)\.(com|to)|\d+\.\d+\.\d+\.\d+)/files/\d+/\w+/[^"<]+)'
+ OVR_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)'
+ ERROR_PATTERN = r'(?:class=["\']err["\'][^>]*>|<Center><b>)(.*?)</'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = self.premium
+
+
+getInfo = create_getInfo(EasybytezCom)
diff --git a/pyload/plugins/hoster/EdiskCz.py b/pyload/plugins/hoster/EdiskCz.py
new file mode 100644
index 000000000..4c532b33f
--- /dev/null
+++ b/pyload/plugins/hoster/EdiskCz.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class EdiskCz(SimpleHoster):
+ __name__ = "EdiskCz"
+ __type__ = "hoster"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?edisk.(cz|sk|eu)/(stahni|sk/stahni|en/download)/.*'
+
+ __description__ = """Edisk.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_INFO_PATTERN = r'<span class="fl" title="(?P<N>[^"]+)">\s*.*?\((?P<S>[0-9.]*) (?P<U>[kKMG])i?B\)</h1></span>'
+ OFFLINE_PATTERN = r'<h3>This file does not exist due to one of the following:</h3><ul><li>'
+
+ ACTION_PATTERN = r'/en/download/(\d+/.*\.html)'
+ LINK_PATTERN = r'http://.*edisk.cz.*\.html'
+
+
+ def setup(self):
+ self.multiDL = False
+
+ def process(self, pyfile):
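+ # Normalize the link to the English download page, then request the slow
+ # download page and ask the /x-download/ endpoint for the final link.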
+ url = re.sub("/(stahni|sk/stahni)/", "/en/download/", pyfile.url)
+
+ self.logDebug('URL:' + url)
+
+ m = re.search(self.ACTION_PATTERN, url)
+ if m is None:
+ self.parseError("ACTION")
+ action = m.group(1)
+
+ self.html = self.load(url, decode=True)
+ self.getFileInfo()
+
+ self.html = self.load(re.sub("/en/download/", "/en/download-slow/", url))
+
+ url = self.load(re.sub("/en/download/", "/x-download/", url), post={
+ "action": action
+ })
+
+ if not re.match(self.LINK_PATTERN, url):
+ self.fail("Unexpected server response")
+
+ self.download(url)
+
+
+getInfo = create_getInfo(EdiskCz)
diff --git a/pyload/plugins/hoster/EgoFilesCom.py b/pyload/plugins/hoster/EgoFilesCom.py
new file mode 100644
index 000000000..7d59b274c
--- /dev/null
+++ b/pyload/plugins/hoster/EgoFilesCom.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://egofiles.com/mOZfMI1WLZ6HBkGG/random.bin
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class EgoFilesCom(SimpleHoster):
+ __name__ = "EgoFilesCom"
+ __type__ = "hoster"
+ __version__ = "0.15"
+
+ __pattern__ = r'https?://(?:www\.)?egofiles.com/(\w+)'
+
+ __description__ = """Egofiles.com hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ FILE_INFO_PATTERN = r'<div class="down-file">\s+(?P<N>[^\t]+)\s+<div class="file-properties">\s+(File size|Rozmiar): (?P<S>[\w.]+) (?P<U>\w+) \|'
+ OFFLINE_PATTERN = r'(File size|Rozmiar): 0 KB'
+ WAIT_TIME_PATTERN = r'For next free download you have to wait <strong>((?P<m>\d*)m)? ?((?P<s>\d+)s)?</strong>'
+ LINK_PATTERN = r'<a href="(?P<link>[^"]+)">Download ></a>'
+ RECAPTCHA_KEY = "6LeXatQSAAAAAHezcjXyWAni-4t302TeYe7_gfvX"
+
+
+ def setup(self):
+ # Set English language
+ self.load("https://egofiles.com/ajax/lang.php?lang=en", just_header=True)
+
+ def process(self, pyfile):
+ if self.premium and (not self.SH_CHECK_TRAFFIC or self.checkTrafficLeft()):
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+ self.getFileInfo()
+
+ # Wait time between free downloads
+ if 'For next free download you have to wait' in self.html:
+ m = re.search(self.WAIT_TIME_PATTERN, self.html).groupdict('0')
+ waittime = int(m['m']) * 60 + int(m['s'])
+ self.wait(waittime, True)
+
+ downloadURL = r''
+ recaptcha = ReCaptcha(self)
+ for _ in xrange(5):
+ challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ post_data = {'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': response}
+ self.html = self.load(self.pyfile.url, post=post_data, decode=True)
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.logInfo('Wrong captcha')
+ self.invalidCaptcha()
+ elif hasattr(m, 'group'):
+ downloadURL = m.group('link')
+ self.correctCaptcha()
+ break
+ else:
+ self.fail('Unknown error - Plugin may be out of date')
+
+ if not downloadURL:
+ self.fail("No Download url retrieved/all captcha attempts failed")
+
+ self.download(downloadURL, disposition=True)
+
+ def handlePremium(self):
+ header = self.load(self.pyfile.url, just_header=True)
+ if 'location' in header:
+ self.logDebug('DIRECT LINK from header: ' + header['location'])
+ self.download(header['location'])
+ else:
+ self.html = self.load(self.pyfile.url, decode=True)
+ self.getFileInfo()
+ m = re.search(r'<a href="(?P<link>[^"]+)">Download ></a>', self.html)
+ if m is None:
+ self.parseError('Unable to detect direct download url')
+ else:
+ self.logDebug('DIRECT URL from html: ' + m.group('link'))
+ self.download(m.group('link'), disposition=True)
+
+
+getInfo = create_getInfo(EgoFilesCom)
diff --git a/pyload/plugins/hoster/EpicShareNet.py b/pyload/plugins/hoster/EpicShareNet.py
new file mode 100644
index 000000000..a4a6008ae
--- /dev/null
+++ b/pyload/plugins/hoster/EpicShareNet.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# BigBuckBunny_320x180.mp4 - 61.7 Mb - http://epicshare.net/fch3m2bk6ihp/BigBuckBunny_320x180.mp4.html
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class EpicShareNet(XFileSharingPro):
+ __name__ = "EpicShareNet"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?epicshare\.net/\w{12}'
+
+ __description__ = """EpicShare.net hoster plugin"""
+ __author_name__ = "t4skforce"
+ __author_mail__ = "t4skforce1337[AT]gmail[DOT]com"
+
+ HOSTER_NAME = "epicshare.net"
+
+ OFFLINE_PATTERN = r'<b>File Not Found</b><br><br>'
+ FILE_NAME_PATTERN = r'<b>Password:</b></div>\s*<h2>(?P<N>[^<]+)</h2>'
+
+
+getInfo = create_getInfo(EpicShareNet)
diff --git a/pyload/plugins/hoster/EuroshareEu.py b/pyload/plugins/hoster/EuroshareEu.py
new file mode 100644
index 000000000..d7c172594
--- /dev/null
+++ b/pyload/plugins/hoster/EuroshareEu.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class EuroshareEu(SimpleHoster):
+ __name__ = "EuroshareEu"
+ __type__ = "hoster"
+ __version__ = "0.25"
+
+ __pattern__ = r'http://(?:www\.)?euroshare.(eu|sk|cz|hu|pl)/file/.*'
+
+ __description__ = """Euroshare.eu hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_INFO_PATTERN = r'<span style="float: left;"><strong>(?P<N>.+?)</strong> \((?P<S>.+?)\)</span>'
+ OFFLINE_PATTERN = ur'<h2>S.bor sa nena.iel</h2>|Požadovaná stránka neexistuje!'
+
+ FREE_URL_PATTERN = r'<a href="(/file/\d+/[^/]*/download/)"><div class="downloadButton"'
+ ERR_PARDL_PATTERN = r'<h2>Prebieha s.ahovanie</h2>|<p>Naraz je z jednej IP adresy mo.n. s.ahova. iba jeden s.bor'
+ ERR_NOT_LOGGED_IN_PATTERN = r'href="/customer-zone/login/"'
+
+ FILE_URL_REPLACEMENTS = [(r"(http://[^/]*\.)(sk|cz|hu|pl)/", r"\1eu/")]
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = self.premium
+ self.req.setOption("timeout", 120)
+
+ def handlePremium(self):
+ if self.ERR_NOT_LOGGED_IN_PATTERN in self.html:
+ self.account.relogin(self.user)
+ self.retry(reason="User not logged in")
+
+ self.download(self.pyfile.url.rstrip('/') + "/download/")
+
+ check = self.checkDownload({"login": re.compile(self.ERR_NOT_LOGGED_IN_PATTERN),
+ "json": re.compile(r'\{"status":"error".*?"message":"(.*?)"')})
+ if check == "login" or (check == "json" and self.lastCheck.group(1) == "Access token expired"):
+ self.account.relogin(self.user)
+ self.retry(reason="Access token expired")
+ elif check == "json":
+ self.fail(self.lastCheck.group(1))
+
+ def handleFree(self):
+ if re.search(self.ERR_PARDL_PATTERN, self.html) is not None:
+ self.longWait(5 * 60, 12)
+
+ m = re.search(self.FREE_URL_PATTERN, self.html)
+ if m is None:
+ self.parseError("Parse error (URL)")
+ parsed_url = "http://euroshare.eu%s" % m.group(1)
+ self.logDebug("URL", parsed_url)
+ self.download(parsed_url, disposition=True)
+
+ check = self.checkDownload({"multi_dl": re.compile(self.ERR_PARDL_PATTERN)})
+ if check == "multi_dl":
+ self.longWait(5 * 60, 12)
+
+
+getInfo = create_getInfo(EuroshareEu)
diff --git a/pyload/plugins/hoster/ExtabitCom.py b/pyload/plugins/hoster/ExtabitCom.py
new file mode 100644
index 000000000..38479410e
--- /dev/null
+++ b/pyload/plugins/hoster/ExtabitCom.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.common.json_layer import json_loads
+
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class ExtabitCom(SimpleHoster):
+ __name__ = "ExtabitCom"
+ __type__ = "hoster"
+ __version__ = "0.6"
+
+ __pattern__ = r'http://(?:www\.)?extabit\.com/(file|go|fid)/(?P<ID>\w+)'
+
+ __description__ = """Extabit.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<th>File:</th>\s*<td class="col-fileinfo">\s*<div title="(?P<N>[^"]+)">'
+ FILE_SIZE_PATTERN = r'<th>Size:</th>\s*<td class="col-fileinfo">(?P<S>[^<]+)</td>'
+ OFFLINE_PATTERN = r'>File not found<'
+ TEMP_OFFLINE_PATTERN = r'>(File is temporary unavailable|No download mirror)<'
+
+ LINK_PATTERN = r'[\'"](http://guest\d+\.extabit\.com/[a-z0-9]+/.*?)[\'"]'
+
+
+ def handleFree(self):
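+ # Free download: honour the per-IP wait and daily limit, solve the ReCaptcha
+ # through the JSON endpoint, then fetch the page with the guest download link.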
+ if r">Only premium users can download this file" in self.html:
+ self.fail("Only premium users can download this file")
+
+ m = re.search(r"Next free download from your ip will be available in <b>(\d+)\s*minutes", self.html)
+ if m:
+ self.wait(int(m.group(1)) * 60, True)
+ elif "The daily downloads limit from your IP is exceeded" in self.html:
+ self.logWarning("You have reached your daily downloads limit for today")
+ self.wait(secondsToMidnight(gmt=2), True)
+
+ self.logDebug("URL: " + self.req.http.lastEffectiveURL)
+ m = re.match(self.__pattern__, self.req.http.lastEffectiveURL)
+ fileID = m.group('ID') if m else self.file_info['ID']
+
+ m = re.search(r'recaptcha/api/challenge\?k=(\w+)', self.html)
+ if m:
+ recaptcha = ReCaptcha(self)
+ captcha_key = m.group(1)
+
+ for _ in xrange(5):
+ get_data = {"type": "recaptcha"}
+ get_data['challenge'], get_data['capture'] = recaptcha.challenge(captcha_key)
+ response = json_loads(self.load("http://extabit.com/file/%s/" % fileID, get=get_data))
+ if "ok" in response:
+ self.correctCaptcha()
+ break
+ else:
+ self.invalidCaptcha()
+ else:
+ self.fail("Invalid captcha")
+ else:
+ self.parseError('Captcha')
+
+ if not "href" in response:
+ self.parseError('JSON')
+
+ self.html = self.load("http://extabit.com/file/%s%s" % (fileID, response['href']))
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError('Download URL')
+ url = m.group(1)
+ self.logDebug("Download URL: " + url)
+ self.download(url)
+
+
+getInfo = create_getInfo(ExtabitCom)
diff --git a/pyload/plugins/hoster/FastixRu.py b/pyload/plugins/hoster/FastixRu.py
new file mode 100644
index 000000000..e031e3e55
--- /dev/null
+++ b/pyload/plugins/hoster/FastixRu.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import randrange
+from urllib import unquote
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+
+
+class FastixRu(Hoster):
+ __name__ = "FastixRu"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?fastix\.(ru|it)/file/(?P<ID>[a-zA-Z0-9]{24})'
+
+ __description__ = """Fastix hoster plugin"""
+ __author_name__ = "Massimo Rosamilia"
+ __author_mail__ = "max@spiritix.eu"
+
+
+ def getFilename(self, url):
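+ # Derive a filename from the last URL segment; if it looks truncated ("..."),
+ # append a random numeric suffix so the file still gets a usable name.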
+ try:
+ name = unquote(url.rsplit("/", 1)[1])
+ except IndexError:
+ name = "Unknown_Filename..."
+ if name.endswith("..."): # incomplete filename, append random stuff
+ name += "%s.tmp" % randrange(100, 999)
+ return name
+
+ def setup(self):
+ self.chunkLimit = 3
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Fastix")
+ self.fail("No Fastix account provided")
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ api_key = self.account.getAccountData(self.user)
+ api_key = api_key['api']
+ url = "http://fastix.ru/api_v2/?apikey=%s&sub=getdirectlink&link=%s" % (api_key, pyfile.url)
+ page = self.load(url)
+ data = json_loads(page)
+ self.logDebug("Json data: %s" % str(data))
+ if "error\":true" in page:
+ self.offline()
+ else:
+ new_url = data['downloadlink']
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown"):
+ # Only use this if the name wasn't already set properly
+ pyfile.name = self.getFilename(new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({"error": "<title>An error occurred while processing your request</title>",
+ "empty": re.compile(r"^$")})
+
+ if check == "error":
+ self.retry(wait_time=60, reason="An error occurred while generating link.")
+ elif check == "empty":
+ self.retry(wait_time=60, reason="Downloaded File was empty.")
diff --git a/pyload/plugins/hoster/FastshareCz.py b/pyload/plugins/hoster/FastshareCz.py
new file mode 100644
index 000000000..3897a1c23
--- /dev/null
+++ b/pyload/plugins/hoster/FastshareCz.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://www.fastshare.cz/2141189/random.bin
+
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FastshareCz(SimpleHoster):
+ __name__ = "FastshareCz"
+ __type__ = "hoster"
+ __version__ = "0.22"
+
+ __pattern__ = r'http://(?:www\.)?fastshare\.cz/\d+/.+'
+
+ __description__ = """FastShare.cz hoster plugin"""
+ __author_name__ = ("zoidberg", "stickell", "Walter Purcaro")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it", "vuolter@gmail.com")
+
+ FILE_INFO_PATTERN = r'<h1 class="dwp">(?P<N>[^<]+)</h1>\s*<div class="fileinfo">\s*Size\s*: (?P<S>\d+) (?P<U>\w+),'
+ OFFLINE_PATTERN = r'>(The file has been deleted|Requested page not found)'
+
+ FILE_URL_REPLACEMENTS = [("#.*", "")]
+
+ SH_COOKIES = [(".fastshare.cz", "lang", "en")]
+
+ FREE_URL_PATTERN = r'action=(/free/.*?)>\s*<img src="([^"]*)"><br'
+ PREMIUM_URL_PATTERN = r'(http://data\d+\.fastshare\.cz/download\.php\?id=\d+&)'
+ CREDIT_PATTERN = r' credit for '
+
+
+ def handleFree(self):
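+ # Free download: extract the form action and captcha image from the page,
+ # solve the captcha via decryptCaptcha and post the code to the /free/ action.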
+ if "> 100% of FREE slots are full" in self.html:
+ self.retry(120, 60, "No free slots")
+
+ m = re.search(self.FREE_URL_PATTERN, self.html)
+ if m:
+ action, captcha_src = m.groups()
+ else:
+ self.parseError("Free URL")
+
+ baseurl = "http://www.fastshare.cz"
+ captcha = self.decryptCaptcha(urljoin(baseurl, captcha_src))
+ self.download(urljoin(baseurl, action), post={"code": captcha, "btn.x": 77, "btn.y": 18})
+
+ check = self.checkDownload({
+ "paralell_dl":
+ "<title>FastShare.cz</title>|<script>alert\('Pres FREE muzete stahovat jen jeden soubor najednou.'\)",
+ "wrong_captcha": "Download for FREE"
+ })
+
+ if check == "paralell_dl":
+ self.retry(6, 10 * 60, "Paralell download")
+ elif check == "wrong_captcha":
+ self.retry(max_tries=5, reason="Wrong captcha")
+
+ def handlePremium(self):
+ header = self.load(self.pyfile.url, just_header=True)
+ if "location" in header:
+ url = header['location']
+ else:
+ self.html = self.load(self.pyfile.url)
+
+ self.getFileInfo()
+
+ if self.CREDIT_PATTERN in self.html:
+ self.logWarning("Not enough traffic left")
+ self.resetAccount()
+ else:
+ m = re.search(self.PREMIUM_URL_PATTERN, self.html)
+ if m:
+ url = m.group(1)
+ else:
+ self.parseError("Premium URL")
+
+ self.logDebug("PREMIUM URL: " + url)
+ self.download(url, disposition=True)
+
+ check = self.checkDownload({"credit": re.compile(self.CREDIT_PATTERN)})
+ if check == "credit":
+ self.resetAccount()
+
+
+getInfo = create_getInfo(FastshareCz)
diff --git a/pyload/plugins/hoster/File4safeCom.py b/pyload/plugins/hoster/File4safeCom.py
new file mode 100644
index 000000000..4aa0e26a4
--- /dev/null
+++ b/pyload/plugins/hoster/File4safeCom.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import FOLLOWLOCATION
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class File4safeCom(XFileSharingPro):
+ __name__ = "File4safeCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?file4safe\.com/\w+'
+
+ __description__ = """File4safe.com hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ HOSTER_NAME = "file4safe.com"
+
+
+ def handlePremium(self):
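+ # Post the premium form with redirect following disabled and read the
+ # Location header manually; it carries the direct premium download link.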
+ self.req.http.lastURL = self.pyfile.url
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+ self.load(self.pyfile.url, post=self.getPostParameters(), decode=True)
+ self.header = self.req.http.header
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+
+ m = re.search(r"Location\s*:\s*(.*)", self.header, re.I)
+ if m and re.match(self.LINK_PATTERN, m.group(1)):
+ location = m.group(1).strip()
+ self.startDownload(location)
+ else:
+ self.parseError("Unable to detect premium download link")
+
+
+getInfo = create_getInfo(File4safeCom)
diff --git a/pyload/plugins/hoster/FileApeCom.py b/pyload/plugins/hoster/FileApeCom.py
new file mode 100644
index 000000000..8c6305631
--- /dev/null
+++ b/pyload/plugins/hoster/FileApeCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FileApeCom(DeadHoster):
+ __name__ = "FileApeCom"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'http://(?:www\.)?fileape\.com/(index\.php\?act=download\&id=|dl/)\w+'
+
+ __description__ = """FileApe.com hoster plugin"""
+ __author_name__ = "espes"
+ __author_mail__ = None
+
+
+getInfo = create_getInfo(FileApeCom)
diff --git a/pyload/plugins/hoster/FileParadoxIn.py b/pyload/plugins/hoster/FileParadoxIn.py
new file mode 100644
index 000000000..955a9726b
--- /dev/null
+++ b/pyload/plugins/hoster/FileParadoxIn.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class FileParadoxIn(XFileSharingPro):
+ __name__ = "FileParadoxIn"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?fileparadox\.in/\w+'
+
+ __description__ = """FileParadox.in hoster plugin"""
+ __author_name__ = "RazorWing"
+ __author_mail__ = "muppetuk1@hotmail.com"
+
+ HOSTER_NAME = "fileparadox.in"
+
+ FILE_SIZE_PATTERN = r'</font>\s*\(\s*(?P<S>[^)]+)\s*\)</font>'
+ LINK_PATTERN = r'(http://([^/]*?fileparadox.in|\d+\.\d+\.\d+\.\d+)(:\d+/d/|/files/\w+/\w+/)[^"\'<]+)'
+
+
+getInfo = create_getInfo(FileParadoxIn)
diff --git a/pyload/plugins/hoster/FileStoreTo.py b/pyload/plugins/hoster/FileStoreTo.py
new file mode 100644
index 000000000..6a2963ec2
--- /dev/null
+++ b/pyload/plugins/hoster/FileStoreTo.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FileStoreTo(SimpleHoster):
+ __name__ = "FileStoreTo"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?filestore\.to/\?d=(?P<ID>\w+)'
+
+ __description__ = """FileStore.to hoster plugin"""
+ __author_name__ = ("Walter Purcaro", "stickell")
+ __author_mail__ = ("vuolter@gmail.com", "l.stickell@yahoo.it")
+
+ FILE_INFO_PATTERN = r'File: <span[^>]*>(?P<N>.+)</span><br />Size: (?P<S>[\d,.]+) (?P<U>\w+)'
+ OFFLINE_PATTERN = r'>Download-Datei wurde nicht gefunden<'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+ def handleFree(self):
+ self.wait(10)
+ ldc = re.search(r'wert="(\w+)"', self.html).group(1)
+ link = self.load("http://filestore.to/ajax/download.php", get={"LDC": ldc})
+ self.logDebug("Download link = " + link)
+ self.download(link)
+
+
+getInfo = create_getInfo(FileStoreTo)
diff --git a/pyload/plugins/hoster/FilebeerInfo.py b/pyload/plugins/hoster/FilebeerInfo.py
new file mode 100644
index 000000000..561660148
--- /dev/null
+++ b/pyload/plugins/hoster/FilebeerInfo.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FilebeerInfo(DeadHoster):
+ __name__ = "FilebeerInfo"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?filebeer\.info/(?!\d*~f)(?P<ID>\w+).*'
+
+ __description__ = """Filebeer.info plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+getInfo = create_getInfo(FilebeerInfo)
diff --git a/pyload/plugins/hoster/FilecloudIo.py b/pyload/plugins/hoster/FilecloudIo.py
new file mode 100644
index 000000000..05753a67e
--- /dev/null
+++ b/pyload/plugins/hoster/FilecloudIo.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FilecloudIo(SimpleHoster):
+ __name__ = "FilecloudIo"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?(?:filecloud\.io|ifile\.it|mihd\.net)/(?P<ID>\w+).*'
+
+ __description__ = """Filecloud.io hoster plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ FILE_SIZE_PATTERN = r'{var __ab1 = (?P<S>\d+);}'
+ FILE_NAME_PATTERN = r'id="aliasSpan">(?P<N>.*?)&nbsp;&nbsp;<'
+ OFFLINE_PATTERN = r'l10n.(FILES__DOESNT_EXIST|REMOVED)'
+ TEMP_OFFLINE_PATTERN = r'l10n.FILES__WARNING'
+
+ UKEY_PATTERN = r"'ukey'\s*:'(\w+)',"
+ AB1_PATTERN = r"if\( __ab1 == '(\w+)' \)"
+ ERROR_MSG_PATTERN = r'var __error_msg\s*=\s*l10n\.(.*?);'
+ LINK_PATTERN = r'"(http://s\d+.filecloud.io/%s/\d+/.*?)"'
+ RECAPTCHA_KEY_PATTERN = r"var __recaptcha_public\s*=\s*'([^']+)';"
+ RECAPTCHA_KEY = "6Lf5OdISAAAAAEZObLcx5Wlv4daMaASRov1ysDB1"
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = 1
+
+ def handleFree(self):
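+ # Free download goes through download-request.json: it needs the __ab1 token
+ # scraped from the page, a logged-in account and, if requested, a ReCaptcha.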
+ data = {"ukey": self.file_info['ID']}
+
+ m = re.search(self.AB1_PATTERN, self.html)
+ if m is None:
+ self.parseError("__AB1")
+ data['__ab1'] = m.group(1)
+
+ if not self.account:
+ self.fail("User not logged in")
+ elif not self.account.logged_in:
+ recaptcha = ReCaptcha(self)
+ captcha_challenge, captcha_response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ self.account.form_data = {"recaptcha_challenge_field": captcha_challenge,
+ "recaptcha_response_field": captcha_response}
+ self.account.relogin(self.user)
+ self.retry(2)
+
+ json_url = "http://filecloud.io/download-request.json"
+ response = self.load(json_url, post=data)
+ self.logDebug(response)
+ response = json_loads(response)
+
+ if "error" in response and response['error']:
+ self.fail(response)
+
+ self.logDebug(response)
+ if response['captcha']:
+ recaptcha = ReCaptcha(self)
+ m = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
+ captcha_key = m.group(1) if m else self.RECAPTCHA_KEY
+ data['ctype'] = "recaptcha"
+
+ for _ in xrange(5):
+ data['recaptcha_challenge'], data['recaptcha_response'] = recaptcha.challenge(captcha_key)
+
+ json_url = "http://filecloud.io/download-request.json"
+ response = self.load(json_url, post=data)
+ self.logDebug(response)
+ response = json_loads(response)
+
+ if "retry" in response and response['retry']:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail("Incorrect captcha")
+
+ if response['dl']:
+ self.html = self.load('http://filecloud.io/download.html')
+ m = re.search(self.LINK_PATTERN % self.file_info['ID'], self.html)
+ if m is None:
+ self.parseError("Download URL")
+ download_url = m.group(1)
+ self.logDebug("Download URL: %s" % download_url)
+
+ if "size" in self.file_info and self.file_info['size']:
+ self.check_data = {"size": int(self.file_info['size'])}
+ self.download(download_url)
+ else:
+ self.fail("Unexpected server response")
+
+ def handlePremium(self):
+ akey = self.account.getAccountData(self.user)['akey']
+ ukey = self.file_info['ID']
+ self.logDebug("Akey: %s | Ukey: %s" % (akey, ukey))
+ rep = self.load("http://api.filecloud.io/api-fetch_download_url.api",
+ post={"akey": akey, "ukey": ukey})
+ self.logDebug("FetchDownloadUrl: " + rep)
+ rep = json_loads(rep)
+ if rep['status'] == 'ok':
+ self.download(rep['download_url'], disposition=True)
+ else:
+ self.fail(rep['message'])
+
+
+getInfo = create_getInfo(FilecloudIo)
diff --git a/pyload/plugins/hoster/FilefactoryCom.py b/pyload/plugins/hoster/FilefactoryCom.py
new file mode 100644
index 000000000..fafe96477
--- /dev/null
+++ b/pyload/plugins/hoster/FilefactoryCom.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+
+
+def getInfo(urls):
+ for url in urls:
+ h = getURL(url, just_header=True)
+ m = re.search(r'Location: (.+)\r\n', h)
+ if m and not re.match(FilefactoryCom.__pattern__, m.group(1)): # It's a direct link! Skipping
+ yield (url, 0, 3, url)
+ else: # It's a standard html page
+ file_info = parseFileInfo(FilefactoryCom, url, getURL(url))
+ yield file_info
+
+
+class FilefactoryCom(SimpleHoster):
+ __name__ = "FilefactoryCom"
+ __type__ = "hoster"
+ __version__ = "0.50"
+
+ __pattern__ = r'https?://(?:www\.)?filefactory\.com/file/(?P<id>[a-zA-Z0-9]+)'
+
+ __description__ = """Filefactory.com hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ FILE_INFO_PATTERN = r'<div id="file_name"[^>]*>\s*<h2>(?P<N>[^<]+)</h2>\s*<div id="file_info">\s*(?P<S>[\d.]+) (?P<U>\w+) uploaded'
+ LINK_PATTERN = r'<a href="(https?://[^"]+)"[^>]*><i[^>]*></i> Download with FileFactory Premium</a>'
+ OFFLINE_PATTERN = r'<h2>File Removed</h2>|This file is no longer available'
+ PREMIUM_ONLY_PATTERN = r'>Premium Account Required<'
+
+ SH_COOKIES = [(".filefactory.com", "locale", "en_US.utf8")]
+
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+ if "Currently only Premium Members can download files larger than" in self.html:
+ self.fail("File too large for free download")
+ elif "All free download slots on this server are currently in use" in self.html:
+ self.retry(50, 15 * 60, "All free slots are busy")
+
+ m = re.search(r'data-href(?:-direct)?="(http://[^"]+)"', self.html)
+ if m:
+ t = re.search(r'<div id="countdown_clock" data-delay="(\d+)">', self.html)
+ if t:
+ t = int(t.group(1))
+ else:
+ self.logDebug("Unable to detect countdown duration. Guessing 60 seconds")
+ t = 60
+ self.wait(t)
+ direct = m.group(1)
+ else: # This section could be completely useless now
+ # Load the page that contains the direct link
+ url = re.search(r"document\.location\.host \+\s*'(.+)';", self.html)
+ if url is None:
+ self.parseError('Unable to detect free link')
+ url = 'http://www.filefactory.com' + url.group(1)
+ self.html = self.load(url, decode=True)
+
+ # Free downloads wait time
+ waittime = re.search(r'id="startWait" value="(\d+)"', self.html)
+ if not waittime:
+ self.parseError('Unable to detect wait time')
+ self.wait(int(waittime.group(1)))
+
+ # Parse the direct link and download it
+ direct = re.search(r'data-href(?:-direct)?="(.*)" class="button', self.html)
+ if not direct:
+ self.parseError('Unable to detect free direct link')
+ direct = direct.group(1)
+
+ self.logDebug('DIRECT LINK: ' + direct)
+ self.download(direct, disposition=True)
+
+ check = self.checkDownload({"multiple": "You are currently downloading too many files at once.",
+ "error": '<div id="errorMessage">'})
+
+ if check == "multiple":
+ self.logDebug("Parallel downloads detected; waiting 15 minutes")
+ self.retry(wait_time=15 * 60, reason="Parallel downloads")
+ elif check == "error":
+ self.fail("Unknown error")
+
+ def handlePremium(self):
+ header = self.load(self.pyfile.url, just_header=True)
+ if 'location' in header:
+ url = header['location'].strip()
+ if not url.startswith("http://"):
+ url = "http://www.filefactory.com" + url
+ elif 'content-disposition' in header:
+ url = self.pyfile.url
+ else:
+ self.logInfo('You could enable "Direct Downloads" on http://filefactory.com/account/')
+ html = self.load(self.pyfile.url)
+ m = re.search(self.LINK_PATTERN, html)
+ if m:
+ url = m.group(1)
+ else:
+ self.parseError('Unable to detect premium direct link')
+
+ self.logDebug('DIRECT PREMIUM LINK: ' + url)
+ self.download(url, disposition=True)
diff --git a/pyload/plugins/hoster/FilejungleCom.py b/pyload/plugins/hoster/FilejungleCom.py
new file mode 100644
index 000000000..0bbc7502e
--- /dev/null
+++ b/pyload/plugins/hoster/FilejungleCom.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.FileserveCom import FileserveCom, checkFile
+from pyload.plugins.Plugin import chunks
+
+
+class FilejungleCom(FileserveCom):
+ __name__ = "FilejungleCom"
+ __type__ = "hoster"
+ __version__ = "0.51"
+
+ __pattern__ = r'http://(?:www\.)?filejungle\.com/f/(?P<id>[^/]+).*'
+
+ __description__ = """Filejungle.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ URLS = ["http://www.filejungle.com/f/", "http://www.filejungle.com/check_links.php",
+ "http://www.filejungle.com/checkReCaptcha.php"]
+ LINKCHECK_TR = r'<li>\s*(<div class="col1">.*?)</li>'
+ LINKCHECK_TD = r'<div class="(?:col )?col\d">(?:<[^>]*>|&nbsp;)*([^<]*)'
+
+ LONG_WAIT_PATTERN = r'<h1>Please wait for (\d+) (\w+)\s*to download the next file\.</h1>'
+
+
+def getInfo(urls):
+ for chunk in chunks(urls, 100):
+ yield checkFile(FilejungleCom, chunk)
diff --git a/pyload/plugins/hoster/FileomCom.py b/pyload/plugins/hoster/FileomCom.py
new file mode 100644
index 000000000..11052e289
--- /dev/null
+++ b/pyload/plugins/hoster/FileomCom.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://fileom.com/gycaytyzdw3g/random.bin.html
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class FileomCom(XFileSharingPro):
+ __name__ = "FileomCom"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?fileom\.com/\w+'
+
+ __description__ = """Fileom.com hoster plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ HOSTER_NAME = "fileom.com"
+
+ FILE_URL_REPLACEMENTS = [(r'/$', "")]
+ SH_COOKIES = [(".fileom.com", "lang", "english")]
+
+ FILE_NAME_PATTERN = r'Filename: <span>(?P<N>.+?)<'
+ FILE_SIZE_PATTERN = r'File Size: <span class="size">(?P<S>[\d\.]+) (?P<U>\w+)'
+
+ ERROR_PATTERN = r'class=["\']err["\'][^>]*>(.*?)(?:\'|</)'
+
+ LINK_PATTERN = r"var url2 = '(.+?)';"
+
+
+ def setup(self):
+ self.resumeDownload = self.premium
+ self.multiDL = True
+ self.chunkLimit = 1
+
+
+getInfo = create_getInfo(FileomCom)
diff --git a/pyload/plugins/hoster/FilepostCom.py b/pyload/plugins/hoster/FilepostCom.py
new file mode 100644
index 000000000..ac2ae4845
--- /dev/null
+++ b/pyload/plugins/hoster/FilepostCom.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FilepostCom(SimpleHoster):
+ __name__ = "FilepostCom"
+ __type__ = "hoster"
+ __version__ = "0.28"
+
+ __pattern__ = r'https?://(?:www\.)?(?:filepost\.com/files|fp.io)/([^/]+).*'
+
+ __description__ = """Filepost.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_INFO_PATTERN = r'<input type="text" id="url" value=\'<a href[^>]*>(?P<N>[^>]+?) - (?P<S>[0-9\.]+ [kKMG]i?B)</a>\' class="inp_text"/>'
+ OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>|<div class="file_info file_info_deleted">'
+
+ PREMIUM_ONLY_PATTERN = r'members only. Please upgrade to premium|a premium membership is required to download this file'
+ RECAPTCHA_KEY_PATTERN = r"Captcha.init\({\s*key:\s*'([^']+)'"
+ FLP_TOKEN_PATTERN = r"set_store_options\({token: '([^']+)'"
+
+
+ def handleFree(self):
+ # Find token and captcha key
+ file_id = re.match(self.__pattern__, self.pyfile.url).group(1)
+
+ m = re.search(self.FLP_TOKEN_PATTERN, self.html)
+ if m is None:
+ self.parseError("Token")
+ flp_token = m.group(1)
+
+ m = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
+ if m is None:
+ self.parseError("Captcha key")
+ captcha_key = m.group(1)
+
+ # Get wait time
+ get_dict = {'SID': self.req.cj.getCookie('SID'), 'JsHttpRequest': str(int(time() * 10000)) + '-xml'}
+ post_dict = {'action': 'set_download', 'token': flp_token, 'code': file_id}
+ wait_time = int(self.getJsonResponse(get_dict, post_dict, 'wait_time'))
+
+ if wait_time > 0:
+ self.wait(wait_time)
+
+ post_dict = {"token": flp_token, "code": file_id, "file_pass": ''}
+
+ if 'var is_pass_exists = true;' in self.html:
+ # Solve password
+ for file_pass in self.getPassword().splitlines():
+ get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
+ post_dict['file_pass'] = file_pass
+ self.logInfo("Password protected link, trying " + file_pass)
+
+ download_url = self.getJsonResponse(get_dict, post_dict, 'link')
+ if download_url:
+ break
+
+ else:
+ self.fail("No or incorrect password")
+
+ else:
+ # Solve recaptcha
+ recaptcha = ReCaptcha(self)
+
+ for i in xrange(5):
+ get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
+ if i:
+ post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field'] = recaptcha.challenge(
+ captcha_key)
+ self.logDebug(u"RECAPTCHA: %s : %s : %s" % (
+ captcha_key, post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field']))
+
+ download_url = self.getJsonResponse(get_dict, post_dict, 'link')
+ if download_url:
+ if i:
+ self.correctCaptcha()
+ break
+ elif i:
+ self.invalidCaptcha()
+
+ else:
+ self.fail("Invalid captcha")
+
+ # Download
+ self.download(download_url)
+
+ def getJsonResponse(self, get_dict, post_dict, field):
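+ # Query the filepost.com JSON API and return the requested answer field;
+ # None signals a wrong password or captcha code so the caller can retry.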
+ json_response = json_loads(self.load('https://filepost.com/files/get/', get=get_dict, post=post_dict))
+ self.logDebug(json_response)
+
+ if not 'js' in json_response:
+ self.parseError('JSON %s 1' % field)
+
+ # js_answer was replaced by json_response['js'] because js_answer was never defined.
+ # The previous author accessed both json_response['js']['error'] and js_answer['error'];
+ # see the two lines commented out with "# ~?".
+ if 'error' in json_response['js']:
+ if json_response['js']['error'] == 'download_delay':
+ self.retry(wait_time=json_response['js']['params']['next_download'])
+ # ~? self.retry(wait_time=js_answer['params']['next_download'])
+ elif 'Wrong file password' in json_response['js']['error']:
+ return None
+ elif 'You entered a wrong CAPTCHA code' in json_response['js']['error']:
+ return None
+ elif 'CAPTCHA Code nicht korrekt' in json_response['js']['error']:
+ return None
+ elif 'CAPTCHA' in json_response['js']['error']:
+ self.logDebug('error response is unknown, but mentions CAPTCHA -> return None')
+ return None
+ else:
+ self.fail(json_response['js']['error'])
+ # ~? self.fail(js_answer['error'])
+
+ if not 'answer' in json_response['js'] or not field in json_response['js']['answer']:
+ self.parseError('JSON %s 2' % field)
+
+ return json_response['js']['answer'][field]
+
+
+getInfo = create_getInfo(FilepostCom)
diff --git a/pyload/plugins/hoster/FilerNet.py b/pyload/plugins/hoster/FilerNet.py
new file mode 100644
index 000000000..5f1b6bea8
--- /dev/null
+++ b/pyload/plugins/hoster/FilerNet.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://filer.net/get/ivgf5ztw53et3ogd
+# http://filer.net/get/hgo14gzcng3scbvv
+
+import pycurl
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FilerNet(SimpleHoster):
+ __name__ = "FilerNet"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?filer\.net/get/(\w+)'
+
+ __description__ = """Filer.net hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ FILE_INFO_PATTERN = r'<h1 class="page-header">Free Download (?P<N>\S+) <small>(?P<S>[\w.]+) (?P<U>\w+)</small></h1>'
+ OFFLINE_PATTERN = r'Nicht gefunden'
+ RECAPTCHA_KEY = "6LcFctISAAAAAAgaeHgyqhNecGJJRnxV1m_vAz3V"
+ LINK_PATTERN = r'href="([^"]+)">Get download</a>'
+
+
+ def process(self, pyfile):
+ if self.premium and (not self.SH_CHECK_TRAFFIC or self.checkTrafficLeft()):
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def handleFree(self):
+ self.req.setOption("timeout", 120)
+ self.html = self.load(self.pyfile.url, decode=not self.SH_BROKEN_ENCODING, cookies=self.SH_COOKIES)
+
+ # Wait between downloads
+ m = re.search(r'musst du <span id="time">(\d+)</span> Sekunden warten', self.html)
+ if m:
+ waittime = int(m.group(1))
+ self.retry(3, waittime, "Wait between free downloads")
+
+ self.getFileInfo()
+
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ inputs = self.parseHtmlForm(input_names='token')[1]
+ if 'token' not in inputs:
+ self.parseError('Unable to detect token')
+ token = inputs['token']
+ self.logDebug('Token: ' + token)
+
+ self.html = self.load(self.pyfile.url, post={'token': token}, decode=True)
+
+ inputs = self.parseHtmlForm(input_names='hash')[1]
+ if 'hash' not in inputs:
+ self.parseError('Unable to detect hash')
+ hash_data = inputs['hash']
+ self.logDebug('Hash: ' + hash_data)
+
+ downloadURL = r''
+ recaptcha = ReCaptcha(self)
+ for _ in xrange(5):
+ challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ post_data = {'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': response,
+ 'hash': hash_data}
+
+ # Workaround for 0.4.9 just_header issue. In 0.5 clean the code using just_header
+ self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 0)
+ self.load(self.pyfile.url, post=post_data)
+ self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 1)
+
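+ # A correct captcha answer triggers a 302 redirect; since FOLLOWLOCATION is off, read the target from the Location header ourselves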
+ if 'location' in self.req.http.header.lower():
+ location = re.search(r'location: (\S+)', self.req.http.header, re.I).group(1)
+ downloadURL = urljoin('http://filer.net', location)
+ self.correctCaptcha()
+ break
+ else:
+ self.logInfo('Wrong captcha')
+ self.invalidCaptcha()
+
+ if not downloadURL:
+ self.fail("No Download url retrieved/all captcha attempts failed")
+
+ self.download(downloadURL, disposition=True)
+
+ def handlePremium(self):
+ header = self.load(self.pyfile.url, just_header=True)
+ if 'location' in header: # Direct Download ON
+ dl = self.pyfile.url
+ else: # Direct Download OFF
+ html = self.load(self.pyfile.url)
+ m = re.search(self.LINK_PATTERN, html)
+ if m is None:
+ self.parseError("Unable to detect direct link, try to enable 'Direct download' in your user settings")
+ dl = 'http://filer.net' + m.group(1)
+
+ self.logDebug('Direct link: ' + dl)
+ self.download(dl, disposition=True)
+
+
+getInfo = create_getInfo(FilerNet)
diff --git a/pyload/plugins/hoster/FilerioCom.py b/pyload/plugins/hoster/FilerioCom.py
new file mode 100644
index 000000000..31d04b0ee
--- /dev/null
+++ b/pyload/plugins/hoster/FilerioCom.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class FilerioCom(XFileSharingPro):
+ __name__ = "FilerioCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?(filerio\.(in|com)|filekeen\.com)/\w{12}'
+
+ __description__ = """FileRio.in hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ HOSTER_NAME = "filerio.in"
+
+ OFFLINE_PATTERN = r'<b>&quot;File Not Found&quot;</b>|File has been removed due to Copyright Claim'
+ FILE_URL_REPLACEMENTS = [(r'http://.*?/', 'http://filerio.in/')]
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = self.premium
+
+
+getInfo = create_getInfo(FilerioCom)
diff --git a/pyload/plugins/hoster/FilesMailRu.py b/pyload/plugins/hoster/FilesMailRu.py
new file mode 100644
index 000000000..01d9c256a
--- /dev/null
+++ b/pyload/plugins/hoster/FilesMailRu.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+
+
+def getInfo(urls):
+ result = []
+ for chunk in chunks(urls, 10):
+ for url in chunk:
+ src = getURL(url)
+ if r'<div class="errorMessage mb10">' in src:
+ result.append((url, 0, 1, url))
+ elif r'Page cannot be displayed' in src:
+ result.append((url, 0, 1, url))
+ else:
+ try:
+ url_pattern = '<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
+ file_name = re.search(url_pattern, src).group(0).split(', event)">')[1].split('</a>')[0]
+ result.append((file_name, 0, 2, url))
+ except:
+ pass
+
+ # status 1=OFFLINE, 2=OK, 3=UNKNOWN
+ # result.append((#name,#size,#status,#url))
+ yield result
+
+
+class FilesMailRu(Hoster):
+ __name__ = "FilesMailRu"
+ __type__ = "hoster"
+ __version__ = "0.31"
+
+ __pattern__ = r'http://(?:www\.)?files\.mail\.ru/.*'
+
+ __description__ = """Files.mail.ru hoster plugin"""
+ __author_name__ = "oZiRiz"
+ __author_mail__ = "ich@oziriz.de"
+
+
+ def setup(self):
+ if not self.account:
+ self.multiDL = False
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url)
+ self.url_pattern = '<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
+
+ # marks the file as "offline" when the pattern is found on the HTML page
+ if r'<div class="errorMessage mb10">' in self.html:
+ self.offline()
+
+ elif r'Page cannot be displayed' in self.html:
+ self.offline()
+
+ # the filename that will be shown in the list (e.g. test.part1.rar)
+ pyfile.name = self.getFileName()
+
+ # prepare and download
+ if not self.account:
+ self.prepare()
+ self.download(self.getFileUrl())
+ self.myPostProcess()
+ else:
+ self.download(self.getFileUrl())
+ self.myPostProcess()
+
+ def prepare(self):
+ """You have to wait some seconds. Otherwise you will get a 40Byte HTML Page instead of the file you expected"""
+ self.setWait(10)
+ self.wait()
+ return True
+
+ def getFileUrl(self):
+ """gives you the URL to the file. Extracted from the Files.mail.ru HTML-page stored in self.html"""
+ return re.search(self.url_pattern, self.html).group(0).split('<a href="')[1].split('" onclick="return Act')[0]
+
+ def getFileName(self):
+ """gives you the Name for each file. Also extracted from the HTML-Page"""
+ return re.search(self.url_pattern, self.html).group(0).split(', event)">')[1].split('</a>')[0]
+
+ def myPostProcess(self):
+ # Searches the downloaded file for HTML code. Sometimes the redirect
+ # does not work (possibly a curl problem): only a small HTML file is
+ # saved, yet the download is marked as "finished", so it has to be
+ # restarted. The only downside is for users who actually want to
+ # download an HTML file (one in a million ;-) ).
+ #
+ # The maximum upload size on files.mail.ru is currently 100 MB, so every
+ # download is checked, because some downloads contain the HTML text
+ # followed by ~60 MB of zeros inside a xyzfile.part1.rar file
+ # (loading 100 MB into RAM is not an option).
+ check = self.checkDownload({"html": "<meta name="}, read_size=50000)
+ if check == "html":
+ self.logInfo(_("There was HTML code in the downloaded file (%s) - redirect error? "
+ "The download will be restarted.") % self.pyfile.name)
+ self.retry()
diff --git a/pyload/plugins/hoster/FileserveCom.py b/pyload/plugins/hoster/FileserveCom.py
new file mode 100644
index 000000000..15830b759
--- /dev/null
+++ b/pyload/plugins/hoster/FileserveCom.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.common.json_layer import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.utils import parseFileSize
+
+
+def checkFile(plugin, urls):
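+ # Queries the fileserve.com link checker (URLS[1]) with the whole URL batch and parses the result table into (name, size, status, url) tuples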
+ html = getURL(plugin.URLS[1], post={"urls": "\n".join(urls)}, decode=True)
+
+ file_info = []
+ for li in re.finditer(plugin.LINKCHECK_TR, html, re.DOTALL):
+ try:
+ cols = re.findall(plugin.LINKCHECK_TD, li.group(1))
+ if cols:
+ file_info.append((
+ cols[1] if cols[1] != '--' else cols[0],
+ parseFileSize(cols[2]) if cols[2] != '--' else 0,
+ 2 if cols[3].startswith('Available') else 1,
+ cols[0]))
+ except Exception, e:
+ continue
+
+ return file_info
+
+
+class FileserveCom(Hoster):
+ __name__ = "FileserveCom"
+ __type__ = "hoster"
+ __version__ = "0.52"
+
+ __pattern__ = r'http://(?:www\.)?fileserve\.com/file/(?P<id>[^/]+).*'
+
+ __description__ = """Fileserve.com hoster plugin"""
+ __author_name__ = ("jeix", "mkaay", "Paul King", "zoidberg")
+ __author_mail__ = ("jeix@hasnomail.de", "mkaay@mkaay.de", "", "zoidberg@mujmail.cz")
+
+ URLS = ["http://www.fileserve.com/file/", "http://www.fileserve.com/link-checker.php",
+ "http://www.fileserve.com/checkReCaptcha.php"]
+ LINKCHECK_TR = r'<tr>\s*(<td>http://www.fileserve\.com/file/.*?)</tr>'
+ LINKCHECK_TD = r'<td>(?:<[^>]*>|&nbsp;)*([^<]*)'
+
+ CAPTCHA_KEY_PATTERN = r"var reCAPTCHA_publickey='(?P<key>[^']+)'"
+ LONG_WAIT_PATTERN = r'<li class="title">You need to wait (\d+) (\w+) to start another download\.</li>'
+ LINK_EXPIRED_PATTERN = r'Your download link has expired'
+ DAILY_LIMIT_PATTERN = r'Your daily download limit has been reached'
+ NOT_LOGGED_IN_PATTERN = r'<form (name="loginDialogBoxForm"|id="login_form")|<li><a href="/login.php">Login</a></li>'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = self.premium
+
+ self.file_id = re.match(self.__pattern__, self.pyfile.url).group('id')
+ self.url = "%s%s" % (self.URLS[0], self.file_id)
+ self.logDebug("File ID: %s URL: %s" % (self.file_id, self.url))
+
+ def process(self, pyfile):
+ pyfile.name, pyfile.size, status, self.url = checkFile(self, [self.url])[0]
+ if status != 2:
+ self.offline()
+ self.logDebug("File Name: %s Size: %d" % (pyfile.name, pyfile.size))
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def handleFree(self):
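+ # First ask the server whether a download is allowed; the JSON reply carries either a "fail" reason or a "success" action (captcha and/or wait timer)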
+ self.html = self.load(self.url)
+ action = self.load(self.url, post={"checkDownload": "check"}, decode=True)
+ action = json_loads(action)
+ self.logDebug(action)
+
+ if "fail" in action:
+ if action['fail'] == "timeLimit":
+ self.html = self.load(self.url, post={"checkDownload": "showError", "errorType": "timeLimit"},
+ decode=True)
+
+ self.doLongWait(re.search(self.LONG_WAIT_PATTERN, self.html))
+
+ elif action['fail'] == "parallelDownload":
+ self.logWarning(_("Parallel download error, now waiting 60s."))
+ self.retry(wait_time=60, reason="parallelDownload")
+
+ else:
+ self.fail("Download check returned %s" % action['fail'])
+
+ elif "success" in action:
+ if action['success'] == "showCaptcha":
+ self.doCaptcha()
+ self.doTimmer()
+ elif action['success'] == "showTimmer":
+ self.doTimmer()
+
+ else:
+ self.fail("Unknown server response")
+
+ # show download link
+ response = self.load(self.url, post={"downloadLink": "show"}, decode=True)
+ self.logDebug("show downloadLink response : %s" % response)
+ if "fail" in response:
+ self.fail("Couldn't retrieve download url")
+
+ # this may either download our file or forward us to an error page
+ self.download(self.url, post={"download": "normal"})
+ self.logDebug(self.req.http.lastEffectiveURL)
+
+ check = self.checkDownload({"expired": self.LINK_EXPIRED_PATTERN,
+ "wait": re.compile(self.LONG_WAIT_PATTERN),
+ "limit": self.DAILY_LIMIT_PATTERN})
+
+ if check == "expired":
+ self.logDebug("Download link was expired")
+ self.retry()
+ elif check == "wait":
+ self.doLongWait(self.lastCheck)
+ elif check == "limit":
+ self.logWarning("Download limited reached for today")
+ self.setWait(secondsToMidnight(gmt=2), True)
+ self.wait()
+ self.retry()
+
+ self.thread.m.reconnecting.wait(3) # Ease issue with later downloads appearing to be in parallel
+
+ def doTimmer(self):
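+ # Fetch the wait time: FilejungleCom (which reuses this plugin) answers with JSON containing "waitTime", fileserve returns the plain number of seconds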
+ response = self.load(self.url, post={"downloadLink": "wait"}, decode=True)
+ self.logDebug("wait response : %s" % response[:80])
+
+ if "fail" in response:
+ self.fail("Failed getting wait time")
+
+ if self.__name__ == "FilejungleCom":
+ m = re.search(r'"waitTime":(\d+)', response)
+ if m is None:
+ self.fail("Cannot get wait time")
+ wait_time = int(m.group(1))
+ else:
+ wait_time = int(response) + 3
+
+ self.setWait(wait_time)
+ self.wait()
+
+ def doCaptcha(self):
+ captcha_key = re.search(self.CAPTCHA_KEY_PATTERN, self.html).group("key")
+ recaptcha = ReCaptcha(self)
+
+ for _ in xrange(5):
+ challenge, code = recaptcha.challenge(captcha_key)
+
+ response = json_loads(self.load(self.URLS[2],
+ post={'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': code,
+ 'recaptcha_shortencode_field': self.file_id}))
+ self.logDebug("reCaptcha response : %s" % response)
+ if not response['success']:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail("Invalid captcha")
+
+ def doLongWait(self, m):
+ wait_time = (int(m.group(1)) * {'seconds': 1, 'minutes': 60, 'hours': 3600}[m.group(2)]) if m else 12 * 60
+ self.setWait(wait_time, True)
+ self.wait()
+ self.retry()
+
+ def handlePremium(self):
+ premium_url = None
+ if self.__name__ == "FileserveCom":
+ #try api download
+ response = self.load("http://app.fileserve.com/api/download/premium/",
+ post={"username": self.user,
+ "password": self.account.getAccountData(self.user)['password'],
+ "shorten": self.file_id},
+ decode=True)
+ if response:
+ response = json_loads(response)
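+ # Error codes as handled below: "302" is success and carries the premium link in 'next'; 305/500 mean temporarily unavailable, 403/605 an account problem, 606-608 an offline file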
+ if response['error_code'] == "302":
+ premium_url = response['next']
+ elif response['error_code'] in ["305", "500"]:
+ self.tempOffline()
+ elif response['error_code'] in ["403", "605"]:
+ self.resetAccount()
+ elif response['error_code'] in ["606", "607", "608"]:
+ self.offline()
+ else:
+ self.logError(response['error_code'], response['error_message'])
+
+ self.download(premium_url or self.pyfile.url)
+
+ if not premium_url:
+ check = self.checkDownload({"login": re.compile(self.NOT_LOGGED_IN_PATTERN)})
+
+ if check == "login":
+ self.account.relogin(self.user)
+ self.retry(reason=_("Not logged in."))
+
+
+def getInfo(urls):
+ for chunk in chunks(urls, 100):
+ yield checkFile(FileserveCom, chunk)
diff --git a/pyload/plugins/hoster/FileshareInUa.py b/pyload/plugins/hoster/FileshareInUa.py
new file mode 100644
index 000000000..162217de2
--- /dev/null
+++ b/pyload/plugins/hoster/FileshareInUa.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import parseFileSize
+
+
+class FileshareInUa(Hoster):
+ __name__ = "FileshareInUa"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?fileshare.in.ua/[A-Za-z0-9]+'
+
+ __description__ = """Fileshare.in.ua hoster plugin"""
+ __author_name__ = "fwannmacher"
+ __author_mail__ = "felipe@warhammerproject.com"
+
+ PATTERN_FILENAME = r'<h3 class="b-filename">(.*?)</h3>'
+ PATTERN_FILESIZE = r'<b class="b-filesize">(.*?)</b>'
+ PATTERN_OFFLINE = r"This file doesn't exist, or has been removed."
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.html = self.load(pyfile.url, decode=True)
+
+ if not self._checkOnline():
+ self.offline()
+
+ pyfile.name = self._getName()
+
+ link = self._getLink()
+
+ if not link.startswith('http://'):
+ link = "http://fileshare.in.ua" + link
+
+ self.download(link)
+
+ def _checkOnline(self):
+ if re.search(self.PATTERN_OFFLINE, self.html):
+ return False
+ else:
+ return True
+
+ def _getName(self):
+ name = re.search(self.PATTERN_FILENAME, self.html)
+ if name is None:
+ self.fail("%s: Plugin broken." % self.__name__)
+
+ return name.group(1)
+
+ def _getLink(self):
+ return re.search("<a href=\"(/get/.+)\" class=\"b-button m-blue m-big\" >", self.html).group(1)
+
+
+def getInfo(urls):
+ result = []
+
+ for url in urls:
+ html = getURL(url)
+
+ if re.search(FileshareInUa.PATTERN_OFFLINE, html):
+ result.append((url, 0, 1, url))
+ else:
+ name = re.search(FileshareInUa.PATTERN_FILENAME, html)
+
+ if name is None:
+ result.append((url, 0, 1, url))
+ continue
+
+ name = name.group(1)
+ size = re.search(FileshareInUa.PATTERN_FILESIZE, html)
+ size = parseFileSize(size.group(1))
+
+ result.append((name, size, 3, url))
+
+ yield result
diff --git a/pyload/plugins/hoster/FilezyNet.py b/pyload/plugins/hoster/FilezyNet.py
new file mode 100644
index 000000000..eeba4add0
--- /dev/null
+++ b/pyload/plugins/hoster/FilezyNet.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class FilezyNet(XFileSharingPro):
+ __name__ = "FilezyNet"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?filezy.net/.*/.*.html'
+
+ __description__ = """Filezy.net hoster plugin"""
+ __author_name__ = None
+ __author_mail__ = None
+
+ HOSTER_NAME = "filezy.net"
+
+ FILE_SIZE_PATTERN = r'<span class="plansize">(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</span>'
+ WAIT_PATTERN = r'<div id="countdown_str" class="seconds">\n<!--Wait--> <span id=".*?">(\d+)</span>'
+ DOWNLOAD_JS_PATTERN = r"<script type='text/javascript'>eval(.*)"
+
+
+ def setup(self):
+ self.resumeDownload = True
+ self.multiDL = self.premium
+
+ def getDownloadLink(self):
+ self.logDebug("Getting download link")
+
+ data = self.getPostParameters()
+ self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
+
+ obfuscated_js = re.search(self.DOWNLOAD_JS_PATTERN, self.html)
+ dl_file_now = self.js.eval(obfuscated_js.group(1))
+ link = re.search(self.LINK_PATTERN, dl_file_now)
+ return link.group(1)
+
+
+getInfo = create_getInfo(FilezyNet)
diff --git a/pyload/plugins/hoster/FiredriveCom.py b/pyload/plugins/hoster/FiredriveCom.py
new file mode 100644
index 000000000..a9d62bb75
--- /dev/null
+++ b/pyload/plugins/hoster/FiredriveCom.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FiredriveCom(SimpleHoster):
+ __name__ = "FiredriveCom"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?(firedrive|putlocker)\.com/(mobile/)?(file|embed)/(?P<ID>\w+)'
+
+ __description__ = """Firedrive.com hoster plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ FILE_NAME_PATTERN = r'<b>Name:</b> (?P<N>.+) <br>'
+ FILE_SIZE_PATTERN = r'<b>Size:</b> (?P<S>[\d.]+) (?P<U>[a-zA-Z]+) <br>'
+ OFFLINE_PATTERN = r'class="sad_face_image"|>No such page here.<'
+ TEMP_OFFLINE_PATTERN = r'>(File Temporarily Unavailable|Server Error. Try again later)'
+
+ FILE_URL_REPLACEMENTS = [(__pattern__, r'http://www.firedrive.com/file/\g<ID>')]
+
+ LINK_PATTERN = r'<a href="(https?://dl\.firedrive\.com/\?key=.+?)"'
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+ self.chunkLimit = -1
+
+ def handleFree(self):
+ link = self._getLink()
+ self.logDebug("Direct link: " + link)
+ self.download(link, disposition=True)
+
+ def _getLink(self):
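+ # The direct link is either already present in the page or appears after posting the "confirm" form value back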
+ f = re.search(self.LINK_PATTERN, self.html)
+ if f:
+ return f.group(1)
+ else:
+ self.html = self.load(self.pyfile.url, post={"confirm": re.search(r'name="confirm" value="(.+?)"', self.html).group(1)})
+ f = re.search(self.LINK_PATTERN, self.html)
+ if f:
+ return f.group(1)
+ else:
+ self.parseError("Direct download link not found")
+
+
+getInfo = create_getInfo(FiredriveCom)
diff --git a/pyload/plugins/hoster/FlyFilesNet.py b/pyload/plugins/hoster/FlyFilesNet.py
new file mode 100644
index 000000000..d8d6efb7e
--- /dev/null
+++ b/pyload/plugins/hoster/FlyFilesNet.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote_plus
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.SimpleHoster import SimpleHoster
+
+
+class FlyFilesNet(SimpleHoster):
+ __name__ = "FlyFilesNet"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?flyfiles\.net/.*'
+
+ __description__ = """FlyFiles.net hoster plugin"""
+ __author_name__ = None
+ __author_mail__ = None
+
+ SESSION_PATTERN = r'flyfiles\.net/(.*)/.*'
+ FILE_NAME_PATTERN = r'flyfiles\.net/.*/(.*)'
+
+
+ def process(self, pyfile):
+ name = re.search(self.FILE_NAME_PATTERN, pyfile.url).group(1)
+ pyfile.name = unquote_plus(name)
+
+ session = re.search(self.SESSION_PATTERN, pyfile.url).group(1)
+
+ url = "http://flyfiles.net"
+
+ # get download URL
+ parsed_url = getURL(url, post={"getDownLink": session}, cookies=True)
+ self.logDebug("Parsed URL: %s" % parsed_url)
+
+ if parsed_url == '#downlink|' or parsed_url == "#downlink|#":
+ self.logWarning("Could not get the download URL. Please wait 10 minutes.")
+ self.wait(10 * 60, True)
+ self.retry()
+
+ download_url = parsed_url.replace('#downlink|', '')
+
+ self.logDebug("Download URL: %s" % download_url)
+ self.download(download_url)
diff --git a/pyload/plugins/hoster/FourSharedCom.py b/pyload/plugins/hoster/FourSharedCom.py
new file mode 100644
index 000000000..e2cb980a4
--- /dev/null
+++ b/pyload/plugins/hoster/FourSharedCom.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FourSharedCom(SimpleHoster):
+ __name__ = "FourSharedCom"
+ __type__ = "hoster"
+ __version__ = "0.29"
+
+ __pattern__ = r'https?://(?:www\.)?4shared(\-china)?\.com/(account/)?(download|get|file|document|photo|video|audio|mp3|office|rar|zip|archive|music)/.+?/.*'
+
+ __description__ = """4Shared.com hoster plugin"""
+ __author_name__ = ("jeix", "zoidberg")
+ __author_mail__ = ("jeix@hasnomail.de", "zoidberg@mujmail.cz")
+
+ FILE_NAME_PATTERN = r'<meta name="title" content="(?P<N>.+?)"'
+ FILE_SIZE_PATTERN = r'<span title="Size: (?P<S>[0-9,.]+) (?P<U>[kKMG])i?B">'
+ OFFLINE_PATTERN = r'The file link that you requested is not valid\.|This file was deleted.'
+
+ FILE_NAME_REPLACEMENTS = [(r"&#(\d+).", lambda m: unichr(int(m.group(1))))]
+ FILE_SIZE_REPLACEMENTS = [(",", "")]
+
+ DOWNLOAD_URL_PATTERN = r'name="d3link" value="(.*?)"'
+ DOWNLOAD_BUTTON_PATTERN = r'id="btnLink" href="(.*?)"'
+ FID_PATTERN = r'name="d3fid" value="(.*?)"'
+
+
+ def handleFree(self):
+ if not self.account:
+ self.fail("User not logged in")
+
+ m = re.search(self.DOWNLOAD_BUTTON_PATTERN, self.html)
+ if m:
+ link = m.group(1)
+ else:
+ link = re.sub(r'/(download|get|file|document|photo|video|audio)/', r'/get/', self.pyfile.url)
+
+ self.html = self.load(link)
+
+ m = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
+ if m is None:
+ self.parseError('Download link')
+ link = m.group(1)
+
+ try:
+ m = re.search(self.FID_PATTERN, self.html)
+ response = self.load('http://www.4shared.com/web/d2/getFreeDownloadLimitInfo?fileId=%s' % m.group(1))
+ self.logDebug(response)
+ except:
+ pass
+
+ self.wait(20)
+ self.download(link)
+
+
+getInfo = create_getInfo(FourSharedCom)
diff --git a/pyload/plugins/hoster/FreakshareCom.py b/pyload/plugins/hoster/FreakshareCom.py
new file mode 100644
index 000000000..979b3c5f2
--- /dev/null
+++ b/pyload/plugins/hoster/FreakshareCom.py
@@ -0,0 +1,173 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+
+
+class FreakshareCom(Hoster):
+ __name__ = "FreakshareCom"
+ __type__ = "hoster"
+ __version__ = "0.39"
+
+ __pattern__ = r'http://(?:www\.)?freakshare\.(net|com)/files/\S*?/'
+
+ __description__ = """Freakshare.com hoster plugin"""
+ __author_name__ = ("sitacuisses", "spoob", "mkaay", "Toilal")
+ __author_mail__ = ("sitacuisses@yahoo.de", "spoob@pyload.org", "mkaay@mkaay.de", "toilal.dev@gmail.com")
+
+
+ def setup(self):
+ self.multiDL = False
+ self.req_opts = []
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+
+ pyfile.url = pyfile.url.replace("freakshare.net/", "freakshare.com/")
+
+ if self.account:
+ self.html = self.load(pyfile.url, cookies=False)
+ pyfile.name = self.get_file_name()
+ self.download(pyfile.url)
+
+ else:
+ self.prepare()
+ self.get_file_url()
+
+ self.download(pyfile.url, post=self.req_opts)
+
+ check = self.checkDownload({"bad": "bad try",
+ "paralell": "> Sorry, you cant download more then 1 files at time. <",
+ "empty": "Warning: Unknown: Filename cannot be empty",
+ "wrong_captcha": "Wrong Captcha!",
+ "downloadserver": "No Downloadserver. Please try again later!"})
+
+ if check == "bad":
+ self.fail("Bad Try.")
+ elif check == "paralell":
+ self.setWait(300, True)
+ self.wait()
+ self.retry()
+ elif check == "empty":
+ self.fail("File not downloadable")
+ elif check == "wrong_captcha":
+ self.invalidCaptcha()
+ self.retry()
+ elif check == "downloadserver":
+ self.retry(5, 15 * 60, "No Download server")
+
+ def prepare(self):
+ pyfile = self.pyfile
+
+ self.wantReconnect = False
+
+ self.download_html()
+
+ if not self.file_exists():
+ self.offline()
+
+ self.setWait(self.get_waiting_time())
+
+ pyfile.name = self.get_file_name()
+ pyfile.size = self.get_file_size()
+
+ self.wait()
+
+ return True
+
+ def download_html(self):
+ self.load("http://freakshare.com/index.php", {"language": "EN"}) # Set english language in server session
+ self.html = self.load(self.pyfile.url)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+ if not self.wantReconnect:
+ self.req_opts = self.get_download_options() # get the Post options for the Request
+ #file_url = self.pyfile.url
+ #return file_url
+ else:
+ self.offline()
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+ if not self.wantReconnect:
+ file_name = re.search(r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">([^ ]+)", self.html)
+ if file_name is not None:
+ file_name = file_name.group(1)
+ else:
+ file_name = self.pyfile.url
+ return file_name
+ else:
+ return self.pyfile.url
+
+ def get_file_size(self):
+ size = 0
+ if not self.html:
+ self.download_html()
+ if not self.wantReconnect:
+ file_size_check = re.search(
+ r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">[^ ]+ - ([^ ]+) (\w\w)yte", self.html)
+ if file_size_check is not None:
+ units = float(file_size_check.group(1).replace(",", ""))
+ exp = {'KB': 1, 'MB': 2, 'GB': 3}[file_size_check.group(2)]
+ size = int(units * 1024 ** exp)
+
+ return size
+
+ def get_waiting_time(self):
+ if not self.html:
+ self.download_html()
+
+ if "Your Traffic is used up for today" in self.html:
+ self.wantReconnect = True
+ return secondsToMidnight(gmt=2)
+
+ timestring = re.search('\s*var\s(?:downloadWait|time)\s=\s(\d*)[.\d]*;', self.html)
+ if timestring:
+ return int(timestring.group(1)) + 1 # add 1 sec as tenths of seconds are cut off
+ else:
+ return 60
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+ if re.search(r"This file does not exist!", self.html) is not None:
+ return False
+ else:
+ return True
+
+ def get_download_options(self):
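+ # Collect the hidden fields of the "Free Download" form, submit them to reach the real download page, then collect that page's fields and solve the ReCaptcha if one is embedded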
+ re_envelope = re.search(r".*?value=\"Free\sDownload\".*?\n*?(.*?<.*?>\n*)*?\n*\s*?</form>",
+ self.html).group(0) # get the whole request
+ to_sort = re.findall(r"<input\stype=\"hidden\"\svalue=\"(.*?)\"\sname=\"(.*?)\"\s\/>", re_envelope)
+ request_options = dict((n, v) for (v, n) in to_sort)
+
+ herewego = self.load(self.pyfile.url, None, request_options) # the actual download-Page
+
+ # uncomment this if it doesn't work, to dump the page for debugging
+ # with open("DUMP__FS_.HTML", "w") as fp:
+ # fp.write(herewego)
+
+ to_sort = re.findall(r"<input\stype=\".*?\"\svalue=\"(\S*?)\".*?name=\"(\S*?)\"\s.*?\/>", herewego)
+ request_options = dict((n, v) for (v, n) in to_sort)
+
+ # uncomment this as well if it doesn't work
+ #print "\n\n%s\n\n" % ";".join(["%s=%s" % x for x in to_sort])
+
+ challenge = re.search(r"http://api\.recaptcha\.net/challenge\?k=([0-9A-Za-z]+)", herewego)
+
+ if challenge:
+ re_captcha = ReCaptcha(self)
+ (request_options['recaptcha_challenge_field'],
+ request_options['recaptcha_response_field']) = re_captcha.challenge(challenge.group(1))
+
+ return request_options
diff --git a/pyload/plugins/hoster/FreeWayMe.py b/pyload/plugins/hoster/FreeWayMe.py
new file mode 100644
index 000000000..392430791
--- /dev/null
+++ b/pyload/plugins/hoster/FreeWayMe.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Hoster import Hoster
+
+
+class FreeWayMe(Hoster):
+ __name__ = "FreeWayMe"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'https://(?:www\.)?free-way.me/.*'
+
+ __description__ = """FreeWayMe hoster plugin"""
+ __author_name__ = "Nicolas Giese"
+ __author_mail__ = "james@free-way.me"
+
+
+ def setup(self):
+ self.resumeDownload = False
+ self.chunkLimit = 1
+ self.multiDL = self.premium
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "FreeWayMe")
+ self.fail("No FreeWay account provided")
+
+ self.logDebug("Old URL: %s" % pyfile.url)
+
+ (user, data) = self.account.selectAccount()
+
+ self.download(
+ "https://www.free-way.me/load.php",
+ get={"multiget": 7, "url": pyfile.url, "user": user, "pw": self.account.getpw(user), "json": ""},
+ disposition=True)
diff --git a/pyload/plugins/hoster/FreevideoCz.py b/pyload/plugins/hoster/FreevideoCz.py
new file mode 100644
index 000000000..dc7dd04bd
--- /dev/null
+++ b/pyload/plugins/hoster/FreevideoCz.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FreevideoCz(DeadHoster):
+ __name__ = "FreevideoCz"
+ __type__ = "hoster"
+ __version__ = "0.3"
+
+ __pattern__ = r'http://(?:www\.)?freevideo\.cz/vase-videa/.+'
+
+ __description__ = """Freevideo.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+getInfo = create_getInfo(FreevideoCz) \ No newline at end of file
diff --git a/pyload/plugins/hoster/FshareVn.py b/pyload/plugins/hoster/FshareVn.py
new file mode 100644
index 000000000..5109d239d
--- /dev/null
+++ b/pyload/plugins/hoster/FshareVn.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import strptime, mktime, gmtime
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+
+
+def getInfo(urls):
+ for url in urls:
+ html = getURL('http://www.fshare.vn/check_link.php', post={
+ "action": "check_link",
+ "arrlinks": url
+ }, decode=True)
+
+ file_info = parseFileInfo(FshareVn, url, html)
+
+ yield file_info
+
+
+def doubleDecode(m):
+ return m.group(1).decode('raw_unicode_escape')
+
+
+class FshareVn(SimpleHoster):
+ __name__ = "FshareVn"
+ __type__ = "hoster"
+ __version__ = "0.16"
+
+ __pattern__ = r'http://(?:www\.)?fshare.vn/file/.*'
+
+ __description__ = """FshareVn hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_INFO_PATTERN = r'<p>(?P<N>[^<]+)<\\/p>[\\trn\s]*<p>(?P<S>[0-9,.]+)\s*(?P<U>[kKMG])i?B<\\/p>'
+ OFFLINE_PATTERN = r'<div class=\\"f_left file_w\\"|<\\/p>\\t\\t\\t\\t\\r\\n\\t\\t<p><\\/p>\\t\\t\\r\\n\\t\\t<p>0 KB<\\/p>'
+
+ FILE_NAME_REPLACEMENTS = [("(.*)", doubleDecode)]
+
+ LINK_PATTERN = r'action="(http://download.*?)[#"]'
+ WAIT_PATTERN = ur'Lượt tải xuống kế tiếp là:\s*(.*?)\s*<'
+
+
+ def process(self, pyfile):
+ self.html = self.load('http://www.fshare.vn/check_link.php', post={
+ "action": "check_link",
+ "arrlinks": pyfile.url
+ }, decode=True)
+ self.getFileInfo()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+ self.checkDownloadedFile()
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ self.checkErrors()
+
+ action, inputs = self.parseHtmlForm('frm_download')
+ self.url = self.pyfile.url + action
+
+ if not inputs:
+ self.parseError('FORM')
+ elif 'link_file_pwd_dl' in inputs:
+ for password in self.getPassword().splitlines():
+ self.logInfo('Password protected link, trying "%s"' % password)
+ inputs['link_file_pwd_dl'] = password
+ self.html = self.load(self.url, post=inputs, decode=True)
+ if 'name="link_file_pwd_dl"' not in self.html:
+ break
+ else:
+ self.fail("No or incorrect password")
+ else:
+ self.html = self.load(self.url, post=inputs, decode=True)
+
+ self.checkErrors()
+
+ m = re.search(r'var count = (\d+)', self.html)
+ self.setWait(int(m.group(1)) if m else 30)
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError('FREE DL URL')
+ self.url = m.group(1)
+ self.logDebug("FREE DL URL: %s" % self.url)
+
+ self.wait()
+ self.download(self.url)
+
+ def handlePremium(self):
+ self.download(self.pyfile.url)
+
+ def checkErrors(self):
+ if '/error.php?' in self.req.lastEffectiveURL or u"Liên kết bạn chọn không tồn" in self.html:
+ self.offline()
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ self.logInfo("Wait until %s ICT" % m.group(1))
+ wait_until = mktime(strptime(m.group(1), "%d/%m/%Y %H:%M"))
+ self.wait(wait_until - mktime(gmtime()) - 7 * 60 * 60, True)
+ self.retry()
+ elif '<ul class="message-error">' in self.html:
+ self.logError("Unknown error occured or wait time not parsed")
+ self.retry(30, 2 * 60, "Unknown error")
+
+ def checkDownloadedFile(self):
+ # check download
+ check = self.checkDownload({
+ "not_found": "<head><title>404 Not Found</title></head>"
+ })
+
+ if check == "not_found":
+ self.fail("File not m on server")
diff --git a/pyload/plugins/hoster/Ftp.py b/pyload/plugins/hoster/Ftp.py
new file mode 100644
index 000000000..f9b3865a7
--- /dev/null
+++ b/pyload/plugins/hoster/Ftp.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+import pycurl
+import re
+
+from urllib import quote, unquote
+from urlparse import urlparse
+
+from pyload.plugins.Hoster import Hoster
+
+
+class Ftp(Hoster):
+ __name__ = "Ftp"
+ __type__ = "hoster"
+ __version__ = "0.41"
+
+ __description__ = """Download from ftp directory"""
+ __author_name__ = ("jeix", "mkaay", "zoidberg")
+ __author_mail__ = ("jeix@hasnomail.com", "mkaay@mkaay.de", "zoidberg@mujmail.cz")
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ parsed_url = urlparse(pyfile.url)
+ netloc = parsed_url.netloc
+
+ pyfile.name = parsed_url.path.rpartition('/')[2]
+ try:
+ pyfile.name = unquote(str(pyfile.name)).decode('utf8')
+ except:
+ pass
+
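+ # No credentials in the URL: try a matching FTP account first, otherwise fall back to a "user:password" line from the package password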
+ if not "@" in netloc:
+ servers = [x['login'] for x in self.account.getAllAccounts()] if self.account else []
+
+ if netloc in servers:
+ self.logDebug("Logging on to %s" % netloc)
+ self.req.addAuth(self.account.accounts[netloc]['password'])
+ else:
+ for pwd in pyfile.package().password.splitlines():
+ if ":" in pwd:
+ self.req.addAuth(pwd.strip())
+ break
+
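+ # Header-only request (NOBODY) to read the Content-Length before deciding between a file download and a directory listing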
+ self.req.http.c.setopt(pycurl.NOBODY, 1)
+
+ try:
+ response = self.load(pyfile.url)
+ except pycurl.error, e:
+ self.fail("Error %d: %s" % e.args)
+
+ self.req.http.c.setopt(pycurl.NOBODY, 0)
+ self.logDebug(self.req.http.header)
+
+ m = re.search(r"Content-Length:\s*(\d+)", response)
+ if m:
+ pyfile.size = int(m.group(1))
+ self.download(pyfile.url)
+ else:
+ #Naive ftp directory listing
+ if re.search(r'^25\d.*?"', self.req.http.header, re.M):
+ pyfile.url = pyfile.url.rstrip('/')
+ pkgname = "/".join(pyfile.package().name, urlparse(pyfile.url).path.rpartition('/')[2])
+ pyfile.url += '/'
+ self.req.http.c.setopt(48, 1) # CURLOPT_DIRLISTONLY
+ response = self.load(pyfile.url, decode=False)
+ links = [pyfile.url + quote(x) for x in response.splitlines()]
+ self.logDebug("LINKS", links)
+ self.core.api.addPackage(pkgname, links)
+ else:
+ self.fail("Unexpected server response")
diff --git a/pyload/plugins/hoster/GamefrontCom.py b/pyload/plugins/hoster/GamefrontCom.py
new file mode 100644
index 000000000..5d88fc0db
--- /dev/null
+++ b/pyload/plugins/hoster/GamefrontCom.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import parseFileSize
+
+
+class GamefrontCom(Hoster):
+ __name__ = "GamefrontCom"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?gamefront.com/files/[A-Za-z0-9]+'
+
+ __description__ = """Gamefront.com hoster plugin"""
+ __author_name__ = "fwannmacher"
+ __author_mail__ = "felipe@warhammerproject.com"
+
+ PATTERN_FILENAME = r'<title>(.*?) \| Game Front'
+ PATTERN_FILESIZE = r'<dt>File Size:</dt>[\n\s]*<dd>(.*?)</dd>'
+ PATTERN_OFFLINE = r"This file doesn't exist, or has been removed."
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = -1
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.html = self.load(pyfile.url, decode=True)
+
+ if not self._checkOnline():
+ self.offline()
+
+ pyfile.name = self._getName()
+
+ link = self._getLink()
+
+ if not link.startswith('http://'):
+ link = "http://www.gamefront.com/" + link
+
+ self.download(link)
+
+ def _checkOnline(self):
+ if re.search(self.PATTERN_OFFLINE, self.html):
+ return False
+ else:
+ return True
+
+ def _getName(self):
+ name = re.search(self.PATTERN_FILENAME, self.html)
+ if name is None:
+ self.fail("%s: Plugin broken." % self.__name__)
+
+ return name.group(1)
+
+ def _getLink(self):
+ self.html2 = self.load("http://www.gamefront.com/" + re.search("(files/service/thankyou\\?id=[A-Za-z0-9]+)",
+ self.html).group(1))
+ return re.search("<a href=\"(http://media[0-9]+\.gamefront.com/.*)\">click here</a>", self.html2).group(1).replace("&amp;", "&")
+
+
+def getInfo(urls):
+ result = []
+
+ for url in urls:
+ html = getURL(url)
+
+ if re.search(GamefrontCom.PATTERN_OFFLINE, html):
+ result.append((url, 0, 1, url))
+ else:
+ name = re.search(GamefrontCom.PATTERN_FILENAME, html)
+ if name is None:
+ result.append((url, 0, 1, url))
+ else:
+ name = name.group(1)
+ size = re.search(GamefrontCom.PATTERN_FILESIZE, html)
+ size = parseFileSize(size.group(1))
+
+ result.append((name, size, 3, url))
+
+ yield result
diff --git a/pyload/plugins/hoster/GigapetaCom.py b/pyload/plugins/hoster/GigapetaCom.py
new file mode 100644
index 000000000..d09a1fb0c
--- /dev/null
+++ b/pyload/plugins/hoster/GigapetaCom.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import FOLLOWLOCATION
+from random import randint
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class GigapetaCom(SimpleHoster):
+ __name__ = "GigapetaCom"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?gigapeta\.com/dl/\w+'
+
+ __description__ = """GigaPeta.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<img src=".*" alt="file" />-->\s*(?P<N>.*?)\s*</td>'
+ FILE_SIZE_PATTERN = r'<th>\s*Size\s*</th>\s*<td>\s*(?P<S>.*?)\s*</td>'
+ OFFLINE_PATTERN = r'<div id="page_error">'
+
+ SH_COOKIES = [(".gigapeta.com", "lang", "us")]
+
+
+ def handleFree(self):
+ captcha_key = str(randint(1, 100000000))
+ captcha_url = "http://gigapeta.com/img/captcha.gif?x=%s" % captcha_key
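+ # The captcha image is bound to a random key chosen by the client; the same key is sent back along with the solved code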
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+
+ for _ in xrange(5):
+ self.checkErrors()
+
+ captcha = self.decryptCaptcha(captcha_url)
+ self.html = self.load(self.pyfile.url, post={
+ "captcha_key": captcha_key,
+ "captcha": captcha,
+ "download": "Download"})
+
+ m = re.search(r"Location\s*:\s*(.*)", self.req.http.header, re.I)
+ if m:
+ download_url = m.group(1)
+ break
+ elif "Entered figures don&#96;t coincide with the picture" in self.html:
+ self.invalidCaptcha()
+ else:
+ self.fail("No valid captcha code entered")
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+ self.logDebug("Download URL: %s" % download_url)
+ self.download(download_url)
+
+ def checkErrors(self):
+ if "All threads for IP" in self.html:
+ self.logDebug("Your IP is already downloading a file - wait and retry")
+ self.wait(5 * 60, True)
+ self.retry()
+
+
+getInfo = create_getInfo(GigapetaCom)
diff --git a/pyload/plugins/hoster/GooIm.py b/pyload/plugins/hoster/GooIm.py
new file mode 100644
index 000000000..13598a8b6
--- /dev/null
+++ b/pyload/plugins/hoster/GooIm.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# https://goo.im/devs/liquidsmooth/3.x/codina/Nightly/LS-KK-v3.2-2014-08-01-codina.zip
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class GooIm(SimpleHoster):
+ __name__ = "GooIm"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?goo\.im/.+'
+
+ __description__ = """Goo.im hoster plugin"""
+ __author_name__ = "zapp-brannigan"
+ __author_mail__ = "fuerst.reinje@web.de"
+
+ FILE_NAME_PATTERN = r'You will be redirected to .*(?P<N>[^/ ]+) in'
+ OFFLINE_PATTERN = r'The file you requested was not found'
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+
+ def handleFree(self):
+ url = self.pyfile.url
+ self.html = self.load(url, cookies=True)
+ self.wait(10)
+ self.download(url, cookies=True)
+
+
+getInfo = create_getInfo(GooIm)
diff --git a/pyload/plugins/hoster/HellshareCz.py b/pyload/plugins/hoster/HellshareCz.py
new file mode 100644
index 000000000..5f3236876
--- /dev/null
+++ b/pyload/plugins/hoster/HellshareCz.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class HellshareCz(SimpleHoster):
+ __name__ = "HellshareCz"
+ __type__ = "hoster"
+ __version__ = "0.82"
+
+ __pattern__ = r'(http://(?:www\.)?hellshare\.(?:cz|com|sk|hu|pl)/[^?]*/\d+).*'
+
+ __description__ = """Hellshare.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<h1 id="filename"[^>]*>(?P<N>[^<]+)</h1>'
+ FILE_SIZE_PATTERN = r'<strong id="FileSize_master">(?P<S>[0-9.]*)&nbsp;(?P<U>[kKMG])i?B</strong>'
+ OFFLINE_PATTERN = r'<h1>File not found.</h1>'
+ SHOW_WINDOW_PATTERN = r'<a href="([^?]+/(\d+)/\?do=(fileDownloadButton|relatedFileDownloadButton-\2)-showDownloadWindow)"'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True if self.account else False
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ if not self.account:
+ self.fail("User not logged in")
+ pyfile.url = re.match(self.__pattern__, pyfile.url).group(1)
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+ if not self.checkTrafficLeft():
+ self.fail("Not enough traffic left for user %s." % self.user)
+
+ m = re.search(self.SHOW_WINDOW_PATTERN, self.html)
+ if m is None:
+ self.parseError('SHOW WINDOW')
+ self.url = "http://www.hellshare.com" + m.group(1)
+ self.logDebug("DOWNLOAD URL: " + self.url)
+
+ self.download(self.url)
+
+
+getInfo = create_getInfo(HellshareCz)
diff --git a/pyload/plugins/hoster/HellspyCz.py b/pyload/plugins/hoster/HellspyCz.py
new file mode 100644
index 000000000..68b61caf0
--- /dev/null
+++ b/pyload/plugins/hoster/HellspyCz.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class HellspyCz(DeadHoster):
+ __name__ = "HellspyCz"
+ __type__ = "hoster"
+ __version__ = "0.28"
+
+ __pattern__ = r'http://(?:www\.)?(?:hellspy\.(?:cz|com|sk|hu|pl)|sciagaj.pl)(/\S+/\d+)/?.*'
+
+ __description__ = """HellSpy.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+getInfo = create_getInfo(HellspyCz)
diff --git a/pyload/plugins/hoster/HotfileCom.py b/pyload/plugins/hoster/HotfileCom.py
new file mode 100644
index 000000000..1dd8b4f4e
--- /dev/null
+++ b/pyload/plugins/hoster/HotfileCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class HotfileCom(DeadHoster):
+ __name__ = "HotfileCom"
+ __type__ = "hoster"
+ __version__ = "0.37"
+
+ __pattern__ = r'https?://(www.)?hotfile\.com/dl/\d+/[0-9a-zA-Z]+/'
+
+ __description__ = """Hotfile.com hoster plugin"""
+ __author_name__ = ("sitacuisses", "spoob", "mkaay", "JoKoT3")
+ __author_mail__ = ("sitacuisses@yhoo.de", "spoob@pyload.org", "mkaay@mkaay.de", "jokot3@gmail.com")
+
+
+getInfo = create_getInfo(HotfileCom)
diff --git a/pyload/plugins/hoster/HugefilesNet.py b/pyload/plugins/hoster/HugefilesNet.py
new file mode 100644
index 000000000..8a960c7fa
--- /dev/null
+++ b/pyload/plugins/hoster/HugefilesNet.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://hugefiles.net/prthf9ya4w6s
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class HugefilesNet(XFileSharingPro):
+ __name__ = "HugefilesNet"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?hugefiles\.net/\w{12}'
+
+ __description__ = """Hugefiles.net hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ HOSTER_NAME = "hugefiles.net"
+
+ FILE_SIZE_PATTERN = r'File Size:</span>\s*<span[^>]*>(?P<S>[^<]+)</span></div>'
+
+
+getInfo = create_getInfo(HugefilesNet)
diff --git a/pyload/plugins/hoster/HundredEightyUploadCom.py b/pyload/plugins/hoster/HundredEightyUploadCom.py
new file mode 100644
index 000000000..29e152c1d
--- /dev/null
+++ b/pyload/plugins/hoster/HundredEightyUploadCom.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://180upload.com/js9qdm6kjnrs
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class HundredEightyUploadCom(XFileSharingPro):
+ __name__ = "HundredEightyUploadCom"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?180upload\.com/(\w+).*'
+
+ __description__ = """180upload.com hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ HOSTER_NAME = "180upload.com"
+
+ FILE_NAME_PATTERN = r'Filename:</b></td><td nowrap>(?P<N>.+)</td></tr>-->'
+ FILE_SIZE_PATTERN = r'Size:</b></td><td>(?P<S>[\d.]+) (?P<U>[A-Z]+)\s*<small>'
+
+
+getInfo = create_getInfo(HundredEightyUploadCom)
diff --git a/pyload/plugins/hoster/IFileWs.py b/pyload/plugins/hoster/IFileWs.py
new file mode 100644
index 000000000..45039f8e0
--- /dev/null
+++ b/pyload/plugins/hoster/IFileWs.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class IFileWs(XFileSharingPro):
+ __name__ = "IFileWs"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?ifile\.ws/\w+(/.+)?'
+
+ __description__ = """Ifile.ws hoster plugin"""
+ __author_name__ = "z00nx"
+ __author_mail__ = "z00nx0@gmail.com"
+
+ HOSTER_NAME = "ifile.ws"
+
+ FILE_INFO_PATTERN = r'<h1\s+style="display:inline;">(?P<N>[^<]+)</h1>\s+\[(?P<S>[^]]+)\]'
+ OFFLINE_PATTERN = r'File Not Found|The file was removed by administrator'
+
+
+getInfo = create_getInfo(IFileWs)
diff --git a/pyload/plugins/hoster/IcyFilesCom.py b/pyload/plugins/hoster/IcyFilesCom.py
new file mode 100644
index 000000000..532cd094b
--- /dev/null
+++ b/pyload/plugins/hoster/IcyFilesCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class IcyFilesCom(DeadHoster):
+ __name__ = "IcyFilesCom"
+ __type__ = "hoster"
+ __version__ = "0.06"
+
+ __pattern__ = r'http://(?:www\.)?icyfiles\.com/(.*)'
+
+ __description__ = """IcyFiles.com hoster plugin"""
+ __author_name__ = "godofdream"
+ __author_mail__ = "soilfiction@gmail.com"
+
+
+getInfo = create_getInfo(IcyFilesCom)
diff --git a/pyload/plugins/hoster/IfileIt.py b/pyload/plugins/hoster/IfileIt.py
new file mode 100644
index 000000000..2707edd5a
--- /dev/null
+++ b/pyload/plugins/hoster/IfileIt.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class IfileIt(SimpleHoster):
+ __name__ = "IfileIt"
+ __type__ = "hoster"
+ __version__ = "0.27"
+
+ __pattern__ = r'^unmatchable$'
+
+ __description__ = """Ifile.it"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ LINK_PATTERN = r'</span> If it doesn\'t, <a target="_blank" href="([^"]+)">'
+ RECAPTCHA_KEY_PATTERN = r"var __recaptcha_public\s*=\s*'([^']+)';"
+ FILE_INFO_PATTERN = r'<span style="cursor: default;[^>]*>\s*(?P<N>.*?)\s*&nbsp;\s*<strong>\s*(?P<S>[0-9.]+)\s*(?P<U>[kKMG])i?B\s*</strong>\s*</span>'
+ OFFLINE_PATTERN = r'<span style="cursor: default;[^>]*>\s*&nbsp;\s*<strong>\s*</strong>\s*</span>'
+ TEMP_OFFLINE_PATTERN = r'<span class="msg_red">Downloading of this file is temporarily disabled</span>'
+
+
+ def handleFree(self):
+ ukey = re.match(self.__pattern__, self.pyfile.url).group(1)
+ json_url = 'http://ifile.it/new_download-request.json'
+ post_data = {"ukey": ukey, "ab": "0"}
+
+ json_response = json_loads(self.load(json_url, post=post_data))
+ self.logDebug(json_response)
+ if json_response['status'] == 3:
+ self.offline()
+
+ if json_response['captcha']:
+ captcha_key = re.search(self.RECAPTCHA_KEY_PATTERN, self.html).group(1)
+ recaptcha = ReCaptcha(self)
+ post_data['ctype'] = "recaptcha"
+
+ for _ in xrange(5):
+ post_data['recaptcha_challenge'], post_data['recaptcha_response'] = recaptcha.challenge(captcha_key)
+ json_response = json_loads(self.load(json_url, post=post_data))
+ self.logDebug(json_response)
+
+ if json_response['retry']:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail("Incorrect captcha")
+
+ if not "ticket_url" in json_response:
+ self.parseError("Download URL")
+
+ self.download(json_response['ticket_url'])
+
+
+getInfo = create_getInfo(IfileIt)
diff --git a/pyload/plugins/hoster/IfolderRu.py b/pyload/plugins/hoster/IfolderRu.py
new file mode 100644
index 000000000..4f84e6b32
--- /dev/null
+++ b/pyload/plugins/hoster/IfolderRu.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class IfolderRu(SimpleHoster):
+ __name__ = "IfolderRu"
+ __type__ = "hoster"
+ __version__ = "0.38"
+
+ __pattern__ = r'http://(?:www\.)?(?:ifolder\.ru|rusfolder\.(?:com|net|ru))/(?:files/)?(?P<ID>\d+).*'
+
+ __description__ = """Ifolder.ru hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_SIZE_REPLACEMENTS = [(u'Кб', 'KB'), (u'Мб', 'MB'), (u'Гб', 'GB')]
+ FILE_NAME_PATTERN = ur'(?:<div><span>)?Название:(?:</span>)? <b>(?P<N>[^<]+)</b><(?:/div|br)>'
+ FILE_SIZE_PATTERN = ur'(?:<div><span>)?Размер:(?:</span>)? <b>(?P<S>[^<]+)</b><(?:/div|br)>'
+ OFFLINE_PATTERN = ur'<p>Файл номер <b>[^<]*</b> (не найден|удален) !!!</p>'
+
+ SESSION_ID_PATTERN = r'<a href=(http://ints.(?:rusfolder.com|ifolder.ru)/ints/sponsor/\?bi=\d*&session=([^&]+)&u=[^>]+)>'
+ INTS_SESSION_PATTERN = r'\(\'ints_session\'\);\s*if\(tag\)\{tag.value = "([^"]+)";\}'
+ HIDDEN_INPUT_PATTERN = r"var v = .*?name='([^']+)' value='1'"
+ LINK_PATTERN = r'<a id="download_file_href" href="([^"]+)"'
+ WRONG_CAPTCHA_PATTERN = ur'<font color=Red>неверный код,<br>введите еще раз</font><br>'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True if self.account else False
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ file_id = re.match(self.__pattern__, pyfile.url).group('ID')
+ self.html = self.load("http://rusfolder.com/%s" % file_id, cookies=True, decode=True)
+ self.getFileInfo()
+
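+ # rusfolder bounces through an ints.* sponsor page; extract its session id to request the captcha frame and image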
+ url = re.search(r"location\.href = '(http://ints\..*?=)'", self.html).group(1)
+ self.html = self.load(url, cookies=True, decode=True)
+
+ url, session_id = re.search(self.SESSION_ID_PATTERN, self.html).groups()
+ self.html = self.load(url, cookies=True, decode=True)
+
+ url = "http://ints.rusfolder.com/ints/frame/?session=%s" % session_id
+ self.html = self.load(url, cookies=True)
+
+ self.wait(31, False)
+
+ captcha_url = "http://ints.rusfolder.com/random/images/?session=%s" % session_id
+ for _ in xrange(5):
+ self.html = self.load(url, cookies=True)
+ action, inputs = self.parseHtmlForm('ID="Form1"')
+ inputs['ints_session'] = re.search(self.INTS_SESSION_PATTERN, self.html).group(1)
+ inputs[re.search(self.HIDDEN_INPUT_PATTERN, self.html).group(1)] = '1'
+ inputs['confirmed_number'] = self.decryptCaptcha(captcha_url, cookies=True)
+ inputs['action'] = '1'
+ self.logDebug(inputs)
+
+ self.html = self.load(url, decode=True, cookies=True, post=inputs)
+ if self.WRONG_CAPTCHA_PATTERN in self.html:
+ self.invalidCaptcha()
+ else:
+ break
+ else:
+ self.fail("Invalid captcha")
+
+ download_url = re.search(self.LINK_PATTERN, self.html).group(1)
+ self.correctCaptcha()
+ self.logDebug("Download URL: %s" % download_url)
+ self.download(download_url)
+
+
+getInfo = create_getInfo(IfolderRu)
diff --git a/pyload/plugins/hoster/JumbofilesCom.py b/pyload/plugins/hoster/JumbofilesCom.py
new file mode 100644
index 000000000..d3ee9ee9b
--- /dev/null
+++ b/pyload/plugins/hoster/JumbofilesCom.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class JumbofilesCom(SimpleHoster):
+ __name__ = "JumbofilesCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?jumbofiles.com/(\w{12}).*'
+
+ __description__ = """JumboFiles.com hoster plugin"""
+ __author_name__ = "godofdream"
+ __author_mail__ = "soilfiction@gmail.com"
+
+ FILE_INFO_PATTERN = r'<TR><TD>(?P<N>[^<]+?)\s*<small>\((?P<S>[\d.]+)\s*(?P<U>[KMG][bB])\)</small></TD></TR>'
+ OFFLINE_PATTERN = r'Not Found or Deleted / Disabled due to inactivity or DMCA'
+ LINK_PATTERN = r'<meta http-equiv="refresh" content="10;url=(.+)">'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+ def handleFree(self):
+ ukey = re.match(self.__pattern__, self.pyfile.url).group(1)
+ post_data = {"id": ukey, "op": "download3", "rand": ""}
+ html = self.load(self.pyfile.url, post=post_data, decode=True)
+ url = re.search(self.LINK_PATTERN, html).group(1)
+ self.logDebug("Download " + url)
+ self.download(url)
+
+
+getInfo = create_getInfo(JumbofilesCom)
diff --git a/pyload/plugins/hoster/Keep2shareCC.py b/pyload/plugins/hoster/Keep2shareCC.py
new file mode 100644
index 000000000..088a1b012
--- /dev/null
+++ b/pyload/plugins/hoster/Keep2shareCC.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://k2s.cc/file/55fb73e1c00c5/random.bin
+
+import re
+
+from urlparse import urlparse, urljoin
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class Keep2shareCC(SimpleHoster):
+ __name__ = "Keep2shareCC"
+ __type__ = "hoster"
+ __version__ = "0.10"
+
+ __pattern__ = r'https?://(?:www\.)?(keep2share|k2s|keep2s)\.cc/file/(?P<ID>\w+)'
+
+ __description__ = """Keep2share.cc hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ FILE_NAME_PATTERN = r'File: <span>(?P<N>.+)</span>'
+ FILE_SIZE_PATTERN = r'Size: (?P<S>[^<]+)</div>'
+ OFFLINE_PATTERN = r'File not found or deleted|Sorry, this file is blocked or deleted|Error 404'
+
+ LINK_PATTERN = r'To download this file with slow speed, use <a href="([^"]+)">this link</a>'
+ WAIT_PATTERN = r'Please wait ([\d:]+) to download this file'
+ ALREADY_DOWNLOADING_PATTERN = r'Free account does not allow to download more than one file at the same time'
+
+ RECAPTCHA_KEY = "6LcYcN0SAAAAABtMlxKj7X0hRxOY8_2U86kI1vbb"
+
+
+ def handleFree(self):
+ self.sanitize_url()
+ self.html = self.load(self.pyfile.url)
+
+ self.fid = re.search(r'<input type="hidden" name="slow_id" value="([^"]+)">', self.html).group(1)
+ self.html = self.load(self.pyfile.url, post={'yt0': '', 'slow_id': self.fid})
+
+ m = re.search(r"function download\(\){.*window\.location\.href = '([^']+)';", self.html, re.DOTALL)
+ if m: # Direct mode
+ self.startDownload(m.group(1))
+ else:
+ self.handleCaptcha()
+
+ self.wait(30)
+
+ self.html = self.load(self.pyfile.url, post={'uniqueId': self.fid, 'free': 1})
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ self.logDebug('Hoster told us to wait for %s' % m.group(1))
+ # string to time convert courtesy of https://stackoverflow.com/questions/10663720
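+ # e.g. "01:30:00" -> 1*3600 + 30*60 + 0*1 = 5400 seconds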
+ ftr = [3600, 60, 1]
+ wait_time = sum([a * b for a, b in zip(ftr, map(int, m.group(1).split(':')))])
+ self.wait(wait_time, reconnect=True)
+ self.retry()
+
+ m = re.search(self.ALREADY_DOWNLOADING_PATTERN, self.html)
+ if m:
+ # if someone is already downloading on our line, wait 30min and retry
+ self.logDebug('Already downloading, waiting for 30 minutes')
+ self.wait(30 * 60, reconnect=True)
+ self.retry()
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError("Unable to detect direct link")
+ self.startDownload(m.group(1))
+
+ def handleCaptcha(self):
+ recaptcha = ReCaptcha(self)
+ for _ in xrange(5):
+ challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ post_data = {'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': response,
+ 'CaptchaForm%5Bcode%5D': '',
+ 'free': 1,
+ 'freeDownloadRequest': 1,
+ 'uniqueId': self.fid,
+ 'yt0': ''}
+
+ self.html = self.load(self.pyfile.url, post=post_data)
+
+ if 'recaptcha' not in self.html:
+ self.correctCaptcha()
+ break
+ else:
+ self.logInfo('Wrong captcha')
+ self.invalidCaptcha()
+ else:
+ self.fail("All captcha attempts failed")
+
+ def startDownload(self, url):
+ d = urljoin(self.base_url, url)
+ self.logDebug('Direct Link: ' + d)
+ self.download(d, disposition=True)
+
+ def sanitize_url(self):
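+ # Follow a possible redirect (k2s.cc / keep2s.cc aliases) and keep scheme://host as base_url for resolving the relative download link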
+ header = self.load(self.pyfile.url, just_header=True)
+ if 'location' in header:
+ self.pyfile.url = header['location']
+ p = urlparse(self.pyfile.url)
+ self.base_url = "%s://%s" % (p.scheme, p.hostname)
+
+
+getInfo = create_getInfo(Keep2shareCC)
diff --git a/pyload/plugins/hoster/LemUploadsCom.py b/pyload/plugins/hoster/LemUploadsCom.py
new file mode 100644
index 000000000..8556e3c9c
--- /dev/null
+++ b/pyload/plugins/hoster/LemUploadsCom.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# BigBuckBunny_320x180.mp4 - 61.7 Mb - http://lemuploads.com/uwol0aly9dld
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class LemUploadsCom(XFileSharingPro):
+ __name__ = "LemUploadsCom"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?lemuploads.com/\w{12}'
+
+ __description__ = """LemUploads.com hoster plugin"""
+ __author_name__ = "t4skforce"
+ __author_mail__ = "t4skforce1337[AT]gmail[DOT]com"
+
+ HOSTER_NAME = "lemuploads.com"
+
+ OFFLINE_PATTERN = r'<b>File Not Found</b><br><br>'
+ FILE_NAME_PATTERN = r'<b>Password:</b></div>\s*<h2>(?P<N>[^<]+)</h2>'
+
+
+getInfo = create_getInfo(LemUploadsCom)
diff --git a/pyload/plugins/hoster/LetitbitNet.py b/pyload/plugins/hoster/LetitbitNet.py
new file mode 100644
index 000000000..3a8e28a90
--- /dev/null
+++ b/pyload/plugins/hoster/LetitbitNet.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+#
+# API Documentation:
+# http://api.letitbit.net/reg/static/api.pdf
+#
+# Test links:
+# http://letitbit.net/download/07874.0b5709a7d3beee2408bb1f2eefce/random.bin.html
+
+import re
+
+from urllib import urlencode, urlopen
+
+from pyload.common.json_layer import json_loads, json_dumps
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster
+
+
+def api_download_info(url):
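+ # The JSON API takes a single form field r=<json payload> of the form [<key>, [<method>, <params>]];
+ # the literal "yw7XQy2v9" below is presumably a shared key accepted for anonymous info requests.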
+ json_data = ["yw7XQy2v9", ["download/info", {"link": url}]]
+ post_data = urlencode({'r': json_dumps(json_data)})
+ api_rep = urlopen("http://api.letitbit.net/json", data=post_data).read()
+ return json_loads(api_rep)
+
+
+def getInfo(urls):
+ for url in urls:
+ api_rep = api_download_info(url)
+ if api_rep['status'] == 'OK':
+ info = api_rep['data'][0]
+ yield (info['name'], info['size'], 2, url)
+ else:
+ yield (url, 0, 1, url)
+
+
+class LetitbitNet(SimpleHoster):
+ __name__ = "LetitbitNet"
+ __type__ = "hoster"
+ __version__ = "0.24"
+
+ __pattern__ = r'http://(?:www\.)?(letitbit|shareflare).net/download/.*'
+
+ __description__ = """Letitbit.net hoster plugin"""
+ __author_name__ = ("zoidberg", "z00nx")
+ __author_mail__ = ("zoidberg@mujmail.cz", "z00nx0@gmail.com")
+
+ FILE_URL_REPLACEMENTS = [(r"(?<=http://)([^/]+)", "letitbit.net")]
+
+ HOSTER_NAME = "letitbit.net"
+
+ SECONDS_PATTERN = r'seconds\s*=\s*(\d+);'
+ CAPTCHA_CONTROL_FIELD = r"recaptcha_control_field\s=\s'(?P<value>[^']+)'"
+ RECAPTCHA_KEY = "6Lc9zdMSAAAAAF-7s2wuQ-036pLRbM0p8dDaQdAM"
+
+
+ def setup(self):
+ self.resumeDownload = True
+ #TODO confirm that resume works
+
+ def getFileInfo(self):
+ api_rep = api_download_info(self.pyfile.url)
+ if api_rep['status'] == 'OK':
+ self.api_data = api_rep['data'][0]
+ self.pyfile.name = self.api_data['name']
+ self.pyfile.size = self.api_data['size']
+ else:
+ self.offline()
+
+ def handleFree(self):
+ action, inputs = self.parseHtmlForm('id="ifree_form"')
+ if not action:
+ self.parseError("page 1 / ifree_form")
+
+ domain = "http://www." + self.HOSTER_NAME
+ self.pyfile.size = float(inputs['sssize'])
+ self.logDebug(action, inputs)
+ inputs['desc'] = ""
+
+ self.html = self.load(domain + action, post=inputs, cookies=True)
+
+ # action, inputs = self.parseHtmlForm('id="d3_form"')
+ # if not action:
+ # self.parseError("page 2 / d3_form")
+ # self.logDebug(action, inputs)
+ #
+ # self.html = self.load(action, post = inputs, cookies = True)
+ #
+ # try:
+ # ajax_check_url, captcha_url = re.search(self.CHECK_URL_PATTERN, self.html).groups()
+ # m = re.search(self.SECONDS_PATTERN, self.html)
+ # seconds = int(m.group(1)) if m else 60
+ # self.wait(seconds+1)
+ # except Exception, e:
+ # self.logError(e)
+ # self.parseError("page 3 / js")
+
+ m = re.search(self.SECONDS_PATTERN, self.html)
+ seconds = int(m.group(1)) if m else 60
+ self.logDebug("Seconds found", seconds)
+ m = re.search(self.CAPTCHA_CONTROL_FIELD, self.html)
+ recaptcha_control_field = m.group(1)
+ self.logDebug("ReCaptcha control field found", recaptcha_control_field)
+ self.wait(seconds + 1)
+
+ response = self.load("%s/ajax/download3.php" % domain, post=" ", cookies=True)
+ if response != '1':
+ self.parseError('Unknown response - ajax_check_url')
+ self.logDebug(response)
+
+ recaptcha = ReCaptcha(self)
+ challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ post_data = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": response,
+ "recaptcha_control_field": recaptcha_control_field}
+ self.logDebug("Post data to send", post_data)
+ response = self.load('%s/ajax/check_recaptcha.php' % domain, post=post_data, cookies=True)
+ self.logDebug(response)
+ if not response:
+ self.invalidCaptcha()
+ if response == "error_free_download_blocked":
+ self.logWarning("Daily limit reached")
+ self.wait(secondsToMidnight(gmt=2), True)
+ if response == "error_wrong_captcha":
+ self.logError("Wrong Captcha")
+ self.invalidCaptcha()
+ self.retry()
+ elif response.startswith('['):
+ urls = json_loads(response)
+ elif response.startswith('http://'):
+ urls = [response]
+ else:
+ self.parseError("Unknown response - captcha check")
+
+ self.correctCaptcha()
+
+ for download_url in urls:
+ try:
+ self.logDebug("Download URL", download_url)
+ self.download(download_url)
+ break
+ except Exception, e:
+ self.logError(e)
+ else:
+ self.fail("Download did not finish correctly")
+
+ def handlePremium(self):
+ api_key = self.user
+ premium_key = self.account.getAccountData(self.user)['password']
+
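+ # Premium requests are authenticated with the user's own API key and password
+ # rather than the hard-coded key used by api_download_info() above.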
+ json_data = [api_key, ["download/direct_links", {"pass": premium_key, "link": self.pyfile.url}]]
+ api_rep = self.load('http://api.letitbit.net/json', post={'r': json_dumps(json_data)})
+ self.logDebug('API Data: ' + api_rep)
+ api_rep = json_loads(api_rep)
+
+ if api_rep['status'] == 'FAIL':
+ self.fail(api_rep['data'])
+
+ direct_link = api_rep['data'][0][0]
+ self.logDebug('Direct Link: ' + direct_link)
+
+ self.download(direct_link, disposition=True)
diff --git a/pyload/plugins/hoster/LinksnappyCom.py b/pyload/plugins/hoster/LinksnappyCom.py
new file mode 100644
index 000000000..aed74d09b
--- /dev/null
+++ b/pyload/plugins/hoster/LinksnappyCom.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urlsplit
+
+from pyload.common.json_layer import json_loads, json_dumps
+from pyload.plugins.Hoster import Hoster
+
+
+class LinksnappyCom(Hoster):
+ __name__ = "LinksnappyCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?linksnappy\.com'
+
+ __description__ = """Linksnappy.com hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ SINGLE_CHUNK_HOSTERS = ('easybytez.com',)
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Linksnappy.com")
+ self.fail("No Linksnappy.com account provided")
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ host = self._get_host(pyfile.url)
+ json_params = json_dumps({'link': pyfile.url,
+ 'type': host,
+ 'username': self.user,
+ 'password': self.account.getAccountData(self.user)['password']})
+ r = self.load('http://gen.linksnappy.com/genAPI.php',
+ post={'genLinks': json_params})
+ self.logDebug("JSON data: " + r)
+
+ j = json_loads(r)['links'][0]
+
+ if j['error']:
+ self.logError('Error converting the link: %s' % j['error'])
+ self.fail('Error converting the link')
+
+ pyfile.name = j['filename']
+ new_url = j['generated']
+
+ if host in self.SINGLE_CHUNK_HOSTERS:
+ self.chunkLimit = 1
+ else:
+ self.setup()
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: " + new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({"html302": "<title>302 Found</title>"})
+ if check == "html302":
+ self.retry(wait_time=5, reason="Linksnappy returns only HTML data.")
+
+ @staticmethod
+ def _get_host(url):
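+ # e.g. "http://www.easybytez.com/abc123" -> "easybytez.com"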
+ host = urlsplit(url).netloc
+ return re.search(r'[\w-]+\.\w+$', host).group(0)
diff --git a/pyload/plugins/hoster/LoadTo.py b/pyload/plugins/hoster/LoadTo.py
new file mode 100644
index 000000000..bd931f91e
--- /dev/null
+++ b/pyload/plugins/hoster/LoadTo.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://www.load.to/JWydcofUY6/random.bin
+# http://www.load.to/oeSmrfkXE/random100.bin
+
+import re
+
+from pyload.plugins.internal.CaptchaService import SolveMedia
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class LoadTo(SimpleHoster):
+ __name__ = "LoadTo"
+ __type__ = "hoster"
+ __version__ = "0.16"
+
+ __pattern__ = r'http://(?:www\.)?load\.to/\w+'
+
+ __description__ = """ Load.to hoster plugin """
+ __author_name__ = ("halfman", "stickell")
+ __author_mail__ = ("Pulpan3@gmail.com", "l.stickell@yahoo.it")
+
+ FILE_NAME_PATTERN = r'<h1>(?P<N>.+)</h1>'
+ FILE_SIZE_PATTERN = r'Size: (?P<S>[\d.]+) (?P<U>\w+)'
+ OFFLINE_PATTERN = r'>Can\'t find file'
+
+ LINK_PATTERN = r'<form method="post" action="(.+?)"'
+ WAIT_PATTERN = r'type="submit" value="Download \((\d+)\)"'
+ SOLVEMEDIA_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.noscript\?k=([^"]+)'
+
+ FILE_URL_REPLACEMENTS = [(r'(\w)$', r'\1/')]
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = 1
+
+
+ def handleFree(self):
+ # Search for Download URL
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError("Unable to detect download URL")
+
+ download_url = m.group(1)
+
+ # Set Timer - may be obsolete
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ self.wait(int(m.group(1)))
+
+ # Load.to has been using SolveMedia captchas since ~July 2014:
+ m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if m is None:
+ self.download(download_url)
+ else:
+ captcha_key = m.group(1)
+ solvemedia = SolveMedia(self)
+ captcha_challenge, captcha_response = solvemedia.challenge(captcha_key)
+ self.download(download_url, post={"adcopy_challenge": captcha_challenge, "adcopy_response": captcha_response})
+ check = self.checkDownload({"404": re.compile("\A<h1>404 Not Found</h1>")})
+ if check == "404":
+ self.logWarning("The captcha you entered was incorrect. Please try again.")
+ self.invalidCaptcha()
+ self.retry()
+
+
+getInfo = create_getInfo(LoadTo)
diff --git a/pyload/plugins/hoster/LomafileCom.py b/pyload/plugins/hoster/LomafileCom.py
new file mode 100644
index 000000000..942afa1f4
--- /dev/null
+++ b/pyload/plugins/hoster/LomafileCom.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class LomafileCom(SimpleHoster):
+ __name__ = "LomafileCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'https?://lomafile\.com/.+/[\w\.]+'
+
+ __description__ = """ Lomafile.com hoster plugin """
+ __author_name__ = "nath_schwarz"
+ __author_mail__ = "nathan.notwhite@gmail.com"
+
+ FILE_NAME_PATTERN = r'Filename:[^>]*>(?P<N>[\w\.]+)'
+ FILE_SIZE_PATTERN = r'\((?P<S>\d+)\s(?P<U>\w+)\)'
+ OFFLINE_PATTERN = r'Software error'
+
+
+ def handleFree(self):
+ for _ in range(3):
+ captcha_id = re.search(r'src="http://lomafile\.com/captchas/(?P<id>\w+)\.jpg"', self.html)
+ if not captcha_id:
+ self.parseError("Unable to parse captcha id.")
+ else:
+ captcha_id = captcha_id.group("id")
+
+ form_id = re.search(r'name="id" value="(?P<id>\w+)"', self.html)
+ if not form_id:
+ self.parseError("Unable to parse form id")
+ else:
+ form_id = form_id.group("id")
+
+ captcha = self.decryptCaptcha("http://lomafile.com/captchas/" + captcha_id + ".jpg")
+
+ self.wait(60)
+
+ self.html = self.load(self.pyfile.url, post={
+ "op": "download2",
+ "id": form_id,
+ "rand": captcha_id,
+ "code": captcha,
+ "down_direct": "1"})
+
+ download_url = re.search(r'http://[\d\.]+:\d+/d/\w+/[\w\.]+', self.html)
+ if download_url is None:
+ self.invalidCaptcha()
+ self.logDebug("Invalid captcha.")
+ else:
+ download_url = download_url.group(0)
+ self.logDebug("Download URL: %s" % download_url)
+ self.download(download_url)
+ else:
+ self.fail("Invalid captcha-code entered.")
+
+
+getInfo = create_getInfo(LomafileCom)
diff --git a/pyload/plugins/hoster/LuckyShareNet.py b/pyload/plugins/hoster/LuckyShareNet.py
new file mode 100644
index 000000000..cafddb0a8
--- /dev/null
+++ b/pyload/plugins/hoster/LuckyShareNet.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.lib.bottle import json_loads
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class LuckyShareNet(SimpleHoster):
+ __name__ = "LuckyShareNet"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?luckyshare.net/(?P<ID>\d{10,})'
+
+ __description__ = """LuckyShare.net hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ FILE_INFO_PATTERN = r"<h1 class='file_name'>(?P<N>\S+)</h1>\s*<span class='file_size'>Filesize: (?P<S>[\d.]+)(?P<U>\w+)</span>"
+ OFFLINE_PATTERN = r'There is no such file available'
+ RECAPTCHA_KEY = "6LdivsgSAAAAANWh-d7rPE1mus4yVWuSQIJKIYNw"
+
+
+ def parseJson(self, rep):
+ if 'AJAX Error' in rep:
+ html = self.load(self.pyfile.url, decode=True)
+ m = re.search(r"waitingtime = (\d+);", html)
+ if m:
+ waittime = int(m.group(1))
+ self.logDebug('You have to wait %d seconds between free downloads' % waittime)
+ self.retry(wait_time=waittime)
+ else:
+ self.parseError('Unable to detect wait time between free downloads')
+ elif 'Hash expired' in rep:
+ self.retry(reason="Hash expired")
+ return json_loads(rep)
+
+ # TODO: There should be a filesize limit for free downloads
+ # TODO: Some files could not be downloaded in free mode
+ def handleFree(self):
+ file_id = re.match(self.__pattern__, self.pyfile.url).group('ID')
+ self.logDebug('File ID: ' + file_id)
+ rep = self.load(r"http://luckyshare.net/download/request/type/time/file/" + file_id, decode=True)
+ self.logDebug('JSON: ' + rep)
+ json = self.parseJson(rep)
+
+ self.wait(int(json['time']))
+
+ recaptcha = ReCaptcha(self)
+ for _ in xrange(5):
+ challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ rep = self.load(r"http://luckyshare.net/download/verify/challenge/%s/response/%s/hash/%s" %
+ (challenge, response, json['hash']), decode=True)
+ self.logDebug('JSON: ' + rep)
+ if 'link' in rep:
+ json.update(self.parseJson(rep))
+ self.correctCaptcha()
+ break
+ elif 'Verification failed' in rep:
+ self.logInfo('Wrong captcha')
+ self.invalidCaptcha()
+ else:
+ self.parseError('Unable to get download link')
+
+ if not json['link']:
+ self.fail("No Download url retrieved/all captcha attempts failed")
+
+ self.logDebug('Direct URL: ' + json['link'])
+ self.download(json['link'])
+
+
+getInfo = create_getInfo(LuckyShareNet)
diff --git a/pyload/plugins/hoster/MediafireCom.py b/pyload/plugins/hoster/MediafireCom.py
new file mode 100644
index 000000000..bbf9f06b6
--- /dev/null
+++ b/pyload/plugins/hoster/MediafireCom.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import SolveMedia
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+from pyload.network.RequestFactory import getURL
+
+
+def replace_eval(js_expr):
+ return js_expr.replace(r'eval("', '').replace(r"\'", r"'").replace(r'\"', r'"')
+
+
+def checkHTMLHeader(url):
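+ # Follow up to three redirects by hand and classify the final target:
+ # 0 = regular download page, 1 = error page (errno=320, treated as offline),
+ # 2 = direct file (content-disposition header), 3 = request failed.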
+ try:
+ for _ in xrange(3):
+ header = getURL(url, just_header=True)
+ for line in header.splitlines():
+ line = line.lower()
+ if 'location' in line:
+ url = line.split(':', 1)[1].strip()
+ if 'error.php?errno=320' in url:
+ return url, 1
+ if not url.startswith('http://'):
+ url = 'http://www.mediafire.com' + url
+ break
+ elif 'content-disposition' in line:
+ return url, 2
+ else:
+ break
+ except:
+ return url, 3
+
+ return url, 0
+
+
+def getInfo(urls):
+ for url in urls:
+ location, status = checkHTMLHeader(url)
+ if status:
+ file_info = (url, 0, status, url)
+ else:
+ file_info = parseFileInfo(MediafireCom, url, getURL(url, decode=True))
+ yield file_info
+
+
+class MediafireCom(SimpleHoster):
+ __name__ = "MediafireCom"
+ __type__ = "hoster"
+ __version__ = "0.79"
+
+ __pattern__ = r'http://(?:www\.)?mediafire\.com/(file/|(view/?|download.php)?\?)(\w{11}|\w{15})($|/)'
+
+ __description__ = """Mediafire.com hoster plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ LINK_PATTERN = r'<div class="download_link"[^>]*(?:z-index:(?P<zindex>\d+))?[^>]*>\s*<a href="(?P<href>http://[^"]+)"'
+ JS_KEY_PATTERN = r"DoShow\('mfpromo1'\);[^{]*{((\w+)='';.*?)eval\(\2\);"
+ JS_ZMODULO_PATTERN = r"\('z-index'\)\) \% (\d+)\)\);"
+ SOLVEMEDIA_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.noscript\?k=([^"]+)'
+ PAGE1_ACTION_PATTERN = r'<link rel="canonical" href="([^"]+)"/>'
+ PASSWORD_PATTERN = r'<form name="form_password"'
+
+ FILE_NAME_PATTERN = r'<META NAME="description" CONTENT="(?P<N>[^"]+)"/>'
+ FILE_INFO_PATTERN = r"oFileSharePopup\.ald\('(?P<ID>[^']*)','(?P<N>[^']*)','(?P<S>[^']*)','','(?P<sha256>[^']*)'\)"
+ OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>'
+
+
+ def setup(self):
+ self.multiDL = False
+
+ def process(self, pyfile):
+ pyfile.url = re.sub(r'/view/?\?', '/?', pyfile.url)
+
+ self.url, result = checkHTMLHeader(pyfile.url)
+ self.logDebug('Location (%d): %s' % (result, self.url))
+
+ if result == 0:
+ self.html = self.load(self.url, decode=True)
+ self.checkCaptcha()
+ self.multiDL = True
+ self.check_data = self.getFileInfo()
+
+ if self.account:
+ self.handlePremium()
+ else:
+ self.handleFree()
+ elif result == 1:
+ self.offline()
+ else:
+ self.multiDL = True
+ self.download(self.url, disposition=True)
+
+ def handleFree(self):
+ passwords = self.getPassword().splitlines()
+ while self.PASSWORD_PATTERN in self.html:
+ if len(passwords):
+ password = passwords.pop(0)
+ self.logInfo("Password protected link, trying " + password)
+ self.html = self.load(self.url, post={"downloadp": password})
+ else:
+ self.fail("No or incorrect password")
+
+ m = re.search(r'kNO = r"(http://.*?)";', self.html)
+ if m is None:
+ self.parseError("Download URL")
+ download_url = m.group(1)
+ self.logDebug("DOWNLOAD LINK:", download_url)
+
+ self.download(download_url)
+
+ def checkCaptcha(self):
+ for _ in xrange(5):
+ m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ solvemedia = SolveMedia(self)
+ captcha_challenge, captcha_response = solvemedia.challenge(captcha_key)
+ self.html = self.load(self.url, post={"adcopy_challenge": captcha_challenge,
+ "adcopy_response": captcha_response}, decode=True)
+ else:
+ break
+ else:
+ self.fail("No valid captcha solution received")
diff --git a/pyload/plugins/hoster/MegaDebridEu.py b/pyload/plugins/hoster/MegaDebridEu.py
new file mode 100644
index 000000000..6c980009e
--- /dev/null
+++ b/pyload/plugins/hoster/MegaDebridEu.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote_plus
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+
+
+class MegaDebridEu(Hoster):
+ __name__ = "MegaDebridEu"
+ __type__ = "hoster"
+ __version__ = "0.4"
+
+ __pattern__ = r'^https?://(?:w{3}\d+\.mega-debrid.eu|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/download/file/[^/]+/.+$'
+
+ __description__ = """mega-debrid.eu hoster plugin"""
+ __author_name__ = "D.Ducatel"
+ __author_mail__ = "dducatel@je-geek.fr"
+
+ API_URL = "https://www.mega-debrid.eu/api.php"
+
+
+ def getFilename(self, url):
+ try:
+ return unquote_plus(url.rsplit("/", 1)[1])
+ except IndexError:
+ return ""
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.exitOnFail(_("Please enter your %s account or deactivate this plugin") % "Mega-debrid.eu")
+ else:
+ if not self.connectToApi():
+ self.exitOnFail(_("Unable to connect to %s") % "Mega-debrid.eu")
+
+ self.logDebug("Old URL: %s" % pyfile.url)
+ new_url = self.debridLink(pyfile.url)
+ self.logDebug("New URL: " + new_url)
+
+ filename = self.getFilename(new_url)
+ if filename != "":
+ pyfile.name = filename
+ self.download(new_url, disposition=True)
+
+ def connectToApi(self):
+ """
+ Connect to the mega-debrid API.
+ Return True on success.
+ """
+ user, data = self.account.selectAccount()
+ jsonResponse = self.load(self.API_URL,
+ get={'action': 'connectUser', 'login': user, 'password': data['password']})
+ response = json_loads(jsonResponse)
+
+ if response['response_code'] == "ok":
+ self.token = response['token']
+ return True
+ else:
+ return False
+
+ def debridLink(self, linkToDebrid):
+ """
+ Debrid (unrestrict) a link.
+ Return the debrided link on success; otherwise abort via exitOnFail().
+ """
+ jsonResponse = self.load(self.API_URL, get={'action': 'getLink', 'token': self.token},
+ post={"link": linkToDebrid})
+ response = json_loads(jsonResponse)
+
+ if response['response_code'] == "ok":
+ debridedLink = response['debridLink'][1:-1]
+ return debridedLink
+ else:
+ self.exitOnFail("Unable to debrid %s" % linkToDebrid)
+
+ def exitOnFail(self, msg):
+ """
+ Exit the plugin on failure and report the reason
+ (reset the account or fail, depending on the "unloadFailing" option).
+ """
+ if self.getConfig("unloadFailing"):
+ self.logError(msg)
+ self.resetAccount()
+ else:
+ self.fail(msg)
diff --git a/pyload/plugins/hoster/MegaFilesSe.py b/pyload/plugins/hoster/MegaFilesSe.py
new file mode 100644
index 000000000..48306cd7f
--- /dev/null
+++ b/pyload/plugins/hoster/MegaFilesSe.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class MegaFilesSe(XFileSharingPro):
+ __name__ = "MegaFilesSe"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?megafiles\.se/\w{12}'
+
+ __description__ = """MegaFiles.se hoster plugin"""
+ __author_name__ = "t4skforce"
+ __author_mail__ = "t4skforce1337[AT]gmail[DOT]com"
+
+ HOSTER_NAME = "megafiles.se"
+
+ OFFLINE_PATTERN = r'<b><font[^>]*>File Not Found</font></b><br><br>'
+ FILE_NAME_PATTERN = r'<div[^>]+>\s*<b>(?P<N>[^<]+)</b>\s*</div>'
+
+
+getInfo = create_getInfo(MegaFilesSe)
diff --git a/pyload/plugins/hoster/MegaNz.py b/pyload/plugins/hoster/MegaNz.py
new file mode 100644
index 000000000..801df9c9d
--- /dev/null
+++ b/pyload/plugins/hoster/MegaNz.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+
+import random
+import re
+
+from Crypto.Cipher import AES
+from Crypto.Util import Counter
+from array import array
+from base64 import standard_b64decode
+from os import remove
+
+from pyload.common.json_layer import json_loads, json_dumps
+from pyload.plugins.Hoster import Hoster
+
+
+class MegaNz(Hoster):
+ __name__ = "MegaNz"
+ __type__ = "hoster"
+ __version__ = "0.14"
+
+ __pattern__ = r'https?://([a-z0-9]+\.)?mega\.co\.nz/#!([a-zA-Z0-9!_\-]+)'
+
+ __description__ = """Mega.co.nz hoster plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "ranan@pyload.org"
+
+ API_URL = "https://g.api.mega.co.nz/cs?id=%d"
+ FILE_SUFFIX = ".crypted"
+
+
+ def b64_decode(self, data):
+ data = data.replace("-", "+").replace("_", "/")
+ return standard_b64decode(data + '=' * (-len(data) % 4))
+
+ def getCipherKey(self, key):
+ """ Construct the cipher key from the given data """
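+ # A MEGA node key is 256 bits: the effective AES-128 key is the XOR of its upper
+ # and lower halves, while bytes 16-24 seed the CTR counter (see decryptFile below).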
+ a = array("I", key)
+ key_array = array("I", [a[0] ^ a[4], a[1] ^ a[5], a[2] ^ a[6], a[3] ^ a[7]])
+ return key_array
+
+ def callApi(self, **kwargs):
+ """ Dispatch a call to the api, see https://mega.co.nz/#developers """
+ # generate a session id, no idea where to obtain elsewhere
+ uid = random.randint(10 << 9, 10 ** 10)
+
+ resp = self.load(self.API_URL % uid, post=json_dumps([kwargs]))
+ self.logDebug("Api Response: " + resp)
+ return json_loads(resp)
+
+ def decryptAttr(self, data, key):
+
+ cbc = AES.new(self.getCipherKey(key), AES.MODE_CBC, "\0" * 16)
+ attr = cbc.decrypt(self.b64_decode(data))
+ self.logDebug("Decrypted Attr: " + attr)
+ if not attr.startswith("MEGA"):
+ self.fail(_("Decryption failed"))
+
+ # Data is padded, 0-bytes must be stripped
+ return json_loads(attr.replace("MEGA", "").rstrip("\0").strip())
+
+ def decryptFile(self, key):
+ """ Decrypt the file stored at self.lastDownload """
+
+ # upper 64 bit of counter start
+ n = key[16:24]
+
+ # convert counter to long and shift bytes
+ ctr = Counter.new(128, initial_value=long(n.encode("hex"), 16) << 64)
+ cipher = AES.new(self.getCipherKey(key), AES.MODE_CTR, counter=ctr)
+
+ self.pyfile.setStatus("decrypting")
+
+ file_crypted = self.lastDownload
+ file_decrypted = file_crypted.rsplit(self.FILE_SUFFIX)[0]
+ f = open(file_crypted, "rb")
+ df = open(file_decrypted, "wb")
+
+ # TODO: calculate CBC-MAC for checksum
+
+ size = 2 ** 15 # buffer size, 32k
+ while True:
+ buf = f.read(size)
+ if not buf:
+ break
+
+ df.write(cipher.decrypt(buf))
+
+ f.close()
+ df.close()
+ remove(file_crypted)
+
+ self.lastDownload = file_decrypted
+
+ def process(self, pyfile):
+
+ key = None
+
+ # match is guaranteed because plugin was chosen to handle url
+ node = re.match(self.__pattern__, pyfile.url).group(2)
+ if "!" in node:
+ node, key = node.split("!")
+
+ self.logDebug("File id: %s | Key: %s" % (node, key))
+
+ if not key:
+ self.fail(_("No file key provided in the URL"))
+
+ # g is for requesting a download url
+ # this mirrors the calls made by the mega web app; the official documentation is sparse
+ dl = self.callApi(a="g", g=1, p=node, ssl=1)[0]
+
+ if "e" in dl:
+ e = dl['e']
+ # ETEMPUNAVAIL (-18): Resource temporarily not available, please try again later
+ if e == -18:
+ self.retry()
+ else:
+ self.fail(_("Error code: %s") % e)
+
+ # TODO: map other error codes, e.g
+ # EACCESS (-11): Access violation (e.g., trying to write to a read-only share)
+
+ key = self.b64_decode(key)
+ attr = self.decryptAttr(dl['at'], key)
+
+ pyfile.name = attr['n'] + self.FILE_SUFFIX
+
+ self.download(dl['g'])
+ self.decryptFile(key)
+
+ # Everything is finished and final name can be set
+ pyfile.name = attr['n']
diff --git a/pyload/plugins/hoster/MegacrypterCom.py b/pyload/plugins/hoster/MegacrypterCom.py
new file mode 100644
index 000000000..0cc17bfe3
--- /dev/null
+++ b/pyload/plugins/hoster/MegacrypterCom.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.common.json_layer import json_loads, json_dumps
+from pyload.plugins.hoster.MegaNz import MegaNz
+
+
+class MegacrypterCom(MegaNz):
+ __name__ = "MegacrypterCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'(https?://[a-z0-9]{0,10}\.?megacrypter\.com/[a-zA-Z0-9!_\-]+)'
+
+ __description__ = """Megacrypter.com decrypter plugin"""
+ __author_name__ = "GonzaloSR"
+ __author_mail__ = "gonzalo@gonzalosr.com"
+
+ API_URL = "http://megacrypter.com/api"
+ FILE_SUFFIX = ".crypted"
+
+
+ def callApi(self, **kwargs):
+ """ Dispatch a call to the api, see megacrypter.com/api_doc """
+ self.logDebug("JSON request: " + json_dumps(kwargs))
+ resp = self.load(self.API_URL, post=json_dumps(kwargs))
+ self.logDebug("API Response: " + resp)
+ return json_loads(resp)
+
+ def process(self, pyfile):
+ # match is guaranteed because plugin was chosen to handle url
+ node = re.match(self.__pattern__, pyfile.url).group(1)
+
+ # get Mega.co.nz link info
+ info = self.callApi(link=node, m="info")
+
+ # get crypted file URL
+ dl = self.callApi(link=node, m="dl")
+
+ # TODO: map error codes, implement password protection
+ # if info['pass'] is True:
+ # crypted_file_key, md5_file_key = info['key'].split("#")
+
+ key = self.b64_decode(info['key'])
+
+ pyfile.name = info['name'] + self.FILE_SUFFIX
+
+ self.download(dl['url'])
+ self.decryptFile(key)
+
+ # Everything is finished and final name can be set
+ pyfile.name = info['name']
diff --git a/pyload/plugins/hoster/MegareleaseOrg.py b/pyload/plugins/hoster/MegareleaseOrg.py
new file mode 100644
index 000000000..6a689b6dd
--- /dev/null
+++ b/pyload/plugins/hoster/MegareleaseOrg.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class MegareleaseOrg(XFileSharingPro):
+ __name__ = "MegareleaseOrg"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?megarelease.org/\w{12}'
+
+ __description__ = """Megarelease.org hoster plugin"""
+ __author_name__ = ("derek3x", "stickell")
+ __author_mail__ = ("derek3x@vmail.me", "l.stickell@yahoo.it")
+
+ HOSTER_NAME = "megarelease.org"
+
+ FILE_INFO_PATTERN = r'<font color="red">%s/(?P<N>.+)</font> \((?P<S>[^)]+)\)</font>' % __pattern__
+
+
+getInfo = create_getInfo(MegareleaseOrg)
diff --git a/pyload/plugins/hoster/MegasharesCom.py b/pyload/plugins/hoster/MegasharesCom.py
new file mode 100644
index 000000000..36e13a531
--- /dev/null
+++ b/pyload/plugins/hoster/MegasharesCom.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class MegasharesCom(SimpleHoster):
+ __name__ = "MegasharesCom"
+ __type__ = "hoster"
+ __version__ = "0.24"
+
+ __pattern__ = r'http://(?:www\.)?megashares.com/.*'
+
+ __description__ = """Megashares.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<h1 class="black xxl"[^>]*title="(?P<N>[^"]+)">'
+ FILE_SIZE_PATTERN = r'<strong><span class="black">Filesize:</span></strong> (?P<S>[0-9.]+) (?P<U>[kKMG])i?B<br />'
+ OFFLINE_PATTERN = r'<dd class="red">(Invalid Link Request|Link has been deleted)'
+
+ LINK_PATTERN = r'<div id="show_download_button_%d"[^>]*>\s*<a href="([^"]+)">'
+ PASSPORT_LEFT_PATTERN = r'Your Download Passport is: <[^>]*>(\w+).*\s*You have\s*<[^>]*>\s*([0-9.]+) ([kKMG]i?B)'
+ PASSPORT_RENEW_PATTERN = r'Your download passport will renew in\s*<strong>(\d+)</strong>:<strong>(\d+)</strong>:<strong>(\d+)</strong>'
+ REACTIVATE_NUM_PATTERN = r'<input[^>]*id="random_num" value="(\d+)" />'
+ REACTIVATE_PASSPORT_PATTERN = r'<input[^>]*id="passport_num" value="(\w+)" />'
+ REQUEST_URI_PATTERN = r'var request_uri = "([^"]+)";'
+ NO_SLOTS_PATTERN = r'<dd class="red">All download slots for this link are currently filled'
+
+
+ def setup(self):
+ self.resumeDownload = True
+ self.multiDL = self.premium
+
+ def handlePremium(self):
+ self.handleDownload(True)
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ if self.NO_SLOTS_PATTERN in self.html:
+ self.retry(wait_time=5 * 60)
+
+ self.getFileInfo()
+ # if self.pyfile.size > 576716800:
+ # self.fail("This file is too large for free download")
+
+ # Reactivate passport if needed
+ m = re.search(self.REACTIVATE_PASSPORT_PATTERN, self.html)
+ if m:
+ passport_num = m.group(1)
+ request_uri = re.search(self.REQUEST_URI_PATTERN, self.html).group(1)
+
+ for _ in xrange(5):
+ random_num = re.search(self.REACTIVATE_NUM_PATTERN, self.html).group(1)
+
+ verifyinput = self.decryptCaptcha(
+ "http://d01.megashares.com/index.php?secgfx=gfx&random_num=%s" % random_num)
+ self.logInfo("Reactivating passport %s: %s %s" % (passport_num, random_num, verifyinput))
+
+ url = ("http://d01.megashares.com%s&rs=check_passport_renewal" % request_uri +
+ "&rsargs[]=%s&rsargs[]=%s&rsargs[]=%s" % (verifyinput, random_num, passport_num) +
+ "&rsargs[]=replace_sec_pprenewal&rsrnd=%s" % str(int(time() * 1000)))
+ self.logDebug(url)
+ response = self.load(url)
+
+ if 'Thank you for reactivating your passport.' in response:
+ self.correctCaptcha()
+ self.retry()
+ else:
+ self.invalidCaptcha()
+ else:
+ self.fail("Failed to reactivate passport")
+
+ # Check traffic left on passport
+ m = re.search(self.PASSPORT_LEFT_PATTERN, self.html)
+ if m is None:
+ self.fail('Passport not found')
+ self.logInfo("Download passport: %s" % m.group(1))
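+ # e.g. "1.50 GB" of passport traffic left -> 1.5 * 1024**3 bytes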
+ data_left = float(m.group(2)) * 1024 ** {'KB': 1, 'MB': 2, 'GB': 3}[m.group(3)]
+ self.logInfo("Data left: %s %s (%d MB needed)" % (m.group(2), m.group(3), self.pyfile.size / 1048576))
+
+ if not data_left:
+ m = re.search(self.PASSPORT_RENEW_PATTERN, self.html)
+ renew = int(m.group(1)) * 3600 + int(m.group(2)) * 60 + int(m.group(3)) if m else 10 * 60
+ self.retry(max_tries=15, wait_time=renew, reason="Unable to get passport")
+
+ self.handleDownload(False)
+
+ def handleDownload(self, premium=False):
+ # Find download link;
+ m = re.search(self.LINK_PATTERN % (1 if premium else 2), self.html)
+ msg = '%s download URL' % ('Premium' if premium else 'Free')
+ if m is None:
+ self.parseError(msg)
+
+ download_url = m.group(1)
+ self.logDebug("%s: %s" % (msg, download_url))
+ self.download(download_url)
+
+
+getInfo = create_getInfo(MegasharesCom)
diff --git a/pyload/plugins/hoster/MovReelCom.py b/pyload/plugins/hoster/MovReelCom.py
new file mode 100644
index 000000000..3f97d3fca
--- /dev/null
+++ b/pyload/plugins/hoster/MovReelCom.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class MovReelCom(XFileSharingPro):
+ __name__ = "MovReelCom"
+ __type__ = "hoster"
+ __version__ = "1.20"
+
+ __pattern__ = r'http://(?:www\.)?movreel.com/.*'
+
+ __description__ = """MovReel.com hoster plugin"""
+ __author_name__ = "JorisV83"
+ __author_mail__ = "jorisv83-pyload@yahoo.com"
+
+ HOSTER_NAME = "movreel.com"
+
+ FILE_INFO_PATTERN = r'<h3>(?P<N>.+?) <small><sup>(?P<S>[\d.]+) (?P<U>..)</sup> </small></h3>'
+ OFFLINE_PATTERN = r'<b>File Not Found</b><br><br>'
+ LINK_PATTERN = r'<a href="(http://[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*/.*)">Download Link</a>'
+
+
+getInfo = create_getInfo(MovReelCom)
diff --git a/pyload/plugins/hoster/MultiDebridCom.py b/pyload/plugins/hoster/MultiDebridCom.py
new file mode 100644
index 000000000..765022eef
--- /dev/null
+++ b/pyload/plugins/hoster/MultiDebridCom.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+
+
+class MultiDebridCom(Hoster):
+ __name__ = "MultiDebridCom"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/dl/'
+
+ __description__ = """Multi-debrid.com hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Multi-debrid.com")
+ self.fail("No Multi-debrid.com account provided")
+ else:
+ self.logDebug("Original URL: %s" % pyfile.url)
+ page = self.req.load('http://multi-debrid.com/api.php',
+ get={'user': self.user, 'pass': self.account.getAccountData(self.user)['password'],
+ 'link': pyfile.url})
+ self.logDebug("JSON data: " + page)
+ page = json_loads(page)
+ if page['status'] != 'ok':
+ self.fail('Unable to unrestrict link')
+ new_url = page['link']
+
+ if new_url != pyfile.url:
+ self.logDebug("Unrestricted URL: " + new_url)
+
+ self.download(new_url, disposition=True)
diff --git a/pyload/plugins/hoster/MultishareCz.py b/pyload/plugins/hoster/MultishareCz.py
new file mode 100644
index 000000000..819478659
--- /dev/null
+++ b/pyload/plugins/hoster/MultishareCz.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import random
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class MultishareCz(SimpleHoster):
+ __name__ = "MultishareCz"
+ __type__ = "hoster"
+ __version__ = "0.34"
+
+ __pattern__ = r'http://(?:www\.)?multishare.cz/stahnout/(?P<ID>\d+).*'
+
+ __description__ = """MultiShare.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_INFO_PATTERN = ur'(?:<li>Název|Soubor): <strong>(?P<N>[^<]+)</strong><(?:/li><li|br)>Velikost: <strong>(?P<S>[^<]+)</strong>'
+ OFFLINE_PATTERN = ur'<h1>Stáhnout soubor</h1><p><strong>Požadovaný soubor neexistuje.</strong></p>'
+ FILE_SIZE_REPLACEMENTS = [('&nbsp;', '')]
+
+
+ def process(self, pyfile):
+ msurl = re.match(self.__pattern__, pyfile.url)
+ if msurl:
+ self.fileID = msurl.group('ID')
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+ else:
+ self.handleOverriden()
+
+ def handleFree(self):
+ self.download("http://www.multishare.cz/html/download_free.php?ID=%s" % self.fileID)
+
+ def handlePremium(self):
+ if not self.checkCredit():
+ self.logWarning("Not enough credit left to download file")
+ self.resetAccount()
+
+ self.download("http://www.multishare.cz/html/download_premium.php?ID=%s" % self.fileID)
+
+ def handleOverriden(self):
+ if not self.premium:
+ self.fail("Only premium users can download from other hosters")
+
+ self.html = self.load('http://www.multishare.cz/html/mms_ajax.php', post={"link": self.pyfile.url}, decode=True)
+ self.getFileInfo()
+
+ if not self.checkCredit():
+ self.fail("Not enough credit left to download file")
+
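+ # pick a pseudo-random dl<N> mirror of the mms download server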
+ url = "http://dl%d.mms.multishare.cz/html/mms_process.php" % round(random() * 10000 * random())
+ params = {"u_ID": self.acc_info['u_ID'], "u_hash": self.acc_info['u_hash'], "link": self.pyfile.url}
+ self.logDebug(url, params)
+ self.download(url, get=params)
+
+ def checkCredit(self):
+ self.acc_info = self.account.getAccountInfo(self.user, True)
+ self.logInfo("User %s has %i MB left" % (self.user, self.acc_info['trafficleft'] / 1024))
+
+ return self.pyfile.size / 1024 <= self.acc_info['trafficleft']
+
+
+getInfo = create_getInfo(MultishareCz)
diff --git a/pyload/plugins/hoster/MyvideoDe.py b/pyload/plugins/hoster/MyvideoDe.py
new file mode 100644
index 000000000..4ce75b4a2
--- /dev/null
+++ b/pyload/plugins/hoster/MyvideoDe.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+from pyload.unescape import unescape
+
+
+class MyvideoDe(Hoster):
+ __name__ = "MyvideoDe"
+ __type__ = "hoster"
+ __version__ = "0.9"
+
+ __pattern__ = r'http://(?:www\.)?myvideo.de/watch/'
+
+ __description__ = """Myvideo.de hoster plugin"""
+ __author_name__ = "spoob"
+ __author_mail__ = "spoob@pyload.org"
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.download_html()
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+ def download_html(self):
+ self.html = self.load(self.pyfile.url)
+
+ def get_file_url(self):
+ videoId = re.search(r"addVariable\('_videoid','(.*)'\);p.addParam\('quality'", self.html).group(1)
+ videoServer = re.search("rel='image_src' href='(.*)thumbs/.*' />", self.html).group(1)
+ file_url = videoServer + videoId + ".flv"
+ return file_url
+
+ def get_file_name(self):
+ file_name_pattern = r"<h1 class='globalHd'>(.*)</h1>"
+ return unescape(re.search(file_name_pattern, self.html).group(1).replace("/", "") + '.flv')
+
+ def file_exists(self):
+ self.download_html()
+ self.load(str(self.pyfile.url), cookies=False, just_header=True)
+ if self.req.lastEffectiveURL == "http://www.myvideo.de/":
+ return False
+ return True
diff --git a/pyload/plugins/hoster/NarodRu.py b/pyload/plugins/hoster/NarodRu.py
new file mode 100644
index 000000000..22c0ba908
--- /dev/null
+++ b/pyload/plugins/hoster/NarodRu.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import random
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class NarodRu(SimpleHoster):
+ __name__ = "NarodRu"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?narod(\.yandex)?\.ru/(disk|start/[0-9]+\.\w+-narod\.yandex\.ru)/(?P<ID>\d+)/.+'
+
+ __description__ = """Narod.ru hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<dt class="name">(?:<[^<]*>)*(?P<N>[^<]+)</dt>'
+ FILE_SIZE_PATTERN = r'<dd class="size">(?P<S>\d[^<]*)</dd>'
+ OFFLINE_PATTERN = ur'<title>404</title>|Файл удален с сервиса|Закончился срок хранения файла\.'
+
+ FILE_SIZE_REPLACEMENTS = [(u'КБ', 'KB'), (u'МБ', 'MB'), (u'ГБ', 'GB')]
+ FILE_URL_REPLACEMENTS = [("narod.yandex.ru/", "narod.ru/"),
+ (r"/start/[0-9]+\.\w+-narod\.yandex\.ru/([0-9]{6,15})/\w+/(\w+)", r"/disk/\1/\2")]
+
+ CAPTCHA_PATTERN = r'<number url="(.*?)">(\w+)</number>'
+ LINK_PATTERN = r'<a class="h-link" rel="yandex_bar" href="(.+?)">'
+
+
+ def handleFree(self):
+ for _ in xrange(5):
+ self.html = self.load('http://narod.ru/disk/getcapchaxml/?rnd=%d' % int(random() * 777))
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m is None:
+ self.parseError('Captcha')
+ post_data = {"action": "sendcapcha"}
+ captcha_url, post_data['key'] = m.groups()
+ post_data['rep'] = self.decryptCaptcha(captcha_url)
+
+ self.html = self.load(self.pyfile.url, post=post_data, decode=True)
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ url = 'http://narod.ru' + m.group(1)
+ self.correctCaptcha()
+ break
+ elif u'<b class="error-msg"><strong>Ошиблись?</strong>' in self.html:
+ self.invalidCaptcha()
+ else:
+ self.parseError('Download link')
+ else:
+ self.fail("No valid captcha code entered")
+
+ self.logDebug('Download link: ' + url)
+ self.download(url)
+
+
+getInfo = create_getInfo(NarodRu)
diff --git a/pyload/plugins/hoster/NetloadIn.py b/pyload/plugins/hoster/NetloadIn.py
new file mode 100644
index 000000000..949b1aa92
--- /dev/null
+++ b/pyload/plugins/hoster/NetloadIn.py
@@ -0,0 +1,258 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import sleep, time
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+
+
+def getInfo(urls):
+ ## returns list of tuples (name, size (in bytes), status (see FileDatabase), url)
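+ ## the API answers with one "id;name;size;status[;md5]" line per requested file id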
+
+ apiurl = "http://api.netload.in/info.php?auth=Zf9SnQh9WiReEsb18akjvQGqT0I830e8&bz=1&md5=1&file_id="
+ id_regex = re.compile(NetloadIn.__pattern__)
+ urls_per_query = 80
+
+ for chunk in chunks(urls, urls_per_query):
+ ids = ""
+ for url in chunk:
+ match = id_regex.search(url)
+ if match:
+ ids = ids + match.group(1) + ";"
+
+ api = getURL(apiurl + ids, decode=True)
+
+ if api is None or len(api) < 10:
+ print "Netload prefetch: failed "
+ return
+ if api.find("unknown_auth") >= 0:
+ print "Netload prefetch: Outdated auth code "
+ return
+
+ result = []
+
+ for i, r in enumerate(api.splitlines()):
+ try:
+ tmp = r.split(";")
+ try:
+ size = int(tmp[2])
+ except:
+ size = 0
+ result.append((tmp[1], size, 2 if tmp[3] == "online" else 1, chunk[i]))
+ except:
+ print "Netload prefetch: Error while processing response: "
+ print r
+
+ yield result
+
+
+class NetloadIn(Hoster):
+ __name__ = "NetloadIn"
+ __type__ = "hoster"
+ __version__ = "0.45"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?netload\.in/(?:datei(.*?)(?:\.htm|/)|index.php?id=10&file_id=)'
+
+ __description__ = """Netload.in hoster plugin"""
+ __author_name__ = ("spoob", "RaNaN", "Gregy")
+ __author_mail__ = ("spoob@pyload.org", "ranan@pyload.org", "gregy@gregy.cz")
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = self.premium
+
+ def process(self, pyfile):
+ self.url = pyfile.url
+ self.prepare()
+ pyfile.setStatus("downloading")
+ self.proceed(self.url)
+
+ def prepare(self):
+ self.download_api_data()
+
+ if self.api_data and self.api_data['filename']:
+ self.pyfile.name = self.api_data['filename']
+
+ if self.premium:
+ self.logDebug("Netload: Use Premium Account")
+ settings = self.load("http://www.netload.in/index.php?id=2&lang=en")
+ if '<option value="2" selected="selected">Direkter Download' in settings:
+ self.logDebug("Using direct download")
+ return True
+ else:
+ self.logDebug("Direct downloads not enabled. Parsing html for a download URL")
+
+ if self.download_html():
+ return True
+ else:
+ self.fail("Failed")
+ return False
+
+ def download_api_data(self, n=0):
+ url = self.url
+ id_regex = re.compile(self.__pattern__)
+ match = id_regex.search(url)
+
+ if match:
+ #normalize url
+ self.url = 'http://www.netload.in/datei%s.htm' % match.group(1)
+ self.logDebug("URL: %s" % self.url)
+ else:
+ self.api_data = False
+ return
+
+ apiurl = "http://api.netload.in/info.php"
+ src = self.load(apiurl, cookies=False,
+ get={"file_id": match.group(1), "auth": "Zf9SnQh9WiReEsb18akjvQGqT0I830e8", "bz": "1",
+ "md5": "1"}, decode=True).strip()
+ if not src and n <= 3:
+ sleep(0.2)
+ self.download_api_data(n + 1)
+ return
+
+ self.logDebug("Netload: APIDATA: " + src)
+ self.api_data = {}
+ if src and ";" in src and src not in ("unknown file_data", "unknown_server_data", "No input file specified."):
+ lines = src.split(";")
+ self.api_data['exists'] = True
+ self.api_data['fileid'] = lines[0]
+ self.api_data['filename'] = lines[1]
+ self.api_data['size'] = lines[2]
+ self.api_data['status'] = lines[3]
+ if self.api_data['status'] == "online":
+ self.api_data['checksum'] = lines[4].strip()
+ else:
+ self.api_data = False # check manually since api data is useless sometimes
+
+ if lines[0] == lines[1] and lines[2] == "0": # useless api data
+ self.api_data = False
+ else:
+ self.api_data = False
+
+ def final_wait(self, page):
+ wait_time = self.get_wait_time(page)
+ self.setWait(wait_time)
+ self.logDebug("Netload: final wait %d seconds" % wait_time)
+ self.wait()
+ self.url = self.get_file_url(page)
+
+ def download_html(self):
+ self.logDebug("Netload: Entering download_html")
+ page = self.load(self.url, decode=True)
+ t = time() + 30
+
+ if "/share/templates/download_hddcrash.tpl" in page:
+ self.logError("Netload HDD Crash")
+ self.fail(_("File temporarily not available"))
+
+ if not self.api_data:
+ self.logDebug("API Data may be useless, get details from html page")
+
+ if "* The file was deleted" in page:
+ self.offline()
+
+ name = re.search(r'class="dl_first_filename">([^<]+)', page, re.MULTILINE)
+ # use the filename from the page only if it is not truncated
+ if name:
+ name = name.group(1).strip()
+ if not name.endswith(".."):
+ self.pyfile.name = name
+
+ captchawaited = False
+ for i in xrange(10):
+
+ if not page:
+ page = self.load(self.url)
+ t = time() + 30
+
+ if "/share/templates/download_hddcrash.tpl" in page:
+ self.logError("Netload HDD Crash")
+ self.fail(_("File temporarily not available"))
+
+ self.logDebug("Netload: try number %d " % i)
+
+ if ">Your download is being prepared.<" in page:
+ self.logDebug("Netload: We will prepare your download")
+ self.final_wait(page)
+ return True
+ if ">An access request has been made from IP address <" in page:
+ wait = self.get_wait_time(page)
+ if not wait:
+ self.logDebug("Netload: Wait was 0, setting it to 30 minutes")
+ wait = 30 * 60
+ self.logInfo(_("Netload: waiting between downloads %d s." % wait))
+ self.wantReconnect = True
+ self.setWait(wait)
+ self.wait()
+
+ return self.download_html()
+
+ self.logDebug("Netload: Trying to find captcha")
+
+ try:
+ url_captcha_html = "http://netload.in/" + re.search('(index.php\?id=10&amp;.*&amp;captcha=1)',
+ page).group(1).replace("amp;", "")
+ except:
+ page = None
+ continue
+
+ try:
+ page = self.load(url_captcha_html, cookies=True)
+ captcha_url = "http://netload.in/" + re.search('(share/includes/captcha.php\?t=\d*)', page).group(1)
+ except:
+ self.logDebug("Netload: Could not find captcha, try again from beginning")
+ captchawaited = False
+ continue
+
+ file_id = re.search('<input name="file_id" type="hidden" value="(.*)" />', page).group(1)
+ if not captchawaited:
+ wait = self.get_wait_time(page)
+ if i == 0:
+ self.pyfile.waitUntil = time() # don't wait, contrary to the time shown on the website
+ else:
+ self.pyfile.waitUntil = t
+ self.logInfo(_("Netload: waiting for captcha %d s.") % (self.pyfile.waitUntil - time()))
+ #self.setWait(wait)
+ self.wait()
+ captchawaited = True
+
+ captcha = self.decryptCaptcha(captcha_url)
+ page = self.load("http://netload.in/index.php?id=10", post={"file_id": file_id, "captcha_check": captcha},
+ cookies=True)
+
+ return False
+
+ def get_file_url(self, page):
+ try:
+ file_url_pattern = r"<a class=\"Orange_Link\" href=\"(http://.+)\".?>Or click here"
+ attempt = re.search(file_url_pattern, page)
+ if attempt is not None:
+ return attempt.group(1)
+ else:
+ self.logDebug("Netload: Backup try for final link")
+ file_url_pattern = r"<a href=\"(.+)\" class=\"Orange_Link\">Click here"
+ attempt = re.search(file_url_pattern, page)
+ return "http://netload.in/" + attempt.group(1)
+ except:
+ self.logDebug("Netload: Getting final link failed")
+ return None
+
+ def get_wait_time(self, page):
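+ # the page embeds countdown(<value>,'change()') where <value> is in hundredths
+ # of a second, e.g. 3000 -> 30 seconds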
+ wait_seconds = int(re.search(r"countdown\((.+),'change\(\)'\)", page).group(1)) / 100
+ return wait_seconds
+
+ def proceed(self, url):
+ self.logDebug("Netload: Downloading..")
+
+ self.download(url, disposition=True)
+
+ check = self.checkDownload({"empty": re.compile(r"^$"), "offline": re.compile("The file was deleted")})
+
+ if check == "empty":
+ self.logInfo(_("Downloaded File was empty"))
+ self.retry()
+ elif check == "offline":
+ self.offline()
diff --git a/pyload/plugins/hoster/NosuploadCom.py b/pyload/plugins/hoster/NosuploadCom.py
new file mode 100644
index 000000000..e4feabdd0
--- /dev/null
+++ b/pyload/plugins/hoster/NosuploadCom.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class NosuploadCom(XFileSharingPro):
+ __name__ = "NosuploadCom"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?nosupload\.com/\?d=\w{12}'
+
+ __description__ = """Nosupload.com hoster plugin"""
+ __author_name__ = "igel"
+ __author_mail__ = "igelkun@myopera.com"
+
+ HOSTER_NAME = "nosupload.com"
+
+ FILE_SIZE_PATTERN = r'<p><strong>Size:</strong> (?P<S>[0-9\.]+) (?P<U>[kKMG]?B)</p>'
+ LINK_PATTERN = r'<a class="select" href="(http://.+?)">Download</a>'
+ WAIT_PATTERN = r'Please wait.*?>(\d+)</span>'
+
+
+ def getDownloadLink(self):
+ # stage1: press the "Free Download" button
+ data = self.getPostParameters()
+ self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
+
+ # stage2: wait some time and press the "Download File" button
+ data = self.getPostParameters()
+ wait_time = re.search(self.WAIT_PATTERN, self.html, re.MULTILINE | re.DOTALL).group(1)
+ self.logDebug("hoster told us to wait %s seconds" % wait_time)
+ self.wait(int(wait_time))
+ self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
+
+ # stage3: get the download link
+ return re.search(self.LINK_PATTERN, self.html, re.S).group(1)
+
+
+getInfo = create_getInfo(NosuploadCom)
diff --git a/pyload/plugins/hoster/NovafileCom.py b/pyload/plugins/hoster/NovafileCom.py
new file mode 100644
index 000000000..1346bbde9
--- /dev/null
+++ b/pyload/plugins/hoster/NovafileCom.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://novafile.com/vfun4z6o2cit
+# http://novafile.com/s6zrr5wemuz4
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class NovafileCom(XFileSharingPro):
+ __name__ = "NovafileCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?novafile\.com/\w{12}'
+
+ __description__ = """Novafile.com hoster plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ HOSTER_NAME = "novafile.com"
+
+ FILE_SIZE_PATTERN = r'<div class="size">(?P<S>.+?)</div>'
+ ERROR_PATTERN = r'class="alert[^"]*alert-separate"[^>]*>\s*(?:<p>)?(.*?)\s*</'
+ LINK_PATTERN = r'<a href="(http://s\d+\.novafile\.com/.*?)" class="btn btn-green">Download File</a>'
+ WAIT_PATTERN = r'<p>Please wait <span id="count"[^>]*>(\d+)</span> seconds</p>'
+
+
+ def setup(self):
+ self.multiDL = False
+
+
+getInfo = create_getInfo(NovafileCom)
diff --git a/pyload/plugins/hoster/NowDownloadEu.py b/pyload/plugins/hoster/NowDownloadEu.py
new file mode 100644
index 000000000..6e42a55bb
--- /dev/null
+++ b/pyload/plugins/hoster/NowDownloadEu.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from pyload.utils import fixup
+
+
+class NowDownloadEu(SimpleHoster):
+ __name__ = "NowDownloadEu"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?nowdownload\.(ch|co|eu|sx)/(dl/|download\.php\?id=)(?P<ID>\w+)'
+
+ __description__ = """NowDownload.ch hoster plugin"""
+ __author_name__ = ("godofdream", "Walter Purcaro")
+ __author_mail__ = ("soilfiction@gmail.com", "vuolter@gmail.com")
+
+ FILE_INFO_PATTERN = r'Downloading</span> <br> (?P<N>.*) (?P<S>[0-9,.]+) (?P<U>[kKMG])i?B </h4>'
+ OFFLINE_PATTERN = r'(This file does not exist!)'
+
+ TOKEN_PATTERN = r'"(/api/token\.php\?token=[a-z0-9]+)"'
+ CONTINUE_PATTERN = r'"(/dl2/[a-z0-9]+/[a-z0-9]+)"'
+ WAIT_PATTERN = r'\.countdown\(\{until: \+(\d+),'
+ LINK_PATTERN = r'"(http://f\d+\.nowdownload\.ch/dl/[a-z0-9]+/[a-z0-9]+/[^<>"]*?)"'
+
+ FILE_NAME_REPLACEMENTS = [("&#?\w+;", fixup), (r'<[^>]*>', '')]
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+ self.chunkLimit = -1
+
+ def handleFree(self):
+ tokenlink = re.search(self.TOKEN_PATTERN, self.html)
+ continuelink = re.search(self.CONTINUE_PATTERN, self.html)
+ if tokenlink is None or continuelink is None:
+ self.fail('Plugin out of Date')
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ wait = int(m.group(1))
+ else:
+ wait = 60
+
+ baseurl = "http://www.nowdownload.ch"
+ self.html = self.load(baseurl + str(tokenlink.group(1)))
+ self.wait(wait)
+
+ self.html = self.load(baseurl + str(continuelink.group(1)))
+
+ url = re.search(self.LINK_PATTERN, self.html)
+ if url is None:
+ self.fail('Download Link not Found (Plugin out of Date?)')
+ self.logDebug('Download link: ' + str(url.group(1)))
+ self.download(str(url.group(1)))
+
+
+getInfo = create_getInfo(NowDownloadEu)
diff --git a/pyload/plugins/hoster/OboomCom.py b/pyload/plugins/hoster/OboomCom.py
new file mode 100644
index 000000000..04efa31b7
--- /dev/null
+++ b/pyload/plugins/hoster/OboomCom.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# https://www.oboom.com/B7CYZIEB/10Mio.dat
+
+import re
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+
+
+class OboomCom(Hoster):
+ __name__ = "OboomCom"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'https?://(?:www\.)?oboom\.com/(#(id=|/)?)?(?P<ID>[A-Z0-9]{8})'
+
+ __description__ = """oboom.com hoster plugin"""
+ __author_name__ = "stanley"
+ __author_mail__ = "stanley.foerster@gmail.com"
+
+ RECAPTCHA_KEY = "6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX"
+
+
+ def loadUrl(self, url, get=None):
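+ # All oboom API calls answer with a JSON array [status, payload, ...],
+ # e.g. [200, "<session token>"] for a successful guest-session request.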
+ if get is None:
+ get = dict()
+ return json_loads(self.load(url, get, decode=True))
+
+ def getFileId(self, url):
+ self.fileId = re.match(OboomCom.__pattern__, url).group('ID')
+
+ def getSessionToken(self):
+ if self.premium:
+ accountInfo = self.account.getAccountInfo(self.user, True)
+ if "session" in accountInfo:
+ self.sessionToken = accountInfo['session']
+ else:
+ self.fail("Could not retrieve premium session")
+ else:
+ apiUrl = "https://www.oboom.com/1.0/guestsession"
+ result = self.loadUrl(apiUrl)
+ if result[0] == 200:
+ self.sessionToken = result[1]
+ else:
+ self.fail("Could not retrieve token for guest session. Error code %s" % result[0])
+
+ def solveCaptcha(self):
+ recaptcha = ReCaptcha(self)
+ for _ in xrange(5):
+ challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ apiUrl = "https://www.oboom.com/1.0/download/ticket"
+ params = {"recaptcha_challenge_field": challenge,
+ "recaptcha_response_field": response,
+ "download_id": self.fileId,
+ "token": self.sessionToken}
+ result = self.loadUrl(apiUrl, params)
+
+ if result[0] == 200:
+ self.downloadToken = result[1]
+ self.downloadAuth = result[2]
+ self.correctCaptcha()
+ self.setWait(30)
+ self.wait()
+ break
+ elif result[0] == 400:
+ if result[1] == "incorrect-captcha-sol":
+ self.invalidCaptcha()
+ elif result[1] == "captcha-timeout":
+ self.invalidCaptcha()
+ elif result[1] == "forbidden":
+ self.retry(5, 15 * 60, "Service unavailable")
+ elif result[0] == 403:
+ if result[1] == -1: # another download is running
+ self.setWait(15 * 60)
+ else:
+ self.setWait(result[1], reconnect=True)
+ self.wait()
+ self.retry(5)
+ else:
+ self.invalidCaptcha()
+ self.fail("Received invalid captcha 5 times")
+
+ def getFileInfo(self, token, fileId):
+ apiUrl = "https://api.oboom.com/1.0/info"
+ params = {"token": token, "items": fileId, "http_errors": 0}
+
+ result = self.loadUrl(apiUrl, params)
+ if result[0] == 200:
+ item = result[1][0]
+ if item['state'] == "online":
+ self.fileSize = item['size']
+ self.fileName = item['name']
+ else:
+ self.offline()
+ else:
+ self.fail("Could not retrieve file info. Error code %s: %s" % (result[0], result[1]))
+
+ def getDownloadTicket(self):
+ apiUrl = "https://api.oboom.com/1.0/dl"
+ params = {"item": self.fileId, "http_errors": 0}
+ if self.premium:
+ params['token'] = self.sessionToken
+ else:
+ params['token'] = self.downloadToken
+ params['auth'] = self.downloadAuth
+
+ result = self.loadUrl(apiUrl, params)
+ if result[0] == 200:
+ self.downloadDomain = result[1]
+ self.downloadTicket = result[2]
+ else:
+ self.fail("Could not retrieve download ticket. Error code %s" % result[0])
+
+ def setup(self):
+ self.chunkLimit = 1
+ self.multiDL = self.premium
+
+ def process(self, pyfile):
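+        # normalize '#id=<ID>' and '#/<ID>' style links to the plain '#<ID>' form matched by __pattern__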
+ self.pyfile.url.replace(".com/#id=", ".com/#")
+ self.pyfile.url.replace(".com/#/", ".com/#")
+ self.getFileId(self.pyfile.url)
+ self.getSessionToken()
+ self.getFileInfo(self.sessionToken, self.fileId)
+ self.pyfile.name = self.fileName
+ self.pyfile.size = self.fileSize
+ if not self.premium:
+ self.solveCaptcha()
+ self.getDownloadTicket()
+ self.download("https://%s/1.0/dlh" % self.downloadDomain, get={"ticket": self.downloadTicket, "http_errors": 0})
diff --git a/pyload/plugins/hoster/OneFichierCom.py b/pyload/plugins/hoster/OneFichierCom.py
new file mode 100644
index 000000000..8fdecb342
--- /dev/null
+++ b/pyload/plugins/hoster/OneFichierCom.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://5pnm24ltcw.1fichier.com/
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class OneFichierCom(SimpleHoster):
+ __name__ = "OneFichierCom"
+ __type__ = "hoster"
+ __version__ = "0.61"
+
+ __pattern__ = r'(http://(?P<id>\w+)\.(?P<host>(1fichier|d(es)?fichiers|pjointe)\.(com|fr|net|org)|(cjoint|mesfichiers|piecejointe|oi)\.(org|net)|tenvoi\.(com|org|net)|dl4free\.com|alterupload\.com|megadl.fr))/?'
+
+ __description__ = """1fichier.com hoster plugin"""
+ __author_name__ = ("fragonib", "the-razer", "zoidberg", "imclem", "stickell", "Elrick69")
+ __author_mail__ = ("fragonib[AT]yahoo[DOT]es", "daniel_ AT gmx DOT net", "zoidberg@mujmail.cz",
+ "imclem on github", "l.stickell@yahoo.it", "elrick69[AT]rocketmail[DOT]com")
+
+ FILE_NAME_PATTERN = r'">Filename :</th>\s*<td>(?P<N>[^<]+)</td>'
+ FILE_SIZE_PATTERN = r'<th>Size :</th>\s*<td>(?P<S>[^<]+)</td>'
+ OFFLINE_PATTERN = r'The (requested)? file (could not be found|has been deleted)'
+
+ FILE_URL_REPLACEMENTS = [(__pattern__, r'http://\g<id>.\g<host>/en/')]
+
+ WAITING_PATTERN = r'Warning ! Without premium status, you must wait between each downloads'
+ NOT_PARALLEL = r'Warning ! Without premium status, you can download only one file at a time'
+ WAIT_TIME = 10 * 60 # Retry time between each free download
+ RETRY_TIME = 15 * 60 # Default retry time in seconds (if detected parallel download)
+
+
+ def setup(self):
+ self.multiDL = self.premium
+ self.resumeDownload = True
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ if self.WAITING_PATTERN in self.html:
+            self.logInfo('You have to wait between each free download! Retrying in %d seconds.' % self.WAIT_TIME)
+ self.waitAndRetry(self.WAIT_TIME)
+ else: # detect parallel download
+ m = re.search(self.NOT_PARALLEL, self.html)
+ if m:
+ self.waitAndRetry(self.RETRY_TIME)
+
+ url, inputs = self.parseHtmlForm('action="http://%s' % self.file_info['id'])
+ if not url:
+ self.parseError("Download link not found")
+
+ # Check for protection
+ if "pass" in inputs:
+ inputs['pass'] = self.getPassword()
+ inputs['submit'] = "Download"
+
+ self.download(url, post=inputs)
+
+ # Check download
+ self.checkDownloadedFile()
+
+ def handlePremium(self):
+ url, inputs = self.parseHtmlForm('action="http://%s' % self.file_info['id'])
+ if not url:
+ self.parseError("Download link not found")
+
+ # Check for protection
+ if "pass" in inputs:
+ inputs['pass'] = self.getPassword()
+ inputs['submit'] = "Download"
+
+ self.download(url, post=inputs)
+
+ # Check download
+ self.checkDownloadedFile()
+
+ def checkDownloadedFile(self):
+ check = self.checkDownload({"wait": self.WAITING_PATTERN})
+ if check == "wait":
+ self.waitAndRetry(int(self.lastcheck.group(1)) * 60)
+
+ def waitAndRetry(self, wait_time):
+ self.wait(wait_time, True)
+ self.retry()
+
+
+
+getInfo = create_getInfo(OneFichierCom)
diff --git a/pyload/plugins/hoster/OverLoadMe.py b/pyload/plugins/hoster/OverLoadMe.py
new file mode 100644
index 000000000..8061b2e1d
--- /dev/null
+++ b/pyload/plugins/hoster/OverLoadMe.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import randrange
+from urllib import unquote
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import parseFileSize
+
+
+class OverLoadMe(Hoster):
+ __name__ = "OverLoadMe"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://.*overload\.me.*'
+
+ __description__ = """Over-Load.me hoster plugin"""
+ __author_name__ = "marley"
+ __author_mail__ = "marley@over-load.me"
+
+
+ def getFilename(self, url):
+ try:
+ name = unquote(url.rsplit("/", 1)[1])
+ except IndexError:
+ name = "Unknown_Filename..."
+ if name.endswith("..."): # incomplete filename, append random stuff
+ name += "%s.tmp" % randrange(100, 999)
+ return name
+
+ def setup(self):
+ self.chunkLimit = 5
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Over-Load")
+ self.fail("No Over-Load account provided")
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ data = self.account.getAccountData(self.user)
+
+ page = self.load("https://api.over-load.me/getdownload.php",
+ get={"auth": data['password'], "link": pyfile.url})
+ data = json_loads(page)
+
+ self.logDebug("Returned Data: %s" % data)
+
+ if data['err'] == 1:
+ self.logWarning(data['msg'])
+ self.tempOffline()
+ else:
+ if pyfile.name is not None and pyfile.name.endswith('.tmp') and data['filename']:
+ pyfile.name = data['filename']
+ pyfile.size = parseFileSize(data['filesize'])
+ new_url = data['downloadlink']
+
+ if self.getConfig("https"):
+ new_url = new_url.replace("http://", "https://")
+ else:
+ new_url = new_url.replace("https://", "http://")
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
+ # only use when name wasn't already set
+ pyfile.name = self.getFilename(new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload(
+ {"error": "<title>An error occured while processing your request</title>"})
+
+ if check == "error":
+            # usually this download can safely be retried
+            self.retry(reason="An error occurred while generating link.", wait_time=60)
diff --git a/pyload/plugins/hoster/PandaPlanet.py b/pyload/plugins/hoster/PandaPlanet.py
new file mode 100644
index 000000000..8b26202df
--- /dev/null
+++ b/pyload/plugins/hoster/PandaPlanet.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# test.bin - 214 B - http://pandapla.net/pew1cz3ot586
+# BigBuckBunny_320x180.mp4 - 61.7 Mb - http://pandapla.net/tz0rgjfyyoh7
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class PandaPlanet(XFileSharingPro):
+ __name__ = "PandaPlanet"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?pandapla\.net/\w{12}'
+
+ __description__ = """Pandapla.net hoster plugin"""
+ __author_name__ = "t4skforce"
+ __author_mail__ = "t4skforce1337[AT]gmail[DOT]com"
+
+ HOSTER_NAME = "pandapla.net"
+
+ FILE_SIZE_PATTERN = r'File Size:</b>\s*</td>\s*<td[^>]*>(?P<S>[^<]+)</td>\s*</tr>'
+ FILE_NAME_PATTERN = r'File Name:</b>\s*</td>\s*<td[^>]*>(?P<N>[^<]+)</td>\s*</tr>'
+ LINK_PATTERN = r'(http://([^/]*?%s|\d+\.\d+\.\d+\.\d+)(:\d+)?(/d/|(?:/files)?/\d+/\w+/)[^"\'<]+\/(?!video\.mp4)[^"\'<]+)' % HOSTER_NAME
+
+
+getInfo = create_getInfo(PandaPlanet)
diff --git a/pyload/plugins/hoster/PornhostCom.py b/pyload/plugins/hoster/PornhostCom.py
new file mode 100644
index 000000000..802557873
--- /dev/null
+++ b/pyload/plugins/hoster/PornhostCom.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+
+
+class PornhostCom(Hoster):
+ __name__ = "PornhostCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?pornhost\.com/([0-9]+/[0-9]+\.html|[0-9]+)'
+
+ __description__ = """Pornhost.com hoster plugin"""
+ __author_name__ = "jeix"
+ __author_mail__ = "jeix@hasnomail.de"
+
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+ # Old interface
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ url = re.search(r'download this file</label>.*?<a href="(.*?)"', self.html)
+ if url is None:
+ url = re.search(r'"(http://dl[0-9]+\.pornhost\.com/files/.*?/.*?/.*?/.*?/.*?/.*?\..*?)"', self.html)
+ if url is None:
+ url = re.search(r'width: 894px; height: 675px">.*?<img src="(.*?)"', self.html)
+ if url is None:
+ url = re.search(r'"http://file[0-9]+\.pornhost\.com/[0-9]+/.*?"',
+ self.html) # TODO: fix this one since it doesn't match
+
+ return url.group(1).strip()
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ name = re.search(r'<title>pornhost\.com - free file hosting with a twist - gallery(.*?)</title>', self.html)
+ if name is None:
+ name = re.search(r'id="url" value="http://www\.pornhost\.com/(.*?)/"', self.html)
+ if name is None:
+ name = re.search(r'<title>pornhost\.com - free file hosting with a twist -(.*?)</title>', self.html)
+ if name is None:
+ name = re.search(r'"http://file[0-9]+\.pornhost\.com/.*?/(.*?)"', self.html)
+
+ name = name.group(1).strip() + ".flv"
+
+ return name
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if (re.search(r'gallery not found', self.html) is not None or
+ re.search(r'You will be redirected to', self.html) is not None):
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/PornhubCom.py b/pyload/plugins/hoster/PornhubCom.py
new file mode 100644
index 000000000..5236fe09a
--- /dev/null
+++ b/pyload/plugins/hoster/PornhubCom.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+
+
+class PornhubCom(Hoster):
+ __name__ = "PornhubCom"
+ __type__ = "hoster"
+ __version__ = "0.5"
+
+ __pattern__ = r'http://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=[\w\d]+'
+
+ __description__ = """Pornhub.com hoster plugin"""
+ __author_name__ = "jeix"
+ __author_mail__ = "jeix@hasnomail.de"
+
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ url = "http://www.pornhub.com//gateway.php"
+ video_id = self.pyfile.url.split('=')[-1]
+ # thanks to jD team for this one v
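+        # (the byte string below is a raw AMF request body for the 'playerConfig' gateway call;
+        #  the video id and what appears to be a static API key are appended to it)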
+ post_data = "\x00\x03\x00\x00\x00\x01\x00\x0c\x70\x6c\x61\x79\x65\x72\x43\x6f\x6e\x66\x69\x67\x00\x02\x2f\x31\x00\x00\x00\x44\x0a\x00\x00\x00\x03\x02\x00"
+ post_data += chr(len(video_id))
+ post_data += video_id
+ post_data += "\x02\x00\x02\x2d\x31\x02\x00\x20"
+ post_data += "add299463d4410c6d1b1c418868225f7"
+
+ content = self.req.load(url, post=str(post_data))
+
+ new_content = ""
+ for x in content:
+ if ord(x) < 32 or ord(x) > 176:
+ new_content += '#'
+ else:
+ new_content += x
+
+ content = new_content
+
+ return re.search(r'flv_url.*(http.*?)##post_roll', content).group(1)
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ m = re.search(r'<title[^>]+>([^<]+) - ', self.html)
+ if m:
+ name = m.group(1)
+ else:
+ matches = re.findall('<h1>(.*?)</h1>', self.html)
+ if len(matches) > 1:
+ name = matches[1]
+ else:
+ name = matches[0]
+
+ return name + '.flv'
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if re.search(r'This video is no longer in our database or is in conversion', self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/PotloadCom.py b/pyload/plugins/hoster/PotloadCom.py
new file mode 100644
index 000000000..6a97d0289
--- /dev/null
+++ b/pyload/plugins/hoster/PotloadCom.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class PotloadCom(XFileSharingPro):
+ __name__ = "PotloadCom"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?potload\.com/\w{12}'
+
+ __description__ = """Potload.com hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+ HOSTER_NAME = "potload.com"
+
+ FILE_INFO_PATTERN = r'<h[1-6]>(?P<N>.+) \((?P<S>\d+) (?P<U>\w+)\)</h'
+
+
+getInfo = create_getInfo(PotloadCom)
diff --git a/pyload/plugins/hoster/Premium4Me.py b/pyload/plugins/hoster/Premium4Me.py
new file mode 100644
index 000000000..439242aa7
--- /dev/null
+++ b/pyload/plugins/hoster/Premium4Me.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+from os import remove
+from os.path import exists
+from urllib import quote
+
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import fs_encode
+
+
+class Premium4Me(Hoster):
+ __name__ = "Premium4Me"
+ __type__ = "hoster"
+ __version__ = "0.08"
+
+ __pattern__ = r'http://(?:www\.)?premium.to/.*'
+
+ __description__ = """Premium.to hoster plugin"""
+ __author_name__ = ("RaNaN", "zoidberg", "stickell")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+
+ def setup(self):
+ self.resumeDownload = True
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "premium.to")
+ self.fail("No premium.to account provided")
+
+ self.logDebug("premium.to: Old URL: %s" % pyfile.url)
+
+ tra = self.getTraffic()
+
+ #raise timeout to 2min
+ self.req.setOption("timeout", 120)
+
+ self.download(
+ "http://premium.to/api/getfile.php?authcode=%s&link=%s" % (self.account.authcode, quote(pyfile.url, "")),
+ disposition=True)
+
+ check = self.checkDownload({"nopremium": "No premium account available"})
+
+ if check == "nopremium":
+ self.retry(60, 5 * 60, "No premium account available")
+
+ err = ''
+ if self.req.http.code == '420':
+            # Custom error code sent - fail
+ lastDownload = fs_encode(self.lastDownload)
+
+ if exists(lastDownload):
+ f = open(lastDownload, "rb")
+ err = f.read(256).strip()
+ f.close()
+ remove(lastDownload)
+ else:
+ err = 'File does not exist'
+
+ trb = self.getTraffic()
+ self.logInfo("Filesize: %d, Traffic used %d, traffic left %d" % (pyfile.size, tra - trb, trb))
+
+ if err:
+ self.fail(err)
+
+ def getTraffic(self):
+ try:
+ traffic = int(self.load("http://premium.to/api/traffic.php?authcode=%s" % self.account.authcode))
+ except:
+ traffic = 0
+ return traffic
diff --git a/pyload/plugins/hoster/PremiumizeMe.py b/pyload/plugins/hoster/PremiumizeMe.py
new file mode 100644
index 000000000..16649f492
--- /dev/null
+++ b/pyload/plugins/hoster/PremiumizeMe.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+
+
+class PremiumizeMe(Hoster):
+ __name__ = "PremiumizeMe"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+    __pattern__ = None  #: Since we want to allow the user to specify the list of hosters to use, we let MultiHoster.coreReady() set the pattern
+
+ __description__ = """Premiumize.me hoster plugin"""
+ __author_name__ = "Florian Franzen"
+ __author_mail__ = "FlorianFranzen@gmail.com"
+
+
+ def process(self, pyfile):
+ # Check account
+ if not self.account or not self.account.canUse():
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "premiumize.me")
+ self.fail("No valid premiumize.me account provided")
+
+        # In some cases hosters do not supply us with a filename at download, so we
+        # are going to set a fallback filename (e.g. for freakshare or xfileshare)
+        pyfile.name = pyfile.name.split('/').pop()  # Remove everything before the last slash
+
+        # Correction for automatically assigned filename: remove html-like suffix at the end if needed
+ suffix_to_remove = ["html", "htm", "php", "php3", "asp", "shtm", "shtml", "cfml", "cfm"]
+ temp = pyfile.name.split('.')
+ if temp.pop() in suffix_to_remove:
+ pyfile.name = ".".join(temp)
+
+ # Get account data
+ (user, data) = self.account.selectAccount()
+
+ # Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api)
+ answer = self.load(
+ "https://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s" % (
+ user, data['password'], pyfile.url))
+ data = json_loads(answer)
+
+ # Check status and decide what to do
+ status = data['status']
+ if status == 200:
+ self.download(data['result']['location'], disposition=True)
+ elif status == 400:
+ self.fail("Invalid link")
+ elif status == 404:
+ self.offline()
+ elif status >= 500:
+ self.tempOffline()
+ else:
+ self.fail(data['statusmessage'])
diff --git a/pyload/plugins/hoster/PromptfileCom.py b/pyload/plugins/hoster/PromptfileCom.py
new file mode 100644
index 000000000..108f470d2
--- /dev/null
+++ b/pyload/plugins/hoster/PromptfileCom.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class PromptfileCom(SimpleHoster):
+ __name__ = "PromptfileCom"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'https?://(?:www\.)?promptfile\.com/'
+
+ __description__ = """Promptfile.com hoster plugin"""
+ __author_name__ = "igel"
+ __author_mail__ = "igelkun@myopera.com"
+
+ FILE_INFO_PATTERN = r'<span style="[^"]*" title="[^"]*">(?P<N>.*?) \((?P<S>[\d.]+) (?P<U>\w+)\)</span>'
+ OFFLINE_PATTERN = r'<span style="[^"]*" title="File Not Found">File Not Found</span>'
+
+ CHASH_PATTERN = r'<input type="hidden" name="chash" value="([^"]*)" />'
+ LINK_PATTERN = r"clip: {\s*url: '(https?://(?:www\.)promptfile[^']*)',"
+
+
+ def handleFree(self):
+ # STAGE 1: get link to continue
+ m = re.search(self.CHASH_PATTERN, self.html)
+ if m is None:
+ self.parseError("Unable to detect chash")
+ chash = m.group(1)
+ self.logDebug("read chash %s" % chash)
+ # continue to stage2
+ self.html = self.load(self.pyfile.url, decode=True, post={'chash': chash})
+
+ # STAGE 2: get the direct link
+ m = re.search(self.LINK_PATTERN, self.html, re.MULTILINE | re.DOTALL)
+ if m is None:
+ self.parseError("Unable to detect direct link")
+ direct = m.group(1)
+ self.logDebug("found direct link: " + direct)
+ self.download(direct, disposition=True)
+
+
+getInfo = create_getInfo(PromptfileCom)
diff --git a/pyload/plugins/hoster/QuickshareCz.py b/pyload/plugins/hoster/QuickshareCz.py
new file mode 100644
index 000000000..d82c64888
--- /dev/null
+++ b/pyload/plugins/hoster/QuickshareCz.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import FOLLOWLOCATION
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class QuickshareCz(SimpleHoster):
+ __name__ = "QuickshareCz"
+ __type__ = "hoster"
+ __version__ = "0.54"
+
+ __pattern__ = r'http://(?:[^/]*\.)?quickshare.cz/stahnout-soubor/.*'
+
+ __description__ = """Quickshare.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<th width="145px">Název:</th>\s*<td style="word-wrap:break-word;">(?P<N>[^<]+)</td>'
+ FILE_SIZE_PATTERN = r'<th>Velikost:</th>\s*<td>(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</td>'
+ OFFLINE_PATTERN = r'<script type="text/javascript">location.href=\'/chyba\';</script>'
+
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+
+ # parse js variables
+ self.jsvars = dict((x, y.strip("'")) for x, y in re.findall(r"var (\w+) = ([0-9.]+|'[^']*')", self.html))
+ self.logDebug(self.jsvars)
+ pyfile.name = self.jsvars['ID3']
+
+ # determine download type - free or premium
+ if self.premium:
+ if 'UU_prihlasen' in self.jsvars:
+ if self.jsvars['UU_prihlasen'] == '0':
+ self.logWarning('User not logged in')
+ self.relogin(self.user)
+ self.retry()
+ elif float(self.jsvars['UU_kredit']) < float(self.jsvars['kredit_odecet']):
+ self.logWarning('Not enough credit left')
+ self.premium = False
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ check = self.checkDownload({"err": re.compile(r"\AChyba!")}, max_size=100)
+ if check == "err":
+ self.fail("File not m or plugin defect")
+
+ def handleFree(self):
+ # get download url
+ download_url = '%s/download.php' % self.jsvars['server']
+ data = dict((x, self.jsvars[x]) for x in self.jsvars if x in ("ID1", "ID2", "ID3", "ID4"))
+ self.logDebug("FREE URL1:" + download_url, data)
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+ self.load(download_url, post=data)
+ self.header = self.req.http.header
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+
+ m = re.search("Location\s*:\s*(.*)", self.header, re.I)
+ if m is None:
+ self.fail('File not found')
+ download_url = m.group(1)
+ self.logDebug("FREE URL2:" + download_url)
+
+ # check errors
+ m = re.search(r'/chyba/(\d+)', download_url)
+ if m:
+ if m.group(1) == '1':
+ self.retry(60, 2 * 60, "This IP is already downloading")
+ elif m.group(1) == '2':
+ self.retry(60, 60, "No free slots available")
+ else:
+                self.fail('Error %s' % m.group(1))
+
+ # download file
+ self.download(download_url)
+
+ def handlePremium(self):
+ download_url = '%s/download_premium.php' % self.jsvars['server']
+ data = dict((x, self.jsvars[x]) for x in self.jsvars if x in ("ID1", "ID2", "ID4", "ID5"))
+ self.logDebug("PREMIUM URL:" + download_url, data)
+ self.download(download_url, get=data)
+
+
+getInfo = create_getInfo(QuickshareCz)
diff --git a/pyload/plugins/hoster/RPNetBiz.py b/pyload/plugins/hoster/RPNetBiz.py
new file mode 100644
index 000000000..e305c35ce
--- /dev/null
+++ b/pyload/plugins/hoster/RPNetBiz.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+from pyload.common.json_layer import json_loads
+
+
+class RPNetBiz(Hoster):
+ __name__ = "RPNetBiz"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __description__ = """RPNet.biz hoster plugin"""
+
+ __pattern__ = r'https?://.*rpnet\.biz'
+ __author_name__ = "Dman"
+ __author_mail__ = "dmanugm@gmail.com"
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ link_status = {'generated': pyfile.url}
+ elif not self.account:
+ # Check account
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "rpnet")
+ self.fail("No rpnet account provided")
+ else:
+ (user, data) = self.account.selectAccount()
+
+ self.logDebug("Original URL: %s" % pyfile.url)
+ # Get the download link
+ response = self.load("https://premium.rpnet.biz/client_api.php",
+ get={"username": user, "password": data['password'],
+ "action": "generate", "links": pyfile.url})
+
+ self.logDebug("JSON data: %s" % response)
+ link_status = json_loads(response)['links'][0] # get the first link... since we only queried one
+
+ # Check if we only have an id as a HDD link
+ if 'id' in link_status:
+ self.logDebug("Need to wait at least 30 seconds before requery")
+ self.setWait(30) # wait for 30 seconds
+ self.wait()
+                # Let's query the server again, asking for the status of the link;
+                # we need to keep doing this until it reaches 100%
+ max_tries = 30
+ my_try = 0
+ while (my_try <= max_tries):
+ self.logDebug("Try: %d ; Max Tries: %d" % (my_try, max_tries))
+ response = self.load("https://premium.rpnet.biz/client_api.php",
+ get={"username": user, "password": data['password'],
+ "action": "downloadInformation", "id": link_status['id']})
+ self.logDebug("JSON data hdd query: %s" % response)
+ download_status = json_loads(response)['download']
+
+ if download_status['status'] == '100':
+ link_status['generated'] = download_status['rpnet_link']
+ self.logDebug("Successfully downloaded to rpnet HDD: %s" % link_status['generated'])
+ break
+ else:
+ self.logDebug("At %s%% for the file download" % download_status['status'])
+
+ self.setWait(30)
+ self.wait()
+ my_try += 1
+
+ if my_try > max_tries: # We went over the limit!
+ self.fail("Waited for about 15 minutes for download to finish but failed")
+
+ if 'generated' in link_status:
+ self.download(link_status['generated'], disposition=True)
+ elif 'error' in link_status:
+ self.fail(link_status['error'])
+ else:
+ self.fail("Something went wrong, not supposed to enter here")
diff --git a/pyload/plugins/hoster/RapidgatorNet.py b/pyload/plugins/hoster/RapidgatorNet.py
new file mode 100644
index 000000000..46fe285b7
--- /dev/null
+++ b/pyload/plugins/hoster/RapidgatorNet.py
@@ -0,0 +1,191 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import HTTPHEADER
+
+from pyload.common.json_layer import json_loads
+from pyload.network.HTTPRequest import BadHeader
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import AdsCaptcha, ReCaptcha, SolveMedia
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class RapidgatorNet(SimpleHoster):
+ __name__ = "RapidgatorNet"
+ __type__ = "hoster"
+ __version__ = "0.22"
+
+ __pattern__ = r'http://(?:www\.)?(rapidgator\.net|rg\.to)/file/\w+'
+
+ __description__ = """Rapidgator.net hoster plugin"""
+ __author_name__ = ("zoidberg", "chrox", "stickell", "Walter Purcaro")
+ __author_mail__ = ("zoidberg@mujmail.cz", "", "l.stickell@yahoo.it", "vuolter@gmail.com")
+
+ API_URL = "http://rapidgator.net/api/file"
+
+ FILE_NAME_PATTERN = r'<title>Download file (?P<N>.*)</title>'
+ FILE_SIZE_PATTERN = r'File size:\s*<strong>(?P<S>[\d\.]+) (?P<U>\w+)</strong>'
+ OFFLINE_PATTERN = r'>(File not found|Error 404)'
+
+ JSVARS_PATTERN = r"\s+var\s*(startTimerUrl|getDownloadUrl|captchaUrl|fid|secs)\s*=\s*'?(.*?)'?;"
+ PREMIUM_ONLY_ERROR_PATTERN = r'You can download files up to|This file can be downloaded by premium only<'
+ DOWNLOAD_LIMIT_ERROR_PATTERN = r'You have reached your (daily|hourly) downloads limit'
+ WAIT_PATTERN = r'(?:Delay between downloads must be not less than|Try again in)\s*(\d+)\s*(hour|min)'
+ LINK_PATTERN = r"return '(http://\w+.rapidgator.net/.*)';"
+
+ RECAPTCHA_KEY_PATTERN = r'"http://api\.recaptcha\.net/challenge\?k=(.*?)"'
+ ADSCAPTCHA_SRC_PATTERN = r'(http://api\.adscaptcha\.com/Get\.aspx[^"\']*)'
+ SOLVEMEDIA_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.script\?k=(.*?)"'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = self.premium
+ self.sid = None
+ self.chunkLimit = 1
+ self.req.setOption("timeout", 120)
+
+ def process(self, pyfile):
+ if self.account:
+ self.sid = self.account.getAccountData(self.user).get('SID', None)
+
+ if self.sid:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def api_response(self, cmd):
+ try:
+ json = self.load('%s/%s' % (self.API_URL, cmd),
+ get={'sid': self.sid,
+ 'url': self.pyfile.url}, decode=True)
+ self.logDebug('API:%s' % cmd, json, "SID: %s" % self.sid)
+ json = json_loads(json)
+ status = json['response_status']
+ msg = json['response_details']
+ except BadHeader, e:
+ self.logError('API:%s' % cmd, e, "SID: %s" % self.sid)
+ status = e.code
+ msg = e
+
+ if status == 200:
+ return json['response']
+ elif status == 423:
+ self.account.empty(self.user)
+ self.retry()
+ else:
+ self.account.relogin(self.user)
+ self.retry(wait_time=60)
+
+ def handlePremium(self):
+ #self.logDebug("ACCOUNT_DATA", self.account.getAccountData(self.user))
+ self.api_data = self.api_response('info')
+ self.api_data['md5'] = self.api_data['hash']
+ self.pyfile.name = self.api_data['filename']
+ self.pyfile.size = self.api_data['size']
+ url = self.api_response('download')['url']
+ self.download(url)
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ self.checkFree()
+
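+        # free download flow, as implemented below: read the js vars from the page, call the
+        # AjaxStartTimer url, wait 'secs' seconds, call the AjaxGetDownload url, then solve the captcha page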
+ jsvars = dict(re.findall(self.JSVARS_PATTERN, self.html))
+ self.logDebug(jsvars)
+
+ self.req.http.lastURL = self.pyfile.url
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
+
+ url = "http://rapidgator.net%s?fid=%s" % (
+ jsvars.get('startTimerUrl', '/download/AjaxStartTimer'), jsvars['fid'])
+ jsvars.update(self.getJsonResponse(url))
+
+ self.wait(int(jsvars.get('secs', 45)) + 1, False)
+
+ url = "http://rapidgator.net%s?sid=%s" % (
+ jsvars.get('getDownloadUrl', '/download/AjaxGetDownload'), jsvars['sid'])
+ jsvars.update(self.getJsonResponse(url))
+
+ self.req.http.lastURL = self.pyfile.url
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With:"])
+
+ url = "http://rapidgator.net%s" % jsvars.get('captchaUrl', '/download/captcha')
+ self.html = self.load(url)
+
+ for _ in xrange(5):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ link = m.group(1)
+ self.logDebug(link)
+ self.download(link, disposition=True)
+ break
+ else:
+ captcha, captcha_key = self.getCaptcha()
+ captcha_challenge, captcha_response = captcha.challenge(captcha_key)
+
+ self.html = self.load(url, post={
+ "DownloadCaptchaForm[captcha]": "",
+ "adcopy_challenge": captcha_challenge,
+ "adcopy_response": captcha_response
+ })
+
+ if "The verification code is incorrect" in self.html:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ else:
+ self.parseError("Download link")
+
+ def getCaptcha(self):
+ m = re.search(self.ADSCAPTCHA_SRC_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ captcha = AdsCaptcha(self)
+ else:
+ m = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ captcha = ReCaptcha(self)
+ else:
+ m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ captcha = SolveMedia(self)
+ else:
+ self.parseError("Captcha")
+
+ return captcha, captcha_key
+
+ def checkFree(self):
+ m = re.search(self.PREMIUM_ONLY_ERROR_PATTERN, self.html)
+ if m:
+ self.fail("Premium account needed for download")
+ else:
+ m = re.search(self.WAIT_PATTERN, self.html)
+
+ if m:
+ wait_time = int(m.group(1)) * {"hour": 60, "min": 1}[m.group(2)]
+ else:
+ m = re.search(self.DOWNLOAD_LIMIT_ERROR_PATTERN, self.html)
+ if m is None:
+ return
+ elif m.group(1) == "daily":
+ self.logWarning("You have reached your daily downloads limit for today")
+ wait_time = secondsToMidnight(gmt=2)
+ else:
+ wait_time = 1 * 60 * 60
+
+ self.logDebug("Waiting %d minutes" % wait_time / 60)
+ self.wait(wait_time, True)
+ self.retry()
+
+ def getJsonResponse(self, url):
+ response = self.load(url, decode=True)
+ if not response.startswith('{'):
+ self.retry()
+ self.logDebug(url, response)
+ return json_loads(response)
+
+
+getInfo = create_getInfo(RapidgatorNet)
diff --git a/pyload/plugins/hoster/RapidshareCom.py b/pyload/plugins/hoster/RapidshareCom.py
new file mode 100644
index 000000000..fefa06fd7
--- /dev/null
+++ b/pyload/plugins/hoster/RapidshareCom.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+
+
+def getInfo(urls):
+ ids = ""
+ names = ""
+
+ p = re.compile(RapidshareCom.__pattern__)
+
+ for url in urls:
+ r = p.search(url)
+ if r.group("name"):
+ ids += "," + r.group("id")
+ names += "," + r.group("name")
+ elif r.group("name_new"):
+ ids += "," + r.group("id_new")
+ names += "," + r.group("name_new")
+
+ url = "http://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=checkfiles&files=%s&filenames=%s" % (ids[1:], names[1:])
+
+ api = getURL(url)
+ result = []
+ i = 0
+ for res in api.split():
+ tmp = res.split(",")
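+        # tmp[4] holds the RS API file status; the possible codes are listed in download_api_data() below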
+ if tmp[4] in ("0", "4", "5"):
+ status = 1
+ elif tmp[4] == "1":
+ status = 2
+ else:
+ status = 3
+
+ result.append((tmp[1], tmp[2], status, urls[i]))
+ i += 1
+
+ yield result
+
+
+class RapidshareCom(Hoster):
+ __name__ = "RapidshareCom"
+ __type__ = "hoster"
+ __version__ = "1.39"
+
+ __pattern__ = r'https?://(?:www\.)?rapidshare.com/(?:files/(?P<id>\d*?)/(?P<name>[^?]+)|#!download\|(?:\w+)\|(?P<id_new>\d+)\|(?P<name_new>[^|]+))'
+ __config__ = [("server",
+ "Cogent;Deutsche Telekom;Level(3);Level(3) #2;GlobalCrossing;Level(3) #3;Teleglobe;GlobalCrossing #2;TeliaSonera #2;Teleglobe #2;TeliaSonera #3;TeliaSonera",
+ "Preferred Server", "None")]
+
+ __description__ = """Rapidshare.com hoster plugin"""
+ __author_name__ = ("spoob", "RaNaN", "mkaay")
+ __author_mail__ = ("spoob@pyload.org", "ranan@pyload.org", "mkaay@mkaay.de")
+
+
+ def setup(self):
+ self.no_download = True
+ self.api_data = None
+ self.offset = 0
+ self.dl_dict = {}
+
+ self.id = None
+ self.name = None
+
+ self.chunkLimit = -1 if self.premium else 1
+ self.multiDL = self.resumeDownload = self.premium
+
+ def process(self, pyfile):
+ self.url = pyfile.url
+ self.prepare()
+
+ def prepare(self):
+ m = re.match(self.__pattern__, self.url)
+
+ if m.group("name"):
+ self.id = m.group("id")
+ self.name = m.group("name")
+ else:
+ self.id = m.group("id_new")
+ self.name = m.group("name_new")
+
+ self.download_api_data()
+ if self.api_data['status'] == "1":
+ self.pyfile.name = self.get_file_name()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ elif self.api_data['status'] == "2":
+ self.logInfo(_("Rapidshare: Traffic Share (direct download)"))
+ self.pyfile.name = self.get_file_name()
+
+ self.download(self.pyfile.url, get={"directstart": 1})
+
+ elif self.api_data['status'] in ("0", "4", "5"):
+ self.offline()
+ elif self.api_data['status'] == "3":
+ self.tempOffline()
+ else:
+ self.fail("Unknown response code.")
+
+ def handleFree(self):
+ while self.no_download:
+ self.dl_dict = self.freeWait()
+
+ #tmp = "#!download|%(server)s|%(id)s|%(name)s|%(size)s"
+ download = "http://%(host)s/cgi-bin/rsapi.cgi?sub=download&editparentlocation=0&bin=1&fileid=%(id)s&filename=%(name)s&dlauth=%(auth)s" % self.dl_dict
+
+ self.logDebug("RS API Request: %s" % download)
+ self.download(download, ref=False)
+
+ check = self.checkDownload({"ip": "You need RapidPro to download more files from your IP address",
+ "auth": "Download auth invalid"})
+ if check == "ip":
+ self.setWait(60)
+ self.logInfo(_("Already downloading from this ip address, waiting 60 seconds"))
+ self.wait()
+ self.handleFree()
+ elif check == "auth":
+ self.logInfo(_("Invalid Auth Code, download will be restarted"))
+ self.offset += 5
+ self.handleFree()
+
+ def handlePremium(self):
+ info = self.account.getAccountInfo(self.user, True)
+ self.logDebug("%s: Use Premium Account" % self.__name__)
+ url = self.api_data['mirror']
+ self.download(url, get={"directstart": 1})
+
+ def download_api_data(self, force=False):
+ """
+ http://images.rapidshare.com/apidoc.txt
+ """
+ if self.api_data and not force:
+ return
+ api_url_base = "http://api.rapidshare.com/cgi-bin/rsapi.cgi"
+ api_param_file = {"sub": "checkfiles", "incmd5": "1", "files": self.id, "filenames": self.name}
+ src = self.load(api_url_base, cookies=False, get=api_param_file).strip()
+ self.logDebug("RS INFO API: %s" % src)
+ if src.startswith("ERROR"):
+ return
+ fields = src.split(",")
+
+ # status codes:
+ # 0=File not found
+ # 1=File OK (Anonymous downloading)
+ # 3=Server down
+ # 4=File marked as illegal
+ # 5=Anonymous file locked, because it has more than 10 downloads already
+ # 50+n=File OK (TrafficShare direct download type "n" without any logging.)
+ # 100+n=File OK (TrafficShare direct download type "n" with logging.
+ # Read our privacy policy to see what is logged.)
+
+ self.api_data = {"fileid": fields[0], "filename": fields[1], "size": int(fields[2]), "serverid": fields[3],
+ "status": fields[4], "shorthost": fields[5], "checksum": fields[6].strip().lower()}
+
+ if int(self.api_data['status']) > 100:
+ self.api_data['status'] = str(int(self.api_data['status']) - 100)
+ elif int(self.api_data['status']) > 50:
+ self.api_data['status'] = str(int(self.api_data['status']) - 50)
+
+ self.api_data['mirror'] = "http://rs%(serverid)s%(shorthost)s.rapidshare.com/files/%(fileid)s/%(filename)s" % self.api_data
+
+ def freeWait(self):
+ """downloads html with the important information
+ """
+ self.no_download = True
+
+ id = self.id
+ name = self.name
+
+ prepare = "https://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=download&fileid=%(id)s&filename=%(name)s&try=1&cbf=RSAPIDispatcher&cbid=1" % {
+ "name": name, "id": id}
+
+ self.logDebug("RS API Request: %s" % prepare)
+ result = self.load(prepare, ref=False)
+ self.logDebug("RS API Result: %s" % result)
+
+ between_wait = re.search("You need to wait (\d+) seconds", result)
+
+ if "You need RapidPro to download more files from your IP address" in result:
+ self.setWait(60)
+ self.logInfo(_("Already downloading from this ip address, waiting 60 seconds"))
+ self.wait()
+ elif ("Too many users downloading from this server right now" in result or
+ "All free download slots are full" in result):
+ self.setWait(120)
+ self.logInfo(_("RapidShareCom: No free slots"))
+ self.wait()
+ elif "This file is too big to download it for free" in result:
+ self.fail(_("You need a premium account for this file"))
+ elif "Filename invalid." in result:
+ self.fail(_("Filename reported invalid"))
+ elif between_wait:
+ self.setWait(int(between_wait.group(1)))
+ self.wantReconnect = True
+ self.wait()
+ else:
+ self.no_download = False
+
+ tmp, info = result.split(":")
+ data = info.split(",")
+
+ dl_dict = {"id": id,
+ "name": name,
+ "host": data[0],
+ "auth": data[1],
+ "server": self.api_data['serverid'],
+ "size": self.api_data['size']}
+ self.setWait(int(data[2]) + 2 + self.offset)
+ self.wait()
+
+ return dl_dict
+
+ def get_file_name(self):
+ if self.api_data['filename']:
+ return self.api_data['filename']
+ return self.url.split("/")[-1]
diff --git a/pyload/plugins/hoster/RarefileNet.py b/pyload/plugins/hoster/RarefileNet.py
new file mode 100644
index 000000000..7c6632aac
--- /dev/null
+++ b/pyload/plugins/hoster/RarefileNet.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+from pyload.utils import html_unescape
+
+
+class RarefileNet(XFileSharingPro):
+ __name__ = "RarefileNet"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?rarefile.net/\w{12}'
+
+ __description__ = """Rarefile.net hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ HOSTER_NAME = "rarefile.net"
+
+ FILE_NAME_PATTERN = r'<td><font color="red">(?P<N>.*?)</font></td>'
+ FILE_SIZE_PATTERN = r'<td>Size : (?P<S>.+?)&nbsp;'
+ LINK_PATTERN = r'<a href="(?P<link>[^"]+)">(?P=link)</a>'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = self.premium
+
+ def handleCaptcha(self, inputs):
+ captcha_div = re.search(r'<b>Enter code.*?<div.*?>(.*?)</div>', self.html, re.S).group(1)
+ self.logDebug(captcha_div)
+ numerals = re.findall('<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', html_unescape(captcha_div))
+ inputs['code'] = "".join([a[1] for a in sorted(numerals, key=lambda num: int(num[0]))])
+ self.logDebug("CAPTCHA", inputs['code'], numerals)
+ return 3
+
+
+getInfo = create_getInfo(RarefileNet)
diff --git a/pyload/plugins/hoster/RealdebridCom.py b/pyload/plugins/hoster/RealdebridCom.py
new file mode 100644
index 000000000..a458cc5d0
--- /dev/null
+++ b/pyload/plugins/hoster/RealdebridCom.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import randrange
+from urllib import quote, unquote
+from time import time
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import parseFileSize
+
+
+class RealdebridCom(Hoster):
+ __name__ = "RealdebridCom"
+ __type__ = "hoster"
+ __version__ = "0.53"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?real-debrid\..*'
+
+ __description__ = """Real-Debrid.com hoster plugin"""
+ __author_name__ = "Devirex Hazzard"
+ __author_mail__ = "naibaf_11@yahoo.de"
+
+
+ def getFilename(self, url):
+ try:
+ name = unquote(url.rsplit("/", 1)[1])
+ except IndexError:
+ name = "Unknown_Filename..."
+ if not name or name.endswith(".."): # incomplete filename, append random stuff
+ name += "%s.tmp" % randrange(100, 999)
+ return name
+
+ def setup(self):
+ self.chunkLimit = 3
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Real-debrid")
+ self.fail("No Real-debrid account provided")
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ password = self.getPassword().splitlines()
+ if not password:
+ password = ""
+ else:
+ password = password[0]
+
+ url = "https://real-debrid.com/ajax/unrestrict.php?lang=en&link=%s&password=%s&time=%s" % (
+ quote(pyfile.url, ""), password, int(time() * 1000))
+ page = self.load(url)
+ data = json_loads(page)
+
+ self.logDebug("Returned Data: %s" % data)
+
+ if data['error'] != 0:
+ if data['message'] == "Your file is unavailable on the hoster.":
+ self.offline()
+ else:
+ self.logWarning(data['message'])
+ self.tempOffline()
+ else:
+ if pyfile.name is not None and pyfile.name.endswith('.tmp') and data['file_name']:
+ pyfile.name = data['file_name']
+ pyfile.size = parseFileSize(data['file_size'])
+ new_url = data['generated_links'][0][-1]
+
+ if self.getConfig("https"):
+ new_url = new_url.replace("http://", "https://")
+ else:
+ new_url = new_url.replace("https://", "http://")
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
+            # only use when name wasn't already set
+ pyfile.name = self.getFilename(new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload(
+ {"error": "<title>An error occured while processing your request</title>"})
+
+ if check == "error":
+            # usually this download can safely be retried
+            self.retry(wait_time=60, reason="An error occurred while generating link.")
diff --git a/pyload/plugins/hoster/RedtubeCom.py b/pyload/plugins/hoster/RedtubeCom.py
new file mode 100644
index 000000000..42c24628e
--- /dev/null
+++ b/pyload/plugins/hoster/RedtubeCom.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+from pyload.unescape import unescape
+
+
+class RedtubeCom(Hoster):
+ __name__ = "RedtubeCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?redtube\.com/\d+'
+
+ __description__ = """Redtube.com hoster plugin"""
+ __author_name__ = "jeix"
+ __author_mail__ = "jeix@hasnomail.de"
+
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ file_url = unescape(re.search(r'hashlink=(http.*?)"', self.html).group(1))
+
+ return file_url
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ return re.search('<title>(.*?)- RedTube - Free Porn Videos</title>', self.html).group(1).strip() + ".flv"
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if re.search(r'This video has been removed.', self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/RehostTo.py b/pyload/plugins/hoster/RehostTo.py
new file mode 100644
index 000000000..d3d3fcd8b
--- /dev/null
+++ b/pyload/plugins/hoster/RehostTo.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+from urllib import quote, unquote
+
+from pyload.plugins.Hoster import Hoster
+
+
+class RehostTo(Hoster):
+ __name__ = "RehostTo"
+ __type__ = "hoster"
+ __version__ = "0.13"
+
+ __pattern__ = r'https?://.*rehost.to\..*'
+
+ __description__ = """Rehost.com hoster plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ def getFilename(self, url):
+ return unquote(url.rsplit("/", 1)[1])
+
+ def setup(self):
+ self.chunkLimit = 1
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "rehost.to")
+ self.fail("No rehost.to account provided")
+
+ data = self.account.getAccountInfo(self.user)
+ long_ses = data['long_ses']
+
+ self.logDebug("Rehost.to: Old URL: %s" % pyfile.url)
+ new_url = "http://rehost.to/process_download.php?user=cookie&pass=%s&dl=%s" % (long_ses, quote(pyfile.url, ""))
+
+ #raise timeout to 2min
+ self.req.setOption("timeout", 120)
+
+ self.download(new_url, disposition=True)
diff --git a/pyload/plugins/hoster/RemixshareCom.py b/pyload/plugins/hoster/RemixshareCom.py
new file mode 100644
index 000000000..dfd7db5a0
--- /dev/null
+++ b/pyload/plugins/hoster/RemixshareCom.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://remixshare.com/download/p946u
+#
+# Note:
+# The remixshare.com website is very slow, so if your download
+# does not start because of pycurl timeouts, adjust the timeouts
+# in /usr/share/pyload/pyload/network/HTTPRequest.py
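+# (alternatively, a plugin can raise its own per-request timeout, e.g. by calling
+#  self.req.setOption("timeout", 120) in setup(), as some other hoster plugins in this repo do)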
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class RemixshareCom(SimpleHoster):
+ __name__ = "RemixshareCom"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://remixshare\.com/(download|dl)/\w+'
+
+ __description__ = """Remixshare.com hoster plugin"""
+ __author_name__ = ("zapp-brannigan", "Walter Purcaro")
+ __author_mail__ = ("fuerst.reinje@web.de", "vuolter@gmail.com")
+
+ FILE_INFO_PATTERN = r'title=\'.+?\'>(?P<N>.+?)</span><span class=\'light2\'>&nbsp;\((?P<S>\d+)&nbsp;(?P<U>\w+)\)<'
+ OFFLINE_PATTERN = r'<h1>Ooops!<'
+
+ LINK_PATTERN = r'(http://remixshare\.com/downloadfinal/.+?)"'
+ TOKEN_PATTERN = r'var acc = (\d+)'
+ WAIT_PATTERN = r'var XYZ = r"(\d+)"'
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = 1
+
+ def handleFree(self):
+ b = re.search(self.LINK_PATTERN, self.html)
+ if not b:
+ self.parseError("Cannot parse download url")
+ c = re.search(self.TOKEN_PATTERN, self.html)
+ if not c:
+ self.parseError("Cannot parse file token")
+ dl_url = b.group(1) + c.group(1)
+
+ #Check if we have to wait
+ seconds = re.search(self.WAIT_PATTERN, self.html)
+ if seconds:
+ self.logDebug("Wait " + seconds.group(1))
+            self.wait(int(seconds.group(1)))
+
+ # Finally start downloading...
+ self.logDebug("Download URL = r" + dl_url)
+ self.download(dl_url, disposition=True)
+
+
+getInfo = create_getInfo(RemixshareCom)
diff --git a/pyload/plugins/hoster/RgHostNet.py b/pyload/plugins/hoster/RgHostNet.py
new file mode 100644
index 000000000..0240f3a05
--- /dev/null
+++ b/pyload/plugins/hoster/RgHostNet.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class RgHostNet(SimpleHoster):
+ __name__ = "RgHostNet"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?rghost\.net/\d+(?:r=\d+)?'
+
+ __description__ = """RgHost.net hoster plugin"""
+ __author_name__ = "z00nx"
+ __author_mail__ = "z00nx0@gmail.com"
+
+ FILE_INFO_PATTERN = r'<h1>\s+(<a[^>]+>)?(?P<N>[^<]+)(</a>)?\s+<small[^>]+>\s+\((?P<S>[^)]+)\)\s+</small>\s+</h1>'
+ OFFLINE_PATTERN = r'File is deleted|this page is not found'
+ LINK_PATTERN = r'''<a\s+href="([^"]+)"\s+class="btn\s+large\s+download"[^>]+>Download</a>'''
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError("Unable to detect the direct link")
+ download_link = m.group(1)
+ self.download(download_link, disposition=True)
+
+
+getInfo = create_getInfo(RgHostNet)
diff --git a/pyload/plugins/hoster/RyushareCom.py b/pyload/plugins/hoster/RyushareCom.py
new file mode 100644
index 000000000..326c55e0c
--- /dev/null
+++ b/pyload/plugins/hoster/RyushareCom.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://ryushare.com/cl0jy8ric2js/random.bin
+
+import re
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+from pyload.plugins.internal.CaptchaService import SolveMedia
+
+
+class RyushareCom(XFileSharingPro):
+ __name__ = "RyushareCom"
+ __type__ = "hoster"
+ __version__ = "0.16"
+
+ __pattern__ = r'http://(?:www\.)?ryushare\.com/\w+'
+
+ __description__ = """Ryushare.com hoster plugin"""
+ __author_name__ = ("zoidberg", "stickell", "quareevo")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it", "quareevo@arcor.de")
+
+ HOSTER_NAME = "ryushare.com"
+
+ FILE_SIZE_PATTERN = r'You have requested <font color="red">[^<]+</font> \((?P<S>[\d\.]+) (?P<U>\w+)'
+
+ WAIT_PATTERN = r'You have to wait ((?P<hour>\d+) hour[s]?, )?((?P<min>\d+) minute[s], )?(?P<sec>\d+) second[s]'
+ LINK_PATTERN = r'<a href="([^"]+)">Click here to download<'
+ SOLVEMEDIA_PATTERN = r'http:\/\/api\.solvemedia\.com\/papi\/challenge\.script\?k=(.*?)"'
+
+
+ def getDownloadLink(self):
+ retry = False
+ self.html = self.load(self.pyfile.url)
+ action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})
+ if "method_premium" in inputs:
+ del inputs['method_premium']
+
+ self.html = self.load(self.pyfile.url, post=inputs)
+ action, inputs = self.parseHtmlForm('F1')
+
+ self.setWait(65)
+ # Wait 1 hour
+ if "You have reached the download-limit" in self.html:
+ self.setWait(1 * 60 * 60, True)
+ retry = True
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ wait = m.groupdict(0)
+ waittime = int(wait['hour']) * 60 * 60 + int(wait['min']) * 60 + int(wait['sec'])
+ self.setWait(waittime, True)
+ retry = True
+
+ self.wait()
+ if retry:
+ self.retry()
+
+ for _ in xrange(5):
+ m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if m is None:
+ self.parseError("Error parsing captcha")
+
+ captchaKey = m.group(1)
+ captcha = SolveMedia(self)
+ challenge, response = captcha.challenge(captchaKey)
+
+ inputs['adcopy_challenge'] = challenge
+ inputs['adcopy_response'] = response
+
+ self.html = self.load(self.pyfile.url, post=inputs)
+ if "WRONG CAPTCHA" in self.html:
+ self.invalidCaptcha()
+ self.logInfo("Invalid Captcha")
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail("You have entered 5 invalid captcha codes")
+
+ if "Click here to download" in self.html:
+ return re.search(r'<a href="([^"]+)">Click here to download</a>', self.html).group(1)
+
+
+getInfo = create_getInfo(RyushareCom)
diff --git a/pyload/plugins/hoster/SecureUploadEu.py b/pyload/plugins/hoster/SecureUploadEu.py
new file mode 100644
index 000000000..befe5f0e9
--- /dev/null
+++ b/pyload/plugins/hoster/SecureUploadEu.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class SecureUploadEu(XFileSharingPro):
+ __name__ = "SecureUploadEu"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?secureupload\.eu/(\w){12}(/\w+)'
+
+ __description__ = """SecureUpload.eu hoster plugin"""
+ __author_name__ = "z00nx"
+ __author_mail__ = "z00nx0@gmail.com"
+
+ HOSTER_NAME = "secureupload.eu"
+
+ FILE_INFO_PATTERN = r'<h3>Downloading (?P<N>[^<]+) \((?P<S>[^<]+)\)</h3>'
+ OFFLINE_PATTERN = r'The file was removed|File Not Found'
+
+
+getInfo = create_getInfo(SecureUploadEu)
diff --git a/pyload/plugins/hoster/SendmywayCom.py b/pyload/plugins/hoster/SendmywayCom.py
new file mode 100644
index 000000000..87cbfcc0d
--- /dev/null
+++ b/pyload/plugins/hoster/SendmywayCom.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class SendmywayCom(XFileSharingPro):
+ __name__ = "SendmywayCom"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?sendmyway.com/\w{12}'
+
+ __description__ = """SendMyWay hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ HOSTER_NAME = "sendmyway.com"
+
+ FILE_NAME_PATTERN = r'<p class="file-name" ><.*?>\s*(?P<N>.+)'
+ FILE_SIZE_PATTERN = r'<small>\((?P<S>\d+) bytes\)</small>'
+
+
+getInfo = create_getInfo(SendmywayCom)
diff --git a/pyload/plugins/hoster/SendspaceCom.py b/pyload/plugins/hoster/SendspaceCom.py
new file mode 100644
index 000000000..7a0908c8d
--- /dev/null
+++ b/pyload/plugins/hoster/SendspaceCom.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class SendspaceCom(SimpleHoster):
+ __name__ = "SendspaceCom"
+ __type__ = "hoster"
+ __version__ = "0.13"
+
+ __pattern__ = r'http://(?:www\.)?sendspace.com/file/.*'
+
+ __description__ = """Sendspace.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<h2 class="bgray">\s*<(?:b|strong)>(?P<N>[^<]+)</'
+ FILE_SIZE_PATTERN = r'<div class="file_description reverse margin_center">\s*<b>File Size:</b>\s*(?P<S>[0-9.]+)(?P<U>[kKMG])i?B\s*</div>'
+ OFFLINE_PATTERN = r'<div class="msg error" style="cursor: default">Sorry, the file you requested is not available.</div>'
+
+ LINK_PATTERN = r'<a id="download_button" href="([^"]+)"'
+ CAPTCHA_PATTERN = r'<td><img src="(/captchas/captcha.php?captcha=([^"]+))"></td>'
+ USER_CAPTCHA_PATTERN = r'<td><img src="/captchas/captcha.php?user=([^"]+))"></td>'
+
+
+ def handleFree(self):
+ params = {}
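+        # up to 3 attempts: request the regular download first, then answer the two-part captcha if one is shown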
+ for _ in xrange(3):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ if 'captcha_hash' in params:
+ self.correctCaptcha()
+ download_url = m.group(1)
+ break
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ if 'captcha_hash' in params:
+ self.invalidCaptcha()
+ captcha_url1 = "http://www.sendspace.com/" + m.group(1)
+ m = re.search(self.USER_CAPTCHA_PATTERN, self.html)
+ captcha_url2 = "http://www.sendspace.com/" + m.group(1)
+ params = {'captcha_hash': m.group(2),
+ 'captcha_submit': 'Verify',
+ 'captcha_answer': self.decryptCaptcha(captcha_url1) + " " + self.decryptCaptcha(captcha_url2)}
+ else:
+ params = {'download': "Regular Download"}
+
+ self.logDebug(params)
+ self.html = self.load(self.pyfile.url, post=params)
+ else:
+ self.fail("Download link not found")
+
+ self.logDebug("Download URL: %s" % download_url)
+ self.download(download_url)
+
+
+getInfo = create_getInfo(SendspaceCom)
diff --git a/pyload/plugins/hoster/Share4webCom.py b/pyload/plugins/hoster/Share4webCom.py
new file mode 100644
index 000000000..a3d92d9f4
--- /dev/null
+++ b/pyload/plugins/hoster/Share4webCom.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.UnibytesCom import UnibytesCom
+from pyload.plugins.internal.SimpleHoster import create_getInfo
+
+
+class Share4webCom(UnibytesCom):
+ __name__ = "Share4webCom"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?share4web\.com/get/\w+'
+
+ __description__ = """Share4web.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ HOSTER_NAME = "share4web.com"
+
+
+getInfo = create_getInfo(Share4webCom)
diff --git a/pyload/plugins/hoster/Share76Com.py b/pyload/plugins/hoster/Share76Com.py
new file mode 100644
index 000000000..2cd736992
--- /dev/null
+++ b/pyload/plugins/hoster/Share76Com.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class Share76Com(DeadHoster):
+ __name__ = "Share76Com"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?share76.com/\w{12}'
+
+ __description__ = """Share76.com hoster plugin"""
+ __author_name__ = "me"
+ __author_mail__ = None
+
+
+getInfo = create_getInfo(Share76Com)
diff --git a/pyload/plugins/hoster/ShareFilesCo.py b/pyload/plugins/hoster/ShareFilesCo.py
new file mode 100644
index 000000000..b75eb0740
--- /dev/null
+++ b/pyload/plugins/hoster/ShareFilesCo.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class ShareFilesCo(DeadHoster):
+ __name__ = "ShareFilesCo"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?sharefiles\.co/\w{12}'
+
+ __description__ = """Sharefiles.co hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+getInfo = create_getInfo(ShareFilesCo)
diff --git a/pyload/plugins/hoster/ShareRapidCom.py b/pyload/plugins/hoster/ShareRapidCom.py
new file mode 100644
index 000000000..b474103fc
--- /dev/null
+++ b/pyload/plugins/hoster/ShareRapidCom.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import HTTPHEADER
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getRequest
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+
+
+def getInfo(urls):
+ h = getRequest()
+ h.c.setopt(HTTPHEADER,
+ ["Accept: text/html",
+ "User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0"])
+ for url in urls:
+ html = h.load(url, decode=True)
+ file_info = parseFileInfo(ShareRapidCom, url, html)
+ yield file_info
+
+
+class ShareRapidCom(SimpleHoster):
+ __name__ = "ShareRapidCom"
+ __type__ = "hoster"
+ __version__ = "0.54"
+
+ __pattern__ = r'http://(?:www\.)?(share|mega)rapid\.cz/soubor/\d+/.+'
+
+ __description__ = """MegaRapid.cz hoster plugin"""
+ __author_name__ = ("MikyWoW", "zoidberg", "stickell", "Walter Purcaro")
+ __author_mail__ = ("mikywow@seznam.cz", "zoidberg@mujmail.cz", "l.stickell@yahoo.it", "vuolter@gmail.com")
+
+ FILE_NAME_PATTERN = r'<h1[^>]*><span[^>]*>(?:<a[^>]*>)?(?P<N>[^<]+)'
+ FILE_SIZE_PATTERN = r'<td class="i">Velikost:</td>\s*<td class="h"><strong>\s*(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</strong></td>'
+ OFFLINE_PATTERN = ur'Nastala chyba 404|Soubor byl smazán'
+
+ SH_CHECK_TRAFFIC = True
+
+ LINK_PATTERN = r'<a href="([^"]+)" title="Stahnout">([^<]+)</a>'
+ ERR_LOGIN_PATTERN = ur'<div class="error_div"><strong>Stahování je přístupné pouze přihlášeným uživatelům'
+ ERR_CREDIT_PATTERN = ur'<div class="error_div"><strong>Stahování zdarma je možné jen přes náš'
+
+
+ def setup(self):
+ self.chunkLimit = 1
+
+ def handlePremium(self):
+ try:
+ self.html = self.load(self.pyfile.url, decode=True)
+ except BadHeader, e:
+ self.account.relogin(self.user)
+ self.retry(max_tries=3, reason=str(e))
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ link = m.group(1)
+ self.logDebug("Premium link: %s" % link)
+ self.download(link, disposition=True)
+ else:
+ if re.search(self.ERR_LOGIN_PATTERN, self.html):
+ self.account.relogin(self.user)
+ self.retry(max_tries=3, reason="User login failed")
+ elif re.search(self.ERR_CREDIT_PATTERN, self.html):
+ self.fail("Not enough credit left")
+ else:
+ self.fail("Download link not found")
diff --git a/pyload/plugins/hoster/SharebeesCom.py b/pyload/plugins/hoster/SharebeesCom.py
new file mode 100644
index 000000000..287dbf59c
--- /dev/null
+++ b/pyload/plugins/hoster/SharebeesCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class SharebeesCom(DeadHoster):
+ __name__ = "SharebeesCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?sharebees.com/\w{12}'
+
+ __description__ = """ShareBees hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+getInfo = create_getInfo(SharebeesCom)
diff --git a/pyload/plugins/hoster/ShareonlineBiz.py b/pyload/plugins/hoster/ShareonlineBiz.py
new file mode 100644
index 000000000..b1d9ae5cb
--- /dev/null
+++ b/pyload/plugins/hoster/ShareonlineBiz.py
@@ -0,0 +1,199 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+
+
+def getInfo(urls):
+ api_url_base = "http://api.share-online.biz/linkcheck.php"
+
+ urls = [url.replace("https://", "http://") for url in urls]
+
+ for chunk in chunks(urls, 90):
+ api_param_file = {"links": "\n".join(x.replace("http://www.share-online.biz/dl/", "").rstrip("/") for x in
+ chunk)} # api only supports old style links
+ src = getURL(api_url_base, post=api_param_file, decode=True)
+ result = []
+ for i, res in enumerate(src.split("\n")):
+ if not res:
+ continue
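+ # each returned line appears to be "ID;STATUS;NAME;SIZE"; only fields 1-3 are used below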
+ fields = res.split(";")
+
+ if fields[1] == "OK":
+ status = 2
+ elif fields[1] in ("DELETED", "NOT FOUND"):
+ status = 1
+ else:
+ status = 3
+
+ result.append((fields[2], int(fields[3]), status, chunk[i]))
+ yield result
+
+
+class ShareonlineBiz(Hoster):
+ __name__ = "ShareonlineBiz"
+ __type__ = "hoster"
+ __version__ = "0.40"
+
+ __pattern__ = r'https?://(?:www\.)?(share-online\.biz|egoshare\.com)/(download.php\?id=|dl/)(?P<ID>\w+)'
+
+ __description__ = """Shareonline.biz hoster plugin"""
+ __author_name__ = ("spoob", "mkaay", "zoidberg", "Walter Purcaro")
+ __author_mail__ = ("spoob@pyload.org", "mkaay@mkaay.de", "zoidberg@mujmail.cz", "vuolter@gmail.com")
+
+ ERROR_INFO_PATTERN = r'<p class="b">Information:</p>\s*<div>\s*<strong>(.*?)</strong>'
+
+
+ def setup(self):
+ # range request not working?
+ # api supports resume, only one chunk
+ # website isn't supporting resuming in first place
+ self.file_id = re.match(self.__pattern__, self.pyfile.url).group("ID")
+ self.pyfile.url = "http://www.share-online.biz/dl/" + self.file_id
+
+ self.resumeDownload = self.premium
+ self.multiDL = False
+ #self.chunkLimit = 1
+
+ self.check_data = None
+
+ def process(self, pyfile):
+ if self.premium:
+ self.handlePremium()
+ #web-download fallback removed - didn't work anyway
+ else:
+ self.handleFree()
+
+ # check = self.checkDownload({"failure": re.compile(self.ERROR_INFO_PATTERN)})
+ # if check == "failure":
+ # try:
+ # self.retry(reason=self.lastCheck.group(1).decode("utf8"))
+ # except:
+ # self.retry(reason="Unknown error")
+
+ if self.api_data:
+ self.check_data = {"size": int(self.api_data['size']), "md5": self.api_data['md5']}
+
+ def loadAPIData(self):
+ api_url_base = "http://api.share-online.biz/linkcheck.php?md5=1"
+ api_param_file = {"links": self.file_id} # api only supports old style links
+ src = self.load(api_url_base, cookies=False, post=api_param_file, decode=True)
+
+ fields = src.split(";")
+ self.api_data = {"fileid": fields[0],
+ "status": fields[1]}
+ if not self.api_data['status'] == "OK":
+ self.offline()
+ else:
+ self.api_data['filename'] = fields[2]
+ self.api_data['size'] = fields[3] # in bytes
+ self.api_data['md5'] = fields[4].strip().lower().replace("\n\n", "") # md5
+
+ def handleFree(self):
+ self.loadAPIData()
+ self.pyfile.name = self.api_data['filename']
+ self.pyfile.size = int(self.api_data['size'])
+
+ self.html = self.load(self.pyfile.url, cookies=True) # refer, stuff
+ self.setWait(3)
+ self.wait()
+
+ self.html = self.load("%s/free/" % self.pyfile.url, post={"dl_free": "1", "choice": "free"}, decode=True)
+ self.checkErrors()
+
+ m = re.search(r'var wait=(\d+);', self.html)
+
+ recaptcha = ReCaptcha(self)
+ for _ in xrange(5):
+ challenge, response = recaptcha.challenge("6LdatrsSAAAAAHZrB70txiV5p-8Iv8BtVxlTtjKX")
+ self.setWait(int(m.group(1)) if m else 30)
+ response = self.load("%s/free/captcha/%d" % (self.pyfile.url, int(time() * 1000)), post={
+ 'dl_free': '1',
+ 'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': response})
+
+ if not response == '0':
+ self.correctCaptcha()
+ break
+ else:
+ self.invalidCaptcha()
+ else:
+ self.invalidCaptcha()
+ self.fail("No valid captcha solution received")
+
+ download_url = response.decode("base64")
+ self.logDebug(download_url)
+ if not download_url.startswith("http://"):
+ self.parseError("download url")
+
+ self.wait()
+ self.download(download_url)
+ # check download
+ check = self.checkDownload({
+ "cookie": re.compile(r'<div id="dl_failure"'),
+ "fail": re.compile(r"<title>Share-Online")
+ })
+ if check == "cookie":
+ self.invalidCaptcha()
+ self.retry(5, 60, "Cookie failure")
+ elif check == "fail":
+ self.invalidCaptcha()
+ self.retry(5, 5 * 60, "Download failed")
+ else:
+ self.correctCaptcha()
+
+ def handlePremium(self): #: should be working better loading (account) api internally
+ self.account.getAccountInfo(self.user, True)
+ src = self.load("http://api.share-online.biz/account.php",
+ {"username": self.user, "password": self.account.accounts[self.user]['password'],
+ "act": "download", "lid": self.file_id})
+
+ self.api_data = dlinfo = {}
+ for line in src.splitlines():
+ key, value = line.split(": ")
+ dlinfo[key.lower()] = value
+
+ self.logDebug(dlinfo)
+ if not dlinfo['status'] == "online":
+ self.offline()
+ else:
+ self.pyfile.name = dlinfo['name']
+ self.pyfile.size = int(dlinfo['size'])
+
+ dlLink = dlinfo['url']
+ if dlLink == "server_under_maintenance":
+ self.tempOffline()
+ else:
+ self.multiDL = True
+ self.download(dlLink)
+
+ def checkErrors(self):
+ m = re.search(r"/failure/(.*?)/1", self.req.lastEffectiveURL)
+ if m is None:
+ return
+
+ err = m.group(1)
+ m = re.search(self.ERROR_INFO_PATTERN, self.html)
+ msg = m.group(1) if m else ""
+ self.logError(err, msg or "Unknown error occurred")
+
+ if err == "invalid":
+ self.fail(msg or "File not available")
+ elif err in ("freelimit", "size", "proxy"):
+ self.fail(msg or "Premium account needed")
+ else:
+ if err == "server":
+ self.setWait(600, False)
+ elif err == "expired":
+ self.setWait(30, False)
+ else:
+ self.setWait(300, True)
+
+ self.wait()
+ self.retry(max_tries=25, reason=msg)
diff --git a/pyload/plugins/hoster/ShareplaceCom.py b/pyload/plugins/hoster/ShareplaceCom.py
new file mode 100644
index 000000000..60bb596cc
--- /dev/null
+++ b/pyload/plugins/hoster/ShareplaceCom.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.Hoster import Hoster
+
+
+class ShareplaceCom(Hoster):
+ __name__ = "ShareplaceCom"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'(http://)?(?:www\.)?shareplace\.(com|org)/\?[a-zA-Z0-9]+'
+
+ __description__ = """Shareplace.com hoster plugin"""
+ __author_name__ = "ACCakut"
+ __author_mail__ = None
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.prepare()
+ self.download(self.get_file_url())
+
+ def prepare(self):
+ if not self.file_exists():
+ self.offline()
+
+ self.pyfile.name = self.get_file_name()
+
+ wait_time = self.get_waiting_time()
+ self.setWait(wait_time)
+ self.logDebug("%s: Waiting %d seconds." % (self.__name__, wait_time))
+ self.wait()
+
+ def get_waiting_time(self):
+ if not self.html:
+ self.download_html()
+
+ #var zzipitime = 15;
+ m = re.search(r'var zzipitime = (\d+);', self.html)
+ if m:
+ sec = int(m.group(1))
+ else:
+ sec = 0
+
+ return sec
+
+ def download_html(self):
+ url = re.sub("shareplace.com\/\?", "shareplace.com//index1.php/?a=", self.pyfile.url)
+ self.html = self.load(url, decode=True)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ url = re.search(r"var beer = '(.*?)';", self.html)
+ if url:
+ url = url.group(1)
+ url = unquote(
+ url.replace("http://http:/", "").replace("vvvvvvvvv", "").replace("lllllllll", "").replace(
+ "teletubbies", ""))
+ self.logDebug("URL: %s" % url)
+ return url
+ else:
+ self.fail("absolute filepath could not be found. offline? ")
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ return re.search("<title>\s*(.*?)\s*</title>", self.html).group(1)
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if re.search(r"HTTP Status 404", self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/ShragleCom.py b/pyload/plugins/hoster/ShragleCom.py
new file mode 100644
index 000000000..0ec93fcdc
--- /dev/null
+++ b/pyload/plugins/hoster/ShragleCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class ShragleCom(DeadHoster):
+ __name__ = "ShragleCom"
+ __type__ = "hoster"
+ __version__ = "0.22"
+
+ __pattern__ = r'http://(?:www\.)?(cloudnator|shragle).com/files/(?P<ID>.*?)/'
+
+ __description__ = """Cloudnator.com (Shragle.com) hoster plugin"""
+ __author_name__ = ("RaNaN", "zoidberg")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")
+
+
+getInfo = create_getInfo(ShragleCom)
diff --git a/pyload/plugins/hoster/SimplyPremiumCom.py b/pyload/plugins/hoster/SimplyPremiumCom.py
new file mode 100644
index 000000000..760b7ff1b
--- /dev/null
+++ b/pyload/plugins/hoster/SimplyPremiumCom.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from datetime import datetime, timedelta
+
+from pyload.plugins.Hoster import Hoster
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+
+
+class SimplyPremiumCom(Hoster):
+ __name__ = "SimplyPremiumCom"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://.*(simply-premium)\.com'
+
+ __description__ = """Simply-Premium.com hoster plugin"""
+ __author_name__ = "EvolutionClip"
+ __author_mail__ = "evolutionclip@live.de"
+
+
+ def setup(self):
+ self.chunkLimit = 16
+ self.resumeDownload = False
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Simply-Premium.com")
+ self.fail("No Simply-Premium.com account provided")
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ for i in xrange(5):
+ page = self.load('http://www.simply-premium.com/premium.php?info&link=' + pyfile.url)
+ self.logDebug("JSON data: " + page)
+ if page != '':
+ break
+ else:
+ self.logInfo("Unable to get API data, waiting 1 minute and retry")
+ self.retry(5, 60, "Unable to get API data")
+
+ if '<valid>0</valid>' in page or (
+ "You are not allowed to download from this host" in page and self.premium):
+ self.account.relogin(self.user)
+ self.retry()
+ elif "NOTFOUND" in page:
+ self.offline()
+ elif "downloadlimit" in page:
+ self.logWarning("Reached maximum connctions")
+ self.retry(5, 60, "Reached maximum connctions")
+ elif "trafficlimit" in page:
+ self.logWarning("Reached daily limit for this host")
+ self.retry(1, secondsToMidnight(gmt=2), "Daily limit for this host reached")
+ elif "hostererror" in page:
+ self.logWarning("Hoster temporarily unavailable, waiting 1 minute and retry")
+ self.retry(5, 60, "Hoster is temporarily unavailable")
+ #page = json_loads(page)
+ #new_url = page.keys()[0]
+ #self.api_data = page[new_url]
+
+ try:
+ self.pyfile.name = re.search(r'<name>([^<]+)</name>', page).group(1)
+ except AttributeError:
+ self.pyfile.name = ""
+
+ try:
+ self.pyfile.size = int(re.search(r'<size>(\d+)</size>', page).group(1))
+ except AttributeError:
+ self.pyfile.size = 0
+
+ try:
+ new_url = re.search(r'<download>([^<]+)</download>', page).group(1)
+ except AttributeError:
+ new_url = 'http://www.simply-premium.com/premium.php?link=' + pyfile.url
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: " + new_url)
+
+ self.download(new_url, disposition=True)
diff --git a/pyload/plugins/hoster/SimplydebridCom.py b/pyload/plugins/hoster/SimplydebridCom.py
new file mode 100644
index 000000000..c6b03c124
--- /dev/null
+++ b/pyload/plugins/hoster/SimplydebridCom.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+
+
+class SimplydebridCom(Hoster):
+ __name__ = "SimplydebridCom"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/sd.php/*'
+
+ __description__ = """Simply-debrid.com hoster plugin"""
+ __author_name__ = "Kagenoshin"
+ __author_mail__ = "kagenoshin@gmx.ch"
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "simply-debrid.com")
+ self.fail("No simply-debrid.com account provided")
+
+ self.logDebug("Old URL: %s" % pyfile.url)
+
+ #fix the links for simply-debrid.com!
+ new_url = pyfile.url
+ new_url = new_url.replace("clz.to", "cloudzer.net/file")
+ new_url = new_url.replace("http://share-online", "http://www.share-online")
+ new_url = new_url.replace("ul.to", "uploaded.net/file")
+ new_url = new_url.replace("uploaded.com", "uploaded.net")
+ new_url = new_url.replace("filerio.com", "filerio.in")
+ new_url = new_url.replace("lumfile.com", "lumfile.se")
+ if 'fileparadox' in new_url:
+ new_url = new_url.replace("http://", "https://")
+
+ self.logDebug("New URL: %s" % new_url)
+
+ if not re.match(self.__pattern__, new_url):
+ page = self.load('http://simply-debrid.com/api.php', get={'dl': new_url}) # +'&u='+self.user+'&p='+self.account.getAccountData(self.user)['password'])
+ if 'tiger Link' in page or 'Invalid Link' in page or ('API' in page and 'ERROR' in page):
+ self.fail('Unable to unrestrict link')
+ new_url = page
+
+ self.setWait(5)
+ self.wait()
+ self.logDebug("Unrestricted URL: " + new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({"bad1": "No address associated with hostname", "bad2": "<html"})
+
+ if check == "bad1" or check == "bad2":
+ self.retry(24, 3 * 60, "Bad file downloaded")
diff --git a/pyload/plugins/hoster/SockshareCom.py b/pyload/plugins/hoster/SockshareCom.py
new file mode 100644
index 000000000..36e03a5ae
--- /dev/null
+++ b/pyload/plugins/hoster/SockshareCom.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from os import rename
+
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class SockshareCom(SimpleHoster):
+ __name__ = "SockshareCom"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?sockshare\.com/(mobile/)?(file|embed)/(?P<ID>\w+)'
+
+ __description__ = """Sockshare.com hoster plugin"""
+ __author_name__ = ("jeix", "stickell", "Walter Purcaro")
+ __author_mail__ = ("jeix@hasnomail.de", "l.stickell@yahoo.it", "vuolter@gmail.com")
+
+ FILE_INFO_PATTERN = r'site-content">\s*<h1>(?P<N>.+)<strong>\( (?P<S>[^)]+) \)</strong></h1>'
+ OFFLINE_PATTERN = r'>This file doesn\'t exist, or has been removed.<'
+ TEMP_OFFLINE_PATTERN = r'(>This content server has been temporarily disabled for upgrades|Try again soon\. You can still download it below\.<)'
+
+ FILE_URL_REPLACEMENTS = [(__pattern__, r'http://www.sockshare.com/file/\g<ID>')]
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+ self.chunkLimit = -1
+
+ def handleFree(self):
+ name = self.pyfile.name
+ link = self._getLink()
+ self.logDebug("Direct link: " + link)
+ self.download(link, disposition=True)
+ self.processName(name)
+
+ def _getLink(self):
+ hash_data = re.search(r'<input type="hidden" value="([a-z0-9]+)" name="hash">', self.html)
+ if not hash_data:
+ self.parseError("Unable to detect hash")
+
+ post_data = {"hash": hash_data.group(1), "confirm": "Continue+as+Free+User"}
+ self.html = self.load(self.pyfile.url, post=post_data)
+ if ">You have exceeded the daily stream limit for your country\\. You can wait until tomorrow" in self.html:
+ self.logWarning("You have exceeded your daily stream limit for today")
+ self.wait(secondsToMidnight(gmt=2), True)
+ elif re.search(self.TEMP_OFFLINE_PATTERN, self.html):
+ self.retry(wait_time=2 * 60 * 60, reason="Server temporarily offline") # 2 hours wait
+
+ patterns = (r'(/get_file\.php\?id=[A-Z0-9]+&key=[a-zA-Z0-9=]+&original=1)',
+ r'(/get_file\.php\?download=[A-Z0-9]+&key=[a-z0-9]+)',
+ r'(/get_file\.php\?download=[A-Z0-9]+&key=[a-z0-9]+&original=1)',
+ r'<a href="/gopro\.php">Tired of ads and waiting\? Go Pro!</a>[\t\n\rn ]+</div>[\t\n\rn ]+<a href="(/.*?)"')
+ for pattern in patterns:
+ link = re.search(pattern, self.html)
+ if link:
+ break
+ else:
+ link = re.search(r"playlist: '(/get_file\.php\?stream=[a-zA-Z0-9=]+)'", self.html)
+ if link:
+ self.html = self.load("http://www.sockshare.com" + link.group(1))
+ link = re.search(r'media:content url="(http://.*?)"', self.html)
+ if link is None:
+ link = re.search(r'\"(http://media\\-b\\d+\\.sockshare\\.com/download/\\d+/.*?)\"', self.html)
+ else:
+ self.parseError('Unable to detect a download link')
+
+ link = link.group(1).replace("&amp;", "&")
+ if link.startswith("http://"):
+ return link
+ else:
+ return "http://www.sockshare.com" + link
+
+ def processName(self, name_old):
+ name = self.pyfile.name
+ if name <= name_old:
+ return
+ name_new = re.sub(r'\.[^.]+$', "", name_old) + name[len(name_old):]
+ filename = self.lastDownload
+ self.pyfile.name = name_new
+ rename(filename, filename.rsplit(name)[0] + name_new)
+ self.logInfo("%(name)s renamed to %(newname)s" % {"name": name, "newname": name_new})
+
+
+getInfo = create_getInfo(SockshareCom)
diff --git a/pyload/plugins/hoster/SoundcloudCom.py b/pyload/plugins/hoster/SoundcloudCom.py
new file mode 100644
index 000000000..afe8eaf62
--- /dev/null
+++ b/pyload/plugins/hoster/SoundcloudCom.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+import pycurl
+import re
+
+from pyload.plugins.Hoster import Hoster
+
+
+class SoundcloudCom(Hoster):
+ __name__ = "SoundcloudCom"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'https?://(?:www\.)?soundcloud\.com/(?P<UID>.*?)/(?P<SID>.*)'
+
+ __description__ = """SoundCloud.com hoster plugin"""
+ __author_name__ = "Peekayy"
+ __author_mail__ = "peekayy.dev@gmail.com"
+
+
+ def process(self, pyfile):
+ # default UserAgent of HTTPRequest fails for this hoster so we use this one
+ self.req.http.c.setopt(pycurl.USERAGENT, 'Mozilla/5.0')
+ page = self.load(pyfile.url)
+ m = re.search(r'<div class="haudio.*?large.*?" data-sc-track="(?P<ID>[0-9]*)"', page)
+ songId = clientId = ""
+ if m:
+ songId = m.group("ID")
+ if len(songId) <= 0:
+ self.logError("Could not find song id")
+ self.offline()
+ else:
+ m = re.search(r'"clientID":"(?P<CID>.*?)"', page)
+ if m:
+ clientId = m.group("CID")
+
+ if len(clientId) <= 0:
+ clientId = "b45b1aa10f1ac2941910a7f0d10f8e28"
+
+ m = re.search(r'<em itemprop="name">\s(?P<TITLE>.*?)\s</em>', page)
+ if m:
+ pyfile.name = m.group("TITLE") + ".mp3"
+ else:
+ pyfile.name = re.match(self.__pattern__, pyfile.url).group("SID") + ".mp3"
+
+ # url to retrieve the actual song url
+ page = self.load("https://api.sndcdn.com/i1/tracks/%s/streams" % songId, get={"client_id": clientId})
+ # getting streams
+ # for now we choose the first stream found in all cases
+ # it could be improved if relevant for this hoster
+ streams = [
+ (result.group("QUALITY"), result.group("URL"))
+ for result in re.finditer(r'"(?P<QUALITY>.*?)":"(?P<URL>.*?)"', page)
+ ]
+ self.logDebug("Found Streams", streams)
+ self.logDebug("Downloading", streams[0][0], streams[0][1])
+ self.download(streams[0][1])
diff --git a/pyload/plugins/hoster/SpeedLoadOrg.py b/pyload/plugins/hoster/SpeedLoadOrg.py
new file mode 100644
index 000000000..74753b029
--- /dev/null
+++ b/pyload/plugins/hoster/SpeedLoadOrg.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class SpeedLoadOrg(DeadHoster):
+ __name__ = "SpeedLoadOrg"
+ __type__ = "hoster"
+ __version__ = "1.02"
+
+ __pattern__ = r'http://(?:www\.)?speedload\.org/(?P<ID>\w+)'
+
+ __description__ = """Speedload.org hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+getInfo = create_getInfo(SpeedLoadOrg)
diff --git a/pyload/plugins/hoster/SpeedfileCz.py b/pyload/plugins/hoster/SpeedfileCz.py
new file mode 100644
index 000000000..85df88d85
--- /dev/null
+++ b/pyload/plugins/hoster/SpeedfileCz.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class SpeedfileCz(DeadHoster):
+ __name__ = "SpeedFileCz"
+ __type__ = "hoster"
+ __version__ = "0.32"
+
+ __pattern__ = r'http://(?:www\.)?speedfile.cz/.*'
+
+ __description__ = """Speedfile.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+getInfo = create_getInfo(SpeedfileCz)
diff --git a/pyload/plugins/hoster/SpeedyshareCom.py b/pyload/plugins/hoster/SpeedyshareCom.py
new file mode 100644
index 000000000..ed6fc443f
--- /dev/null
+++ b/pyload/plugins/hoster/SpeedyshareCom.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+# Testlink:
+# http://speedy.sh/ep2qY/Zapp-Brannigan.jpg
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class SpeedyshareCom(SimpleHoster):
+ __name__ = "SpeedyshareCom"
+ __type__ = "hoster"
+ __pattern__ = r"https?://(www\.)?(speedyshare.com|speedy.sh)/.*"
+ __version__ = "0.01"
+ __description__ = """speedyshare.com hoster plugin"""
+ __author_name__ = ("zapp-brannigan")
+ __author_mail__ = ("fuerst.reinje@web.de")
+
+ FILE_NAME_PATTERN = r'class=downloadfilename>(?P<N>.*)</span></td>'
+ FILE_SIZE_PATTERN = r'class=sizetagtext>(?P<S>.*) (?P<U>[kKmM]?[iI]?[bB]?)</div>'
+ LINK_PATTERN = r'<a href=\'(.*)\'><img src=/gf/slowdownload.png alt=\'Slow Download\' border=0'
+ FILE_OFFLINE_PATTERN = r'class=downloadfilenamenotfound>.*</span>'
+ BASE_URL = 'http://www.speedyshare.com'
+
+ def setup(self):
+ self.multiDL = False
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+ try:
+ dl_link = re.search(self.LINK_PATTERN, self.html).group(1)
+ self.logDebug("Link: " + dl_link)
+ except AttributeError:
+ self.parseError("Unable to find download link")
+ self.download(self.BASE_URL + dl_link, disposition=True)
+ check = self.checkDownload({"is_html": re.compile("html")})
+ if check == "is_html":
+ self.fail("The downloaded file is html, maybe the plugin is out of date")
+
+
+getInfo = create_getInfo(SpeedyshareCom)
diff --git a/pyload/plugins/hoster/StreamCz.py b/pyload/plugins/hoster/StreamCz.py
new file mode 100644
index 000000000..7b20049be
--- /dev/null
+++ b/pyload/plugins/hoster/StreamCz.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+
+
+def getInfo(urls):
+ result = []
+
+ for url in urls:
+
+ html = getURL(url)
+ if re.search(StreamCz.OFFLINE_PATTERN, html):
+ # File offline
+ result.append((url, 0, 1, url))
+ else:
+ result.append((url, 0, 2, url))
+ yield result
+
+
+class StreamCz(Hoster):
+ __name__ = "StreamCz"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'https?://(?:www\.)?stream\.cz/[^/]+/\d+.*'
+
+ __description__ = """Stream.cz hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<link rel="video_src" href="http://www.stream.cz/\w+/(\d+)-([^"]+)" />'
+ OFFLINE_PATTERN = r'<h1 class="commonTitle">Str.nku nebylo mo.n. nal.zt \(404\)</h1>'
+
+ CDN_PATTERN = r'<param name="flashvars" value="[^"]*&id=(?P<ID>\d+)(?:&cdnLQ=(?P<cdnLQ>\d*))?(?:&cdnHQ=(?P<cdnHQ>\d*))?(?:&cdnHD=(?P<cdnHD>\d*))?&'
+
+
+ def setup(self):
+ self.multiDL = True
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+
+ self.html = self.load(pyfile.url, decode=True)
+
+ if re.search(self.OFFLINE_PATTERN, self.html):
+ self.offline()
+
+ m = re.search(self.CDN_PATTERN, self.html)
+ if m is None:
+ self.fail("Parse error (CDN)")
+ cdn = m.groupdict()
+ self.logDebug(cdn)
+ for cdnkey in ("cdnHD", "cdnHQ", "cdnLQ"):
+ if cdnkey in cdn and cdn[cdnkey] > '':
+ cdnid = cdn[cdnkey]
+ break
+ else:
+ self.fail("Stream URL not found")
+
+ m = re.search(self.FILE_NAME_PATTERN, self.html)
+ if m is None:
+ self.fail("Parse error (NAME)")
+ pyfile.name = "%s-%s.%s.mp4" % (m.group(2), m.group(1), cdnkey[-2:])
+
+ download_url = "http://cdn-dispatcher.stream.cz/?id=" + cdnid
+ self.logInfo("STREAM (%s): %s" % (cdnkey[-2:], download_url))
+ self.download(download_url)
diff --git a/pyload/plugins/hoster/StreamcloudEu.py b/pyload/plugins/hoster/StreamcloudEu.py
new file mode 100644
index 000000000..0e36a047c
--- /dev/null
+++ b/pyload/plugins/hoster/StreamcloudEu.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import sleep
+
+from pyload.network.HTTPRequest import HTTPRequest
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class StreamcloudEu(XFileSharingPro):
+ __name__ = "StreamcloudEu"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?streamcloud\.eu/\S+'
+
+ __description__ = """Streamcloud.eu hoster plugin"""
+ __author_name__ = "seoester"
+ __author_mail__ = "seoester@googlemail.com"
+
+ HOSTER_NAME = "streamcloud.eu"
+
+ LINK_PATTERN = r'file: "(http://(stor|cdn)\d+\.streamcloud.eu:?\d*/.*/video\.(mp4|flv))",'
+
+
+ def setup(self):
+ super(StreamcloudEu, self).setup()
+ self.multiDL = True
+
+ def getDownloadLink(self):
+ m = re.search(self.LINK_PATTERN, self.html, re.S)
+ if m:
+ return m.group(1)
+
+ for i in xrange(5):
+ self.logDebug("Getting download link: #%d" % i)
+ data = self.getPostParameters()
+ httpRequest = HTTPRequest(options=self.req.options)
+ httpRequest.cj = self.req.cj
+ sleep(10)
+ self.html = httpRequest.load(self.pyfile.url, post=data, referer=False, cookies=True, decode=True)
+ self.header = httpRequest.header
+
+ m = re.search("Location\s*:\s*(.*)", self.header, re.I)
+ if m:
+ break
+
+ m = re.search(self.LINK_PATTERN, self.html, re.S)
+ if m:
+ break
+
+ else:
+ if self.errmsg and 'captcha' in self.errmsg:
+ self.fail("No valid captcha code entered")
+ else:
+ self.fail("Download link not found")
+
+ return m.group(1)
+
+ def getPostParameters(self):
+ for i in xrange(3):
+ if not self.errmsg:
+ self.checkErrors()
+
+ if hasattr(self, "FORM_PATTERN"):
+ action, inputs = self.parseHtmlForm(self.FORM_PATTERN)
+ else:
+ action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})
+
+ if not inputs:
+ action, inputs = self.parseHtmlForm('F1')
+ if not inputs:
+ if self.errmsg:
+ self.retry()
+ else:
+ self.parseError("Form not found")
+
+ self.logDebug(self.HOSTER_NAME, inputs)
+
+ if 'op' in inputs and inputs['op'] in ("download1", "download2", "download3"):
+ if "password" in inputs:
+ if self.passwords:
+ inputs['password'] = self.passwords.pop(0)
+ else:
+ self.fail("No or invalid passport")
+
+ if not self.premium:
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ wait_time = int(m.group(1)) + 1
+ self.setWait(wait_time, False)
+ else:
+ wait_time = 0
+
+ self.captcha = self.handleCaptcha(inputs)
+
+ if wait_time:
+ self.wait()
+
+ self.errmsg = None
+ self.logDebug("getPostParameters {0}".format(i))
+ return inputs
+
+ else:
+ inputs['referer'] = self.pyfile.url
+
+ if self.premium:
+ inputs['method_premium'] = "Premium Download"
+ if 'method_free' in inputs:
+ del inputs['method_free']
+ else:
+ inputs['method_free'] = "Free Download"
+ if 'method_premium' in inputs:
+ del inputs['method_premium']
+
+ self.html = self.load(self.pyfile.url, post=inputs, ref=False)
+ self.errmsg = None
+
+ else:
+ self.parseError('FORM: %s' % (inputs['op'] if 'op' in inputs else 'UNKNOWN'))
+
+
+getInfo = create_getInfo(StreamcloudEu)
diff --git a/pyload/plugins/hoster/TurbobitNet.py b/pyload/plugins/hoster/TurbobitNet.py
new file mode 100644
index 000000000..1fbdf9e87
--- /dev/null
+++ b/pyload/plugins/hoster/TurbobitNet.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+
+import random
+import re
+import time
+
+from Crypto.Cipher import ARC4
+from binascii import hexlify, unhexlify
+from pycurl import HTTPHEADER
+from urllib import quote
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, timestamp
+
+
+class TurbobitNet(SimpleHoster):
+ __name__ = "TurbobitNet"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?(turbobit.net|unextfiles.com)/(?!download/folder/)(?:download/free/)?(?P<ID>\w+).*'
+
+ __description__ = """Turbobit.net plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_INFO_PATTERN = r"<span class='file-icon1[^>]*>(?P<N>[^<]+)</span>\s*\((?P<S>[^\)]+)\)\s*</h1>" #: long filenames are shortened
+ FILE_NAME_PATTERN = r'<meta name="keywords" content="\s+(?P<N>[^,]+)' #: full name but missing on page2
+ OFFLINE_PATTERN = r'<h2>File Not Found</h2>|html\(\'File (?:was )?not found'
+
+ FILE_URL_REPLACEMENTS = [(r"http://(?:www\.)?(turbobit.net|unextfiles.com)/(?:download/free/)?(?P<ID>\w+).*",
+ "http://turbobit.net/\g<ID>.html")]
+ SH_COOKIES = [(".turbobit.net", "user_lang", "en")]
+
+ LINK_PATTERN = r'(?P<url>/download/redirect/[^"\']+)'
+ LIMIT_WAIT_PATTERN = r'<div id="time-limit-text">\s*.*?<span id=\'timeout\'>(\d+)</span>'
+ CAPTCHA_KEY_PATTERN = r'src="http://api\.recaptcha\.net/challenge\?k=([^"]+)"'
+ CAPTCHA_SRC_PATTERN = r'<img alt="Captcha" src="(.*?)"'
+
+
+ def handleFree(self):
+ self.url = "http://turbobit.net/download/free/%s" % self.file_info['ID']
+ self.html = self.load(self.url)
+
+ rtUpdate = self.getRtUpdate()
+
+ self.solveCaptcha()
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
+ self.url = self.getDownloadUrl(rtUpdate)
+
+ self.wait()
+ self.html = self.load(self.url)
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With:"])
+ self.downloadFile()
+
+ def solveCaptcha(self):
+ for _ in xrange(5):
+ m = re.search(self.LIMIT_WAIT_PATTERN, self.html)
+ if m:
+ wait_time = int(m.group(1))
+ self.wait(wait_time, wait_time > 60)
+ self.retry()
+
+ action, inputs = self.parseHtmlForm("action='#'")
+ if not inputs:
+ self.parseError("captcha form")
+ self.logDebug(inputs)
+
+ if inputs['captcha_type'] == 'recaptcha':
+ recaptcha = ReCaptcha(self)
+ m = re.search(self.CAPTCHA_KEY_PATTERN, self.html)
+ captcha_key = m.group(1) if m else '6LcTGLoSAAAAAHCWY9TTIrQfjUlxu6kZlTYP50_c'
+ inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(
+ captcha_key)
+ else:
+ m = re.search(self.CAPTCHA_SRC_PATTERN, self.html)
+ if m is None:
+ self.parseError('captcha')
+ captcha_url = m.group(1)
+ inputs['captcha_response'] = self.decryptCaptcha(captcha_url)
+
+ self.logDebug(inputs)
+ self.html = self.load(self.url, post=inputs)
+
+ if not "<div class='download-timer-header'>" in self.html:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail("Invalid captcha")
+
+ def getRtUpdate(self):
+ rtUpdate = self.getStorage("rtUpdate")
+ if not rtUpdate:
+ if self.getStorage("version") != self.__version__ or int(
+ self.getStorage("timestamp", 0)) + 86400000 < timestamp():
+ # that's right, we are even using jdownloader updates
+ rtUpdate = getURL("http://update0.jdownloader.org/pluginstuff/tbupdate.js")
+ rtUpdate = self.decrypt(rtUpdate.splitlines()[1])
+ # but we still need to fix the syntax to work with other engines than rhino
+ rtUpdate = re.sub(r'for each\(var (\w+) in(\[[^\]]+\])\)\{',
+ r'zza=\2;for(var zzi=0;zzi<zza.length;zzi++){\1=zza[zzi];', rtUpdate)
+ rtUpdate = re.sub(r"for\((\w+)=", r"for(var \1=", rtUpdate)
+
+ self.logDebug("rtUpdate")
+ self.setStorage("rtUpdate", rtUpdate)
+ self.setStorage("timestamp", timestamp())
+ self.setStorage("version", self.__version__)
+ else:
+ self.logError("Unable to download, wait for update...")
+ self.tempOffline()
+
+ return rtUpdate
+
+ def getDownloadUrl(self, rtUpdate):
+ self.req.http.lastURL = self.url
+
+ m = re.search("(/\w+/timeout\.js\?\w+=)([^\"\'<>]+)", self.html)
+ url = "http://turbobit.net%s%s" % (m.groups() if m else (
+ '/files/timeout.js?ver=', ''.join(random.choice('0123456789ABCDEF') for _ in xrange(32))))
+ fun = self.load(url)
+
+ self.setWait(65, False)
+
+ for b in [1, 3]:
+ self.jscode = "var id = \'%s\';var b = %d;var inn = \'%s\';%sout" % (
+ self.file_info['ID'], b, quote(fun), rtUpdate)
+
+ try:
+ out = self.js.eval(self.jscode)
+ self.logDebug("URL", self.js.engine, out)
+ if out.startswith('/download/'):
+ return "http://turbobit.net%s" % out.strip()
+ except Exception, e:
+ self.logError(e)
+ else:
+ if self.retries >= 2:
+ # retry with updated js
+ self.delStorage("rtUpdate")
+ self.retry()
+
+ def decrypt(self, data):
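+ # data comes in hex-encoded; RC4 is a symmetric stream cipher, so encrypt()
+ # actually decrypts here, and the recovered plaintext is itself hex that the
+ # final unhexlify() presumably turns back into the updated script text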
+ cipher = ARC4.new(hexlify('E\x15\xa1\x9e\xa3M\xa0\xc6\xa0\x84\xb6H\x83\xa8o\xa0'))
+ return unhexlify(cipher.encrypt(unhexlify(data)))
+
+ def getLocalTimeString(self):
+ lt = time.localtime()
+ tz = time.altzone if lt.tm_isdst else time.timezone
+ return "%s GMT%+03d%02d" % (time.strftime("%a %b %d %Y %H:%M:%S", lt), -tz // 3600, tz % 3600)
+
+ def handlePremium(self):
+ self.logDebug("Premium download as user %s" % self.user)
+ self.html = self.load(self.pyfile.url) # Useless in 0.5
+ self.downloadFile()
+
+ def downloadFile(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError("download link")
+ self.url = "http://turbobit.net" + m.group('url')
+ self.logDebug(self.url)
+ self.download(self.url)
+
+
+getInfo = create_getInfo(TurbobitNet)
diff --git a/pyload/plugins/hoster/TurbouploadCom.py b/pyload/plugins/hoster/TurbouploadCom.py
new file mode 100644
index 000000000..eb5978145
--- /dev/null
+++ b/pyload/plugins/hoster/TurbouploadCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class TurbouploadCom(DeadHoster):
+ __name__ = "TurbouploadCom"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?turboupload.com/(\w+).*'
+
+ __description__ = """Turboupload.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+getInfo = create_getInfo(TurbouploadCom)
diff --git a/pyload/plugins/hoster/TusfilesNet.py b/pyload/plugins/hoster/TusfilesNet.py
new file mode 100644
index 000000000..0e01ec805
--- /dev/null
+++ b/pyload/plugins/hoster/TusfilesNet.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class TusfilesNet(XFileSharingPro):
+ __name__ = "TusfilesNet"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?tusfiles\.net/(?P<ID>\w+)'
+
+ __description__ = """Tusfiles.net hoster plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ HOSTER_NAME = "tusfiles.net"
+
+ FILE_INFO_PATTERN = r'\](?P<N>.+) - (?P<S>[\d.]+) (?P<U>\w+)\['
+ OFFLINE_PATTERN = r'>File Not Found|<Title>TusFiles - Fast Sharing Files!'
+
+ SH_COOKIES = [(".tusfiles.net", "lang", "english")]
+
+
+ def setup(self):
+ self.multiDL = False
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+
+getInfo = create_getInfo(TusfilesNet)
diff --git a/pyload/plugins/hoster/TwoSharedCom.py b/pyload/plugins/hoster/TwoSharedCom.py
new file mode 100644
index 000000000..108d31c6f
--- /dev/null
+++ b/pyload/plugins/hoster/TwoSharedCom.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class TwoSharedCom(SimpleHoster):
+ __name__ = "TwoSharedCom"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?2shared.com/(account/)?(download|get|file|document|photo|video|audio)/.*'
+
+ __description__ = """2Shared.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<h1>(?P<N>.*)</h1>'
+ FILE_SIZE_PATTERN = r'<span class="dtitle">File size:</span>\s*(?P<S>[0-9,.]+) (?P<U>[kKMG])i?B'
+ OFFLINE_PATTERN = r'The file link that you requested is not valid\.|This file was deleted\.'
+
+ LINK_PATTERN = r"window.location ='([^']+)';"
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError('Download link')
+ link = m.group(1)
+ self.logDebug("Download URL %s" % link)
+
+ self.download(link)
+
+
+getInfo = create_getInfo(TwoSharedCom)
diff --git a/pyload/plugins/hoster/UlozTo.py b/pyload/plugins/hoster/UlozTo.py
new file mode 100644
index 000000000..2f1fdc595
--- /dev/null
+++ b/pyload/plugins/hoster/UlozTo.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+
+import re
+import time
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+def convertDecimalPrefix(m):
+ # decimal prefixes used in filesize and traffic
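+ # e.g. the match "100.5 M" (from "100.5 MB") becomes "100500000", so the size
+ # string ends up expressed in plain bytes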
+ return ("%%.%df" % {'k': 3, 'M': 6, 'G': 9}[m.group(2)] % float(m.group(1))).replace('.', '')
+
+
+class UlozTo(SimpleHoster):
+ __name__ = "UlozTo"
+ __type__ = "hoster"
+ __version__ = "0.98"
+
+ __pattern__ = r'http://(?:www\.)?(uloz\.to|ulozto\.(cz|sk|net)|bagruj.cz|zachowajto.pl)/(?:live/)?(?P<id>\w+/[^/?]*)'
+
+ __description__ = """Uloz.to hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_INFO_PATTERN = r'<p>File <strong>(?P<N>[^<]+)</strong> is password protected</p>'
+ FILE_NAME_PATTERN = r'<title>(?P<N>[^<]+) \| Uloz.to</title>'
+ FILE_SIZE_PATTERN = r'<span id="fileSize">.*?(?P<S>[0-9.]+\s[kMG]?B)</span>'
+ OFFLINE_PATTERN = r'<title>404 - Page not found</title>|<h1 class="h1">File (has been deleted|was banned)</h1>'
+
+ FILE_SIZE_REPLACEMENTS = [('([0-9.]+)\s([kMG])B', convertDecimalPrefix)]
+ FILE_URL_REPLACEMENTS = [(r"(?<=http://)([^/]+)", "www.ulozto.net")]
+
+ ADULT_PATTERN = r'<form action="(?P<link>[^\"]*)" method="post" id="frm-askAgeForm">'
+ PASSWD_PATTERN = r'<div class="passwordProtectedFile">'
+ VIPLINK_PATTERN = r'<a href="[^"]*\?disclaimer=1" class="linkVip">'
+ FREE_URL_PATTERN = r'<div class="freeDownloadForm"><form action="([^"]+)"'
+ PREMIUM_URL_PATTERN = r'<div class="downloadForm"><form action="([^"]+)"'
+ TOKEN_PATTERN = r'<input type="hidden" name="_token_" id="[^\"]*" value="(?P<token>[^\"]*)" />'
+
+
+ def setup(self):
+ self.multiDL = self.premium
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ pyfile.url = re.sub(r"(?<=http://)([^/]+)", "www.ulozto.net", pyfile.url)
+ self.html = self.load(pyfile.url, decode=True, cookies=True)
+
+ if re.search(self.ADULT_PATTERN, self.html):
+ self.logInfo("Adult content confirmation needed. Proceeding..")
+
+ m = re.search(self.TOKEN_PATTERN, self.html)
+ if m is None:
+ self.parseError('TOKEN')
+ token = m.group(1)
+
+ self.html = self.load(pyfile.url, get={"do": "askAgeForm-submit"},
+ post={"agree": "Confirm", "_token_": token}, cookies=True)
+
+ passwords = self.getPassword().splitlines()
+ while self.PASSWD_PATTERN in self.html:
+ if passwords:
+ password = passwords.pop(0)
+ self.logInfo("Password protected link, trying " + password)
+ self.html = self.load(pyfile.url, get={"do": "passwordProtectedForm-submit"},
+ post={"password": password, "password_send": 'Send'}, cookies=True)
+ else:
+ self.fail("No or incorrect password")
+
+ if re.search(self.VIPLINK_PATTERN, self.html):
+ self.html = self.load(pyfile.url, get={"disclaimer": "1"})
+
+ self.file_info = self.getFileInfo()
+
+ if self.premium and self.checkTrafficLeft():
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ self.doCheckDownload()
+
+ def handleFree(self):
+ action, inputs = self.parseHtmlForm('id="frm-downloadDialog-freeDownloadForm"')
+ if not action or not inputs:
+ self.parseError("free download form")
+
+ self.logDebug('inputs.keys() = ' + str(inputs.keys()))
+ # get and decrypt captcha
+ if all(key in inputs for key in ("captcha_value", "captcha_id", "captcha_key")):
+ # Old version - last seen 9.12.2013
+ self.logDebug('Using "old" version')
+
+ captcha_value = self.decryptCaptcha("http://img.uloz.to/captcha/%s.png" % inputs['captcha_id'])
+ self.logDebug('CAPTCHA ID: ' + inputs['captcha_id'] + ", CAPTCHA VALUE: " + captcha_value)
+
+ inputs.update({'captcha_id': inputs['captcha_id'], 'captcha_key': inputs['captcha_key'], 'captcha_value': captcha_value})
+
+ elif all(key in inputs for key in ("captcha_value", "timestamp", "salt", "hash")):
+ # New version - better to get new parameters (like captcha reload) because of image url - since 6.12.2013
+ self.logDebug('Using "new" version')
+
+ xapca = self.load("http://www.ulozto.net/reloadXapca.php", get={"rnd": str(int(time.time()))})
+ self.logDebug('xapca = ' + str(xapca))
+
+ data = json_loads(xapca)
+ captcha_value = self.decryptCaptcha(str(data['image']))
+ self.logDebug("CAPTCHA HASH: " + data['hash'] + ", CAPTCHA SALT: " + str(data['salt']) + ", CAPTCHA VALUE: " + captcha_value)
+
+ inputs.update({'timestamp': data['timestamp'], 'salt': data['salt'], 'hash': data['hash'], 'captcha_value': captcha_value})
+ else:
+ self.parseError("CAPTCHA form changed")
+
+ self.multiDL = True
+ self.download("http://www.ulozto.net" + action, post=inputs, cookies=True, disposition=True)
+
+ def handlePremium(self):
+ self.download(self.pyfile.url + "?do=directDownload", disposition=True)
+ #parsed_url = self.findDownloadURL(premium=True)
+ #self.download(parsed_url, post={"download": "Download"})
+
+ def findDownloadURL(self, premium=False):
+ msg = "%s link" % ("Premium" if premium else "Free")
+ m = re.search(self.PREMIUM_URL_PATTERN if premium else self.FREE_URL_PATTERN, self.html)
+ if m is None:
+ self.parseError(msg)
+ parsed_url = "http://www.ulozto.net" + m.group(1)
+ self.logDebug("%s: %s" % (msg, parsed_url))
+ return parsed_url
+
+ def doCheckDownload(self):
+ check = self.checkDownload({
+ "wrong_captcha": re.compile(r'<ul class="error">\s*<li>Error rewriting the text.</li>'),
+ "offline": re.compile(self.OFFLINE_PATTERN),
+ "passwd": self.PASSWD_PATTERN,
+ "server_error": 'src="http://img.ulozto.cz/error403/vykricnik.jpg"', # paralell dl, server overload etc.
+ "not_found": "<title>UloÅŸ.to</title>"
+ })
+
+ if check == "wrong_captcha":
+ #self.delStorage("captcha_id")
+ #self.delStorage("captcha_text")
+ self.invalidCaptcha()
+ self.retry(reason="Wrong captcha code")
+ elif check == "offline":
+ self.offline()
+ elif check == "passwd":
+ self.fail("Wrong password")
+ elif check == "server_error":
+ self.logError("Server error, try downloading later")
+ self.multiDL = False
+ self.wait(1 * 60 * 60, True)
+ self.retry()
+ elif check == "not_found":
+ self.fail("Server error - file not downloadable")
+
+
+getInfo = create_getInfo(UlozTo)
diff --git a/pyload/plugins/hoster/UloziskoSk.py b/pyload/plugins/hoster/UloziskoSk.py
new file mode 100644
index 000000000..f78a6e29a
--- /dev/null
+++ b/pyload/plugins/hoster/UloziskoSk.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class UloziskoSk(SimpleHoster):
+ __name__ = "UloziskoSk"
+ __type__ = "hoster"
+ __version__ = "0.23"
+
+ __pattern__ = r'http://(?:www\.)?ulozisko.sk/.*'
+
+ __description__ = """Ulozisko.sk hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'<div class="down1">(?P<N>[^<]+)</div>'
+ FILE_SIZE_PATTERN = ur'Veľkosť súboru: <strong>(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</strong><br />'
+ OFFLINE_PATTERN = ur'<span class = "red">Zadaný súbor neexistuje z jedného z nasledujúcich dôvodov:</span>'
+
+ LINK_PATTERN = r'<form name = "formular" action = "([^"]+)" method = "post">'
+ ID_PATTERN = r'<input type = "hidden" name = "id" value = "([^"]+)" />'
+ CAPTCHA_PATTERN = r'<img src="(/obrazky/obrazky.php\?fid=[^"]+)" alt="" />'
+ IMG_PATTERN = ur'<strong>PRE ZVÄČŠENIE KLIKNITE NA OBRÁZOK</strong><br /><a href = "([^"]+)">'
+
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+
+ m = re.search(self.IMG_PATTERN, self.html)
+ if m:
+ url = "http://ulozisko.sk" + m.group(1)
+ self.download(url)
+ else:
+ self.handleFree()
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError('URL')
+ parsed_url = 'http://www.ulozisko.sk' + m.group(1)
+
+ m = re.search(self.ID_PATTERN, self.html)
+ if m is None:
+ self.parseError('ID')
+ id = m.group(1)
+
+ self.logDebug('URL:' + parsed_url + ' ID:' + id)
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m is None:
+ self.parseError('CAPTCHA')
+ captcha_url = 'http://www.ulozisko.sk' + m.group(1)
+
+ captcha = self.decryptCaptcha(captcha_url, cookies=True)
+
+ self.logDebug('CAPTCHA_URL:' + captcha_url + ' CAPTCHA:' + captcha)
+
+ self.download(parsed_url, post={
+ "antispam": captcha,
+ "id": id,
+ "name": self.pyfile.name,
+ "but": "++++STIAHNI+S%DABOR++++"
+ })
+
+
+getInfo = create_getInfo(UloziskoSk)
diff --git a/pyload/plugins/hoster/UnibytesCom.py b/pyload/plugins/hoster/UnibytesCom.py
new file mode 100644
index 000000000..1541265d9
--- /dev/null
+++ b/pyload/plugins/hoster/UnibytesCom.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import FOLLOWLOCATION
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class UnibytesCom(SimpleHoster):
+ __name__ = "UnibytesCom"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?unibytes\.com/[a-zA-Z0-9-._ ]{11}B'
+
+ __description__ = """UniBytes.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_INFO_PATTERN = r'<span[^>]*?id="fileName"[^>]*>(?P<N>[^>]+)</span>\s*\((?P<S>\d.*?)\)'
+
+ HOSTER_NAME = "unibytes.com"
+ WAIT_PATTERN = r'Wait for <span id="slowRest">(\d+)</span> sec'
+ LINK_PATTERN = r'<a href="([^"]+)">Download</a>'
+
+
+ def handleFree(self):
+ domain = "http://www." + self.HOSTER_NAME
+ action, post_data = self.parseHtmlForm('id="startForm"')
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+
+ for _ in xrange(8):
+ self.logDebug(action, post_data)
+ self.html = self.load(domain + action, post=post_data)
+
+ m = re.search(r'location:\s*(\S+)', self.req.http.header, re.I)
+ if m:
+ url = m.group(1)
+ break
+
+ if '>Somebody else is already downloading using your IP-address<' in self.html:
+ self.wait(10 * 60, True)
+ self.retry()
+
+ if post_data['step'] == 'last':
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ url = m.group(1)
+ self.correctCaptcha()
+ break
+ else:
+ self.invalidCaptcha()
+
+ last_step = post_data['step']
+ action, post_data = self.parseHtmlForm('id="stepForm"')
+
+ if last_step == 'timer':
+ m = re.search(self.WAIT_PATTERN, self.html)
+ self.wait(int(m.group(1)) if m else 60, False)
+ elif last_step in ("captcha", "last"):
+ post_data['captcha'] = self.decryptCaptcha(domain + '/captcha.jpg')
+ else:
+ self.fail("No valid captcha code entered")
+
+ self.logDebug('Download link: ' + url)
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+ self.download(url)
+
+
+getInfo = create_getInfo(UnibytesCom)
diff --git a/pyload/plugins/hoster/UnrestrictLi.py b/pyload/plugins/hoster/UnrestrictLi.py
new file mode 100644
index 000000000..2cad6616f
--- /dev/null
+++ b/pyload/plugins/hoster/UnrestrictLi.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from datetime import datetime, timedelta
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+
+
+def secondsToMidnight(gmt=0):
+ now = datetime.utcnow() + timedelta(hours=gmt)
+ if now.hour == 0 and now.minute < 10:
+ midnight = now
+ else:
+ midnight = now + timedelta(days=1)
+ midnight = midnight.replace(hour=0, minute=10, second=0, microsecond=0)
+ return int((midnight - now).total_seconds())
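+# illustrative: at 23:00 in the chosen GMT offset this returns 4200, i.e. the
+# seconds left until 00:10 of the next day; during the first ten minutes after
+# midnight it returns only the seconds left until 00:10 of the current day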
+
+
+class UnrestrictLi(Hoster):
+ __name__ = "UnrestrictLi"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?(unrestrict|unr)\.li'
+
+ __description__ = """Unrestrict.li hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def setup(self):
+ self.chunkLimit = 16
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Unrestrict.li")
+ self.fail("No Unrestrict.li account provided")
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ for _ in xrange(5):
+ page = self.req.load('https://unrestrict.li/unrestrict.php',
+ post={'link': pyfile.url, 'domain': 'long'})
+ self.logDebug("JSON data: " + page)
+ if page != '':
+ break
+ else:
+ self.logInfo("Unable to get API data, waiting 1 minute and retry")
+ self.retry(5, 60, "Unable to get API data")
+
+ if 'Expired session' in page or ("You are not allowed to "
+ "download from this host" in page and self.premium):
+ self.account.relogin(self.user)
+ self.retry()
+ elif "File offline" in page:
+ self.offline()
+ elif "You are not allowed to download from this host" in page:
+ self.fail("You are not allowed to download from this host")
+ elif "You have reached your daily limit for this host" in page:
+ self.logWarning("Reached daily limit for this host")
+ self.retry(5, secondsToMidnight(gmt=2), "Daily limit for this host reached")
+ elif "ERROR_HOSTER_TEMPORARILY_UNAVAILABLE" in page:
+ self.logInfo("Hoster temporarily unavailable, waiting 1 minute and retry")
+ self.retry(5, 60, "Hoster is temporarily unavailable")
+ page = json_loads(page)
+ new_url = page.keys()[0]
+ self.api_data = page[new_url]
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: " + new_url)
+
+ if hasattr(self, 'api_data'):
+ self.setNameSize()
+
+ self.download(new_url, disposition=True)
+
+ if self.getConfig("history"):
+ self.load("https://unrestrict.li/history/&delete=all")
+ self.logInfo("Download history deleted")
+
+ def setNameSize(self):
+ if 'name' in self.api_data:
+ self.pyfile.name = self.api_data['name']
+ if 'size' in self.api_data:
+ self.pyfile.size = self.api_data['size']
diff --git a/pyload/plugins/hoster/UploadStationCom.py b/pyload/plugins/hoster/UploadStationCom.py
new file mode 100644
index 000000000..4671b2dc5
--- /dev/null
+++ b/pyload/plugins/hoster/UploadStationCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class UploadStationCom(DeadHoster):
+ __name__ = "UploadStationCom"
+ __type__ = "hoster"
+ __version__ = "0.52"
+
+ __pattern__ = r'http://(?:www\.)?uploadstation\.com/file/(?P<id>[A-Za-z0-9]+)'
+
+ __description__ = """UploadStation.com hoster plugin"""
+ __author_name__ = ("fragonib", "zoidberg")
+ __author_mail__ = ("fragonib[AT]yahoo[DOT]es", "zoidberg@mujmail.cz")
+
+
+getInfo = create_getInfo(UploadStationCom)
diff --git a/pyload/plugins/hoster/UploadedTo.py b/pyload/plugins/hoster/UploadedTo.py
new file mode 100644
index 000000000..db620eea6
--- /dev/null
+++ b/pyload/plugins/hoster/UploadedTo.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://ul.to/044yug9o
+# http://ul.to/gzfhd0xs
+
+import re
+
+from time import sleep
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.utils import html_unescape, parseFileSize
+
+
+key = "bGhGMkllZXByd2VEZnU5Y2NXbHhYVlZ5cEE1bkEzRUw=".decode('base64')
+
+
+def getID(url):
+ """ returns id from file url"""
+ m = re.match(UploadedTo.__pattern__, url)
+ return m.group('ID')
+
+
+def getAPIData(urls):
+ post = {"apikey": key}
+
+ idMap = {}
+
+ for i, url in enumerate(urls):
+ id = getID(url)
+ post['id_%s' % i] = id
+ idMap[id] = url
+
+ for _ in xrange(5):
+ api = unicode(getURL("http://uploaded.net/api/filemultiple", post=post, decode=False), 'iso-8859-1')
+ if api != "can't find request":
+ break
+ else:
+ sleep(3)
+
+ result = {}
+
+ if api:
+ for line in api.splitlines():
+ data = line.split(",", 4)
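+ # maxsplit=4: the trailing field (used below as the file name) may itself contain commas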
+ if data[1] in idMap:
+ result[data[1]] = (data[0], data[2], data[4], data[3], idMap[data[1]])
+
+ return result
+
+
+def parseFileInfo(self, url='', html=''):
+ if not html and hasattr(self, "html"):
+ html = self.html
+
+ name = url
+ size = 0
+ fileid = None
+
+ if re.search(self.OFFLINE_PATTERN, html):
+ # File offline
+ status = 1
+ else:
+ m = re.search(self.FILE_INFO_PATTERN, html)
+ if m:
+ name, fileid = html_unescape(m.group('N')), m.group('ID')
+ size = parseFileSize(m.group('S'))
+ status = 2
+ else:
+ status = 3
+
+ return name, size, status, fileid
+
+
+def getInfo(urls):
+ for chunk in chunks(urls, 80):
+ result = []
+
+ api = getAPIData(chunk)
+
+ for data in api.itervalues():
+ if data[0] == "online":
+ result.append((html_unescape(data[2]), data[1], 2, data[4]))
+
+ elif data[0] == "offline":
+ result.append((data[4], 0, 1, data[4]))
+
+ yield result
+
+
+class UploadedTo(Hoster):
+ __name__ = "UploadedTo"
+ __type__ = "hoster"
+ __version__ = "0.73"
+
+ __pattern__ = r'https?://(?:www\.)?(uploaded\.(to|net)|ul\.to)(/file/|/?\?id=|.*?&id=|/)(?P<ID>\w+)'
+
+ __description__ = """Uploaded.net hoster plugin"""
+ __author_name__ = ("spoob", "mkaay", "zoidberg", "netpok", "stickell")
+ __author_mail__ = ("spoob@pyload.org", "mkaay@mkaay.de", "zoidberg@mujmail.cz",
+ "netpok@gmail.com", "l.stickell@yahoo.it")
+
+ FILE_INFO_PATTERN = r'<a href="file/(?P<ID>\w+)" id="filename">(?P<N>[^<]+)</a> &nbsp;\s*<small[^>]*>(?P<S>[^<]+)</small>'
+ OFFLINE_PATTERN = r'<small class="cL">Error: 404</small>'
+ DL_LIMIT_PATTERN = r'You have reached the max. number of possible free downloads for this hour'
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = self.premium
+ self.chunkLimit = 1 # critical problems with more chunks
+
+ self.fileID = getID(self.pyfile.url)
+ self.pyfile.url = "http://uploaded.net/file/%s" % self.fileID
+
+ def process(self, pyfile):
+ self.load("http://uploaded.net/language/en", just_header=True)
+
+ api = getAPIData([pyfile.url])
+
+ # TODO: fallback to parse from site, because api sometimes delivers wrong status codes
+
+ if not api:
+ self.logWarning("No response for API call")
+
+ self.html = unicode(self.load(pyfile.url, decode=False), 'iso-8859-1')
+ name, size, status, self.fileID = parseFileInfo(self)
+ self.logDebug(name, size, status, self.fileID)
+ if status == 1:
+ self.offline()
+ elif status == 2:
+ pyfile.name, pyfile.size = name, size
+ else:
+ self.fail('Parse error - file info')
+ elif api == 'Access denied':
+ self.fail(_("API key invalid"))
+
+ else:
+ if self.fileID not in api:
+ self.offline()
+
+ self.data = api[self.fileID]
+ if self.data[0] != "online":
+ self.offline()
+
+ pyfile.name = html_unescape(self.data[2])
+
+ # pyfile.name = self.get_file_name()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def handlePremium(self):
+ info = self.account.getAccountInfo(self.user, True)
+ self.logDebug("%(name)s: Use Premium Account (%(left)sGB left)" % {"name": self.__name__,
+ "left": info['trafficleft'] / 1024 / 1024})
+ if int(self.data[1]) / 1024 > info['trafficleft']:
+ self.logInfo(_("%s: Not enough traffic left" % self.__name__))
+ self.account.empty(self.user)
+ self.resetAccount()
+ self.fail(_("Traffic exceeded"))
+
+ header = self.load("http://uploaded.net/file/%s" % self.fileID, just_header=True)
+ if "location" in header:
+ #Direct download
+ print "Direct Download: " + header['location']
+ self.download(header['location'])
+ else:
+ #Indirect download
+ self.html = self.load("http://uploaded.net/file/%s" % self.fileID)
+ m = re.search(r'<div class="tfree".*\s*<form method="post" action="(.*?)"', self.html)
+ if m is None:
+ self.fail("Download URL not m. Try to enable direct downloads.")
+ url = m.group(1)
+ print "Premium URL: " + url
+ self.download(url, post={})
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ if 'var free_enabled = false;' in self.html:
+ self.logError("Free-download capacities exhausted.")
+ self.retry(max_tries=24, wait_time=5 * 60)
+
+ m = re.search(r"Current waiting period: <span>(\d+)</span> seconds", self.html)
+ if m is None:
+ self.fail("File not downloadable for free users")
+ self.setWait(int(m.group(1)))
+
+ js = self.load("http://uploaded.net/js/download.js", decode=True)
+
+        challengeId = re.search(r'Recaptcha\.create\("([^"]+)', js)
+        if challengeId is None:
+            self.fail("ReCaptcha challenge id not found")
+
+ url = "http://uploaded.net/io/ticket/captcha/%s" % self.fileID
+ downloadURL = ""
+
+ for _ in xrange(5):
+ re_captcha = ReCaptcha(self)
+ challenge, result = re_captcha.challenge(challengeId.group(1))
+ options = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": result}
+ self.wait()
+
+ result = self.load(url, post=options)
+ self.logDebug("result: %s" % result)
+
+ if "limit-size" in result:
+ self.fail("File too big for free download")
+ elif "limit-slot" in result: # Temporary restriction so just wait a bit
+ self.setWait(30 * 60, True)
+ self.wait()
+ self.retry()
+ elif "limit-parallel" in result:
+ self.fail("Cannot download in parallel")
+ elif self.DL_LIMIT_PATTERN in result: # limit-dl
+ self.setWait(3 * 60 * 60, True)
+ self.wait()
+ self.retry()
+ elif '"err":"captcha"' in result:
+ self.logError("ul.net captcha is disabled")
+ self.invalidCaptcha()
+ elif "type:'download'" in result:
+ self.correctCaptcha()
+ downloadURL = re.search("url:'([^']+)", result).group(1)
+ break
+ else:
+ self.fail("Unknown error '%s'" % result)
+
+ if not downloadURL:
+ self.fail("No Download url retrieved/all captcha attempts failed")
+
+ self.download(downloadURL, disposition=True)
+ check = self.checkDownload({"limit-dl": self.DL_LIMIT_PATTERN})
+ if check == "limit-dl":
+ self.setWait(3 * 60 * 60, True)
+ self.wait()
+ self.retry()
diff --git a/pyload/plugins/hoster/UploadheroCom.py b/pyload/plugins/hoster/UploadheroCom.py
new file mode 100644
index 000000000..f1f893c30
--- /dev/null
+++ b/pyload/plugins/hoster/UploadheroCom.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://uploadhero.co/dl/wQBRAVSM
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class UploadheroCom(SimpleHoster):
+ __name__ = "UploadheroCom"
+ __type__ = "hoster"
+ __version__ = "0.15"
+
+ __pattern__ = r'http://(?:www\.)?uploadhero\.com?/dl/\w+'
+
+ __description__ = """UploadHero.co plugin"""
+ __author_name__ = ("mcmyst", "zoidberg")
+ __author_mail__ = ("mcmyst@hotmail.fr", "zoidberg@mujmail.cz")
+
+ FILE_NAME_PATTERN = r'<div class="nom_de_fichier">(?P<N>.*?)</div>'
+ FILE_SIZE_PATTERN = r'Taille du fichier : </span><strong>(?P<S>.*?)</strong>'
+ OFFLINE_PATTERN = r'<p class="titre_dl_2">|<div class="raison"><strong>Le lien du fichier ci-dessus n\'existe plus.'
+
+ SH_COOKIES = [(".uploadhero.co", "lang", "en")]
+
+ IP_BLOCKED_PATTERN = r'href="(/lightbox_block_download.php\?min=.*?)"'
+ IP_WAIT_PATTERN = r'<span id="minutes">(\d+)</span>.*\s*<span id="seconds">(\d+)</span>'
+
+ CAPTCHA_PATTERN = r'"(/captchadl\.php\?[a-z0-9]+)"'
+ FREE_URL_PATTERN = r'var magicomfg = \'<a href="(http://[^<>"]*?)"|"(http://storage\d+\.uploadhero\.co/\?d=[A-Za-z0-9]+/[^<>"/]+)"'
+ PREMIUM_URL_PATTERN = r'<a href="([^"]+)" id="downloadnow"'
+
+
+ def handleFree(self):
+ self.checkErrors()
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m is None:
+ self.parseError("Captcha URL")
+ captcha_url = "http://uploadhero.co" + m.group(1)
+
+ for _ in xrange(5):
+ captcha = self.decryptCaptcha(captcha_url)
+ self.html = self.load(self.pyfile.url, get={"code": captcha})
+ m = re.search(self.FREE_URL_PATTERN, self.html)
+ if m:
+ self.correctCaptcha()
+ download_url = m.group(1) or m.group(2)
+ break
+ else:
+ self.invalidCaptcha()
+ else:
+ self.fail("No valid captcha code entered")
+
+ self.download(download_url)
+
+ def handlePremium(self):
+ self.logDebug("%s: Use Premium Account" % self.__name__)
+ self.html = self.load(self.pyfile.url)
+ link = re.search(self.PREMIUM_URL_PATTERN, self.html).group(1)
+ self.logDebug("Downloading link : '%s'" % link)
+ self.download(link)
+
+ def checkErrors(self):
+ m = re.search(self.IP_BLOCKED_PATTERN, self.html)
+ if m:
+ self.html = self.load("http://uploadhero.co%s" % m.group(1))
+
+ m = re.search(self.IP_WAIT_PATTERN, self.html)
+ wait_time = (int(m.group(1)) * 60 + int(m.group(2))) if m else 5 * 60
+ self.wait(wait_time, True)
+ self.retry()
+
+
+getInfo = create_getInfo(UploadheroCom)
diff --git a/pyload/plugins/hoster/UploadingCom.py b/pyload/plugins/hoster/UploadingCom.py
new file mode 100644
index 000000000..a7c328eec
--- /dev/null
+++ b/pyload/plugins/hoster/UploadingCom.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import HTTPHEADER
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, timestamp
+
+
+class UploadingCom(SimpleHoster):
+ __name__ = "UploadingCom"
+ __type__ = "hoster"
+ __version__ = "0.36"
+
+ __pattern__ = r'http://(?:www\.)?uploading\.com/files/(?:get/)?(?P<ID>[\w\d]+)'
+
+ __description__ = """Uploading.com hoster plugin"""
+ __author_name__ = ("jeix", "mkaay", "zoidberg")
+ __author_mail__ = ("jeix@hasnomail.de", "mkaay@mkaay.de", "zoidberg@mujmail.cz")
+
+ FILE_NAME_PATTERN = r'id="file_title">(?P<N>.+)</'
+ FILE_SIZE_PATTERN = r'size tip_container">(?P<S>[\d.]+) (?P<U>\w+)<'
+ OFFLINE_PATTERN = r'(Page|file) not found'
+
+
+ def process(self, pyfile):
+ # set lang to english
+ self.req.cj.setCookie(".uploading.com", "lang", "1")
+ self.req.cj.setCookie(".uploading.com", "language", "1")
+ self.req.cj.setCookie(".uploading.com", "setlang", "en")
+ self.req.cj.setCookie(".uploading.com", "_lang", "en")
+
+ if not "/get/" in pyfile.url:
+ pyfile.url = pyfile.url.replace("/files", "/files/get")
+
+ self.html = self.load(pyfile.url, decode=True)
+ self.file_info = self.getFileInfo()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def handlePremium(self):
+ postData = {'action': 'get_link',
+ 'code': self.file_info['ID'],
+ 'pass': 'undefined'}
+
+ self.html = self.load('http://uploading.com/files/get/?JsHttpRequest=%d-xml' % timestamp(), post=postData)
+ url = re.search(r'"link"\s*:\s*"(.*?)"', self.html)
+ if url:
+ url = url.group(1).replace("\\/", "/")
+ self.download(url)
+
+ raise Exception("Plugin defect.")
+
+ def handleFree(self):
+ m = re.search('<h2>((Daily )?Download Limit)</h2>', self.html)
+ if m:
+ self.pyfile.error = m.group(1)
+ self.logWarning(self.pyfile.error)
+ self.retry(max_tries=6, wait_time=6 * 60 * 60 if m.group(2) else 15 * 60, reason=self.pyfile.error)
+
+ ajax_url = "http://uploading.com/files/get/?ajax"
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
+ self.req.http.lastURL = self.pyfile.url
+
+ response = json_loads(self.load(ajax_url, post={'action': 'second_page', 'code': self.file_info['ID']}))
+ if 'answer' in response and 'wait_time' in response['answer']:
+ wait_time = int(response['answer']['wait_time'])
+ self.logInfo("%s: Waiting %d seconds." % (self.__name__, wait_time))
+ self.wait(wait_time)
+ else:
+ self.parseError("AJAX/WAIT")
+
+ response = json_loads(
+ self.load(ajax_url, post={'action': 'get_link', 'code': self.file_info['ID'], 'pass': 'false'}))
+ if 'answer' in response and 'link' in response['answer']:
+ url = response['answer']['link']
+ else:
+ self.parseError("AJAX/URL")
+
+ self.html = self.load(url)
+ m = re.search(r'<form id="file_form" action="(.*?)"', self.html)
+ if m:
+ url = m.group(1)
+ else:
+ self.parseError("URL")
+
+ self.download(url)
+
+ check = self.checkDownload({"html": re.compile("\A<!DOCTYPE html PUBLIC")})
+ if check == "html":
+ self.logWarning("Redirected to a HTML page, wait 10 minutes and retry")
+ self.wait(10 * 60, True)
+
+
+getInfo = create_getInfo(UploadingCom)
diff --git a/pyload/plugins/hoster/UpstoreNet.py b/pyload/plugins/hoster/UpstoreNet.py
new file mode 100644
index 000000000..bd084612c
--- /dev/null
+++ b/pyload/plugins/hoster/UpstoreNet.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class UpstoreNet(SimpleHoster):
+ __name__ = "UpstoreNet"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?upstore\.net/'
+
+ __description__ = """Upstore.Net File Download Hoster"""
+ __author_name__ = "igel"
+ __author_mail__ = "igelkun@myopera.com"
+
+ FILE_INFO_PATTERN = r'<div class="comment">.*?</div>\s*\n<h2 style="margin:0">(?P<N>.*?)</h2>\s*\n<div class="comment">\s*\n\s*(?P<S>[\d.]+) (?P<U>\w+)'
+ OFFLINE_PATTERN = r'<span class="error">File not found</span>'
+
+ WAIT_PATTERN = r'var sec = (\d+)'
+ CHASH_PATTERN = r'<input type="hidden" name="hash" value="([^"]*)">'
+ LINK_PATTERN = r'<a href="(https?://.*?)" target="_blank"><b>'
+
+
+ def handleFree(self):
+ # STAGE 1: get link to continue
+ m = re.search(self.CHASH_PATTERN, self.html)
+ if m is None:
+ self.parseError("could not detect hash")
+ chash = m.group(1)
+ self.logDebug("read hash " + chash)
+ # continue to stage2
+ post_data = {'hash': chash, 'free': 'Slow download'}
+ self.html = self.load(self.pyfile.url, post=post_data, decode=True)
+
+        # STAGE 2: solve the captcha and wait
+ # first get the infos we need: recaptcha key and wait time
+ recaptcha = ReCaptcha(self)
+ if not recaptcha.detect_key(self.html):
+ self.parseError("could not find recaptcha pattern")
+ self.logDebug("using captcha key " + recaptcha.recaptcha_key)
+ # try the captcha 5 times
+ for i in xrange(5):
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m is None:
+ self.parseError("could not find wait pattern")
+            wait_time = int(m.group(1))
+
+ # then, do the waiting
+ self.wait(wait_time)
+
+ # then, handle the captcha
+ challenge, code = recaptcha.challenge()
+ post_data['recaptcha_challenge_field'] = challenge
+ post_data['recaptcha_response_field'] = code
+
+ self.html = self.load(self.pyfile.url, post=post_data, decode=True)
+
+ # STAGE 3: get direct link
+ m = re.search(self.LINK_PATTERN, self.html, re.DOTALL)
+ if m:
+ break
+
+ if m is None:
+ self.parseError("could not detect direct link")
+
+ direct = m.group(1)
+ self.logDebug('found direct link: ' + direct)
+ self.download(direct, disposition=True)
+
+
+getInfo = create_getInfo(UpstoreNet)
diff --git a/pyload/plugins/hoster/UptoboxCom.py b/pyload/plugins/hoster/UptoboxCom.py
new file mode 100644
index 000000000..8fd5e6fa7
--- /dev/null
+++ b/pyload/plugins/hoster/UptoboxCom.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+from pyload.plugins.internal.CaptchaService import ReCaptcha, SolveMedia
+from pyload.utils import html_unescape
+
+
+class UptoboxCom(XFileSharingPro):
+ __name__ = "UptoboxCom"
+ __type__ = "hoster"
+ __version__ = "0.09"
+
+ __pattern__ = r'https?://(?:www\.)?uptobox\.com/\w+'
+
+ __description__ = """Uptobox.com hoster plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ HOSTER_NAME = "uptobox.com"
+
+ FILE_INFO_PATTERN = r'"para_title">(?P<N>.+) \((?P<S>[\d\.]+) (?P<U>\w+)\)'
+ OFFLINE_PATTERN = r'>(File not found|Access Denied|404 Not Found)'
+ TEMP_OFFLINE_PATTERN = r'>This server is in maintenance mode'
+
+ WAIT_PATTERN = r'>(\d+)</span> seconds<'
+
+ LINK_PATTERN = r'"(https?://\w+\.uptobox\.com/d/.*?)"'
+
+
+ def handleCaptcha(self, inputs):
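+        # Same return convention as XFileSharingPro.handleCaptcha: 4 = SolveMedia, 2 = plain image captcha, 3 = positional digit captcha, 1 = ReCaptcha, 0 = no captcha found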
+ m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ captcha = SolveMedia(self)
+ inputs['adcopy_challenge'], inputs['adcopy_response'] = captcha.challenge(captcha_key)
+ return 4
+ else:
+ m = re.search(self.CAPTCHA_URL_PATTERN, self.html)
+ if m:
+ captcha_url = m.group(1)
+ inputs['code'] = self.decryptCaptcha(captcha_url)
+ return 2
+ else:
+ m = re.search(self.CAPTCHA_DIV_PATTERN, self.html, re.DOTALL)
+ if m:
+ captcha_div = m.group(1)
+ self.logDebug(captcha_div)
+ numerals = re.findall(r'<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>',
+ html_unescape(captcha_div))
+ inputs['code'] = "".join([a[1] for a in sorted(numerals, key=lambda num: int(num[0]))])
+ self.logDebug("CAPTCHA", inputs['code'], numerals)
+ return 3
+ else:
+ m = re.search(self.RECAPTCHA_URL_PATTERN, self.html)
+ if m:
+ recaptcha_key = unquote(m.group(1))
+ self.logDebug("RECAPTCHA KEY: %s" % recaptcha_key)
+ recaptcha = ReCaptcha(self)
+ inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(
+ recaptcha_key)
+ return 1
+ return 0
+
+
+getInfo = create_getInfo(UptoboxCom)
diff --git a/pyload/plugins/hoster/VeehdCom.py b/pyload/plugins/hoster/VeehdCom.py
new file mode 100644
index 000000000..4d76c3525
--- /dev/null
+++ b/pyload/plugins/hoster/VeehdCom.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+
+
+class VeehdCom(Hoster):
+ __name__ = "VeehdCom"
+ __type__ = "hoster"
+ __version__ = "0.23"
+
+ __pattern__ = r'http://veehd\.com/video/\d+_\S+'
+ __config__ = [("filename_spaces", "bool", "Allow spaces in filename", False),
+ ("replacement_char", "str", "Filename replacement character", "_")]
+
+ __description__ = """Veehd.com hoster plugin"""
+ __author_name__ = "cat"
+ __author_mail__ = "cat@pyload"
+
+
+ def _debug(self, msg):
+ self.logDebug('[%s] %s' % (self.__name__, msg))
+
+ def setup(self):
+ self.multiDL = True
+ self.req.canContinue = True
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+ def download_html(self):
+ url = self.pyfile.url
+ self._debug("Requesting page: %s" % (repr(url),))
+ self.html = self.load(url)
+
+ def file_exists(self):
+ if not self.html:
+ self.download_html()
+
+ if '<title>Veehd</title>' in self.html:
+ return False
+ return True
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ m = re.search(r'<title[^>]*>([^<]+) on Veehd</title>', self.html)
+ if m is None:
+ self.fail("video title not found")
+
+ name = m.group(1)
+
+ # replace unwanted characters in filename
+ if self.getConfig('filename_spaces'):
+ pattern = '[^0-9A-Za-z\.\ ]+'
+ else:
+ pattern = '[^0-9A-Za-z\.]+'
+
+ return re.sub(pattern, self.getConfig('replacement_char'), name) + '.avi'
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ m = re.search(r'<embed type="video/divx" src="(http://([^/]*\.)?veehd\.com/dl/[^"]+)"',
+ self.html)
+ if m is None:
+ self.fail("embedded video url not found")
+
+ return m.group(1)
diff --git a/pyload/plugins/hoster/VeohCom.py b/pyload/plugins/hoster/VeohCom.py
new file mode 100644
index 000000000..31b21420a
--- /dev/null
+++ b/pyload/plugins/hoster/VeohCom.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class VeohCom(SimpleHoster):
+ __name__ = "VeohCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?veoh\.com/(tv/)?(watch|videos)/(?P<ID>v\w+)'
+ __config__ = [("quality", "Low;High;Auto", "Quality", "Auto")]
+
+ __description__ = """Veoh.com hoster plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ FILE_NAME_PATTERN = r'<meta name="title" content="(?P<N>.*?)"'
+ OFFLINE_PATTERN = r'>Sorry, we couldn\'t find the video you were looking for'
+
+ FILE_URL_REPLACEMENTS = [(__pattern__, r'http://www.veoh.com/watch/\g<ID>')]
+
+ SH_COOKIES = [(".veoh.com", "lassieLocale", "en")]
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = -1
+
+ def handleFree(self):
+        quality = self.getConfig("quality")
+        if quality == "Auto":
+            quality = ("High", "Low")
+        else:
+            quality = (quality,)
+        for q in quality:
+ pattern = r'"fullPreviewHash%sPath":"(.+?)"' % q
+ m = re.search(pattern, self.html)
+ if m:
+ self.pyfile.name += ".mp4"
+ link = m.group(1).replace("\\", "")
+ self.logDebug("Download link: " + link)
+ self.download(link)
+ return
+ else:
+ self.logInfo("No %s quality video found" % q.upper())
+ else:
+ self.fail("No video found!")
+
+
+getInfo = create_getInfo(VeohCom)
diff --git a/pyload/plugins/hoster/VidPlayNet.py b/pyload/plugins/hoster/VidPlayNet.py
new file mode 100644
index 000000000..82afde07d
--- /dev/null
+++ b/pyload/plugins/hoster/VidPlayNet.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# BigBuckBunny_320x180.mp4 - 61.7 Mb - http://vidplay.net/38lkev0h3jv0
+
+from pyload.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class VidPlayNet(XFileSharingPro):
+ __name__ = "VidPlayNet"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?vidplay\.net/\w{12}'
+
+ __description__ = """VidPlay.net hoster plugin"""
+ __author_name__ = "t4skforce"
+ __author_mail__ = "t4skforce1337[AT]gmail[DOT]com"
+
+ HOSTER_NAME = "vidplay.net"
+
+ OFFLINE_PATTERN = r'<b>File Not Found</b><br>\s*<br>'
+ FILE_NAME_PATTERN = r'<b>Password:</b></div>\s*<h[1-6]>(?P<N>[^<]+)</h[1-6]>'
+ LINK_PATTERN = r'(http://([^/]*?%s|\d+\.\d+\.\d+\.\d+)(:\d+)?(/d/|(?:/files)?/\d+/\w+/)[^"\'<&]+)' % HOSTER_NAME
+
+
+getInfo = create_getInfo(VidPlayNet)
diff --git a/pyload/plugins/hoster/VimeoCom.py b/pyload/plugins/hoster/VimeoCom.py
new file mode 100644
index 000000000..aebf1c344
--- /dev/null
+++ b/pyload/plugins/hoster/VimeoCom.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class VimeoCom(SimpleHoster):
+ __name__ = "VimeoCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?(player\.)?vimeo\.com/(video/)?(?P<ID>\d+)'
+ __config__ = [("quality", "Lowest;Mobile;SD;HD;Highest", "Quality", "Highest"),
+ ("original", "bool", "Try to download the original file first", True)]
+
+ __description__ = """Vimeo.com hoster plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+ FILE_NAME_PATTERN = r'<title>(?P<N>.+) on Vimeo<'
+ OFFLINE_PATTERN = r'class="exception_header"'
+ TEMP_OFFLINE_PATTERN = r'Please try again in a few minutes.<'
+
+ FILE_URL_REPLACEMENTS = [(__pattern__, r'https://www.vimeo.com/\g<ID>')]
+
+ SH_COOKIES = [(".vimeo.com", "language", "en")]
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = -1
+
+ def handleFree(self):
+ password = self.getPassword()
+
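+        # With a visible download button the (JS-evaluated) download dialog is used; otherwise the player config on player.vimeo.com is scraped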
+ if self.js and 'class="btn iconify_down_b"' in self.html:
+ html = self.js.eval(self.load(self.pyfile.url, get={'action': "download", 'password': password}, decode=True))
+ pattern = r'href="(?P<URL>http://vimeo\.com.+?)".*?\>(?P<QL>.+?) '
+ else:
+ id = re.match(self.__pattern__, self.pyfile.url).group("ID")
+ html = self.load("https://player.vimeo.com/video/" + id, get={'password': password})
+ pattern = r'"(?P<QL>\w+)":{"profile".*?"(?P<URL>http://pdl\.vimeocdn\.com.+?)"'
+
+ link = dict([(l.group('QL').lower(), l.group('URL')) for l in re.finditer(pattern, html)])
+
+ if self.getConfig("original"):
+ if "original" in link:
+                self.download(link["original"])
+ return
+ else:
+ self.logInfo("Original file not downloadable")
+
+ quality = self.getConfig("quality")
+ if quality == "Highest":
+ qlevel = ("hd", "sd", "mobile")
+ elif quality == "Lowest":
+ qlevel = ("mobile", "sd", "hd")
+ else:
+            qlevel = (quality.lower(),)
+
+ for q in qlevel:
+ if q in link:
+ self.download(link[q])
+ return
+ else:
+ self.logInfo("No %s quality video found" % q.upper())
+ else:
+ self.fail("No video found!")
+
+
+getInfo = create_getInfo(VimeoCom)
diff --git a/pyload/plugins/hoster/Vipleech4uCom.py b/pyload/plugins/hoster/Vipleech4uCom.py
new file mode 100644
index 000000000..436b7d484
--- /dev/null
+++ b/pyload/plugins/hoster/Vipleech4uCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class Vipleech4uCom(DeadHoster):
+ __name__ = "Vipleech4uCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?vipleech4u\.com/manager\.php'
+
+ __description__ = """Vipleech4u.com hoster plugin"""
+ __author_name__ = "Kagenoshin"
+ __author_mail__ = "kagenoshin@gmx.ch"
+
+
+getInfo = create_getInfo(Vipleech4uCom)
diff --git a/pyload/plugins/hoster/WarserverCz.py b/pyload/plugins/hoster/WarserverCz.py
new file mode 100644
index 000000000..365f0f0fa
--- /dev/null
+++ b/pyload/plugins/hoster/WarserverCz.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class WarserverCz(DeadHoster):
+ __name__ = "WarserverCz"
+ __type__ = "hoster"
+ __version__ = "0.13"
+
+ __pattern__ = r'http://(?:www\.)?warserver\.cz/stahnout/\d+'
+
+ __description__ = """Warserver.cz hoster plugin"""
+ __author_name__ = "Walter Purcaro"
+ __author_mail__ = "vuolter@gmail.com"
+
+
+getInfo = create_getInfo(WarserverCz)
diff --git a/pyload/plugins/hoster/WebshareCz.py b/pyload/plugins/hoster/WebshareCz.py
new file mode 100644
index 000000000..6ca8d8882
--- /dev/null
+++ b/pyload/plugins/hoster/WebshareCz.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getRequest
+from pyload.plugins.internal.SimpleHoster import SimpleHoster
+
+
+def getInfo(urls):
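+    # webshare.cz exposes a small XML API: file_info returns <name>/<size> elements or the string 'File not found'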
+ h = getRequest()
+ for url in urls:
+ h.load(url)
+ fid = re.search(WebshareCz.__pattern__, url).group('ID')
+ api_data = h.load('https://webshare.cz/api/file_info/', post={'ident': fid})
+ if 'File not found' in api_data:
+ file_info = (url, 0, 1, url)
+ else:
+ name = re.search('<name>(.+)</name>', api_data).group(1)
+ size = re.search('<size>(.+)</size>', api_data).group(1)
+ file_info = (name, size, 2, url)
+ yield file_info
+
+
+class WebshareCz(SimpleHoster):
+ __name__ = "WebshareCz"
+ __type__ = "hoster"
+ __version__ = "0.13"
+
+ __pattern__ = r'https?://(?:www\.)?webshare.cz/(?:#/)?file/(?P<ID>\w+)'
+
+ __description__ = """WebShare.cz hoster plugin"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def handleFree(self):
+ api_data = self.load('https://webshare.cz/api/file_link/', post={'ident': self.fid})
+ self.logDebug("API data: " + api_data)
+ m = re.search('<link>(.+)</link>', api_data)
+ if m is None:
+ self.parseError('Unable to detect direct link')
+ direct = m.group(1)
+ self.logDebug("Direct link: " + direct)
+ self.download(direct, disposition=True)
+
+ def getFileInfo(self):
+ self.logDebug("URL: %s" % self.pyfile.url)
+
+ self.fid = re.match(self.__pattern__, self.pyfile.url).group('ID')
+
+ self.load(self.pyfile.url)
+ api_data = self.load('https://webshare.cz/api/file_info/', post={'ident': self.fid})
+
+ if 'File not found' in api_data:
+ self.offline()
+ else:
+ self.pyfile.name = re.search('<name>(.+)</name>', api_data).group(1)
+ self.pyfile.size = re.search('<size>(.+)</size>', api_data).group(1)
+
+ self.logDebug("FILE NAME: %s FILE SIZE: %s" % (self.pyfile.name, self.pyfile.size))
diff --git a/pyload/plugins/hoster/WrzucTo.py b/pyload/plugins/hoster/WrzucTo.py
new file mode 100644
index 000000000..b766ea785
--- /dev/null
+++ b/pyload/plugins/hoster/WrzucTo.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import HTTPHEADER
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class WrzucTo(SimpleHoster):
+ __name__ = "WrzucTo"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?wrzuc\.to/([a-zA-Z0-9]+(\.wt|\.html)|(\w+/?linki/[a-zA-Z0-9]+))'
+
+ __description__ = """Wrzuc.to hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r'id="file_info">\s*<strong>(?P<N>.*?)</strong>'
+ FILE_SIZE_PATTERN = r'class="info">\s*<tr>\s*<td>(?P<S>.*?)</td>'
+
+ SH_COOKIES = [(".wrzuc.to", "language", "en")]
+
+
+ def setup(self):
+ self.multiDL = True
+
+ def handleFree(self):
+ data = dict(re.findall(r'(md5|file): "(.*?)"', self.html))
+ if len(data) != 2:
+ self.parseError('File ID')
+
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
+ self.req.http.lastURL = self.pyfile.url
+ self.load("http://www.wrzuc.to/ajax/server/prepair", post={"md5": data['md5']})
+
+ self.req.http.lastURL = self.pyfile.url
+ self.html = self.load("http://www.wrzuc.to/ajax/server/download_link", post={"file": data['file']})
+
+ data.update(re.findall(r'"(download_link|server_id)":"(.*?)"', self.html))
+ if len(data) != 4:
+ self.parseError('Download URL')
+
+ download_url = "http://%s.wrzuc.to/pobierz/%s" % (data['server_id'], data['download_link'])
+ self.logDebug("Download URL: %s" % download_url)
+ self.download(download_url)
+
+
+getInfo = create_getInfo(WrzucTo)
diff --git a/pyload/plugins/hoster/WuploadCom.py b/pyload/plugins/hoster/WuploadCom.py
new file mode 100644
index 000000000..5bc933ae5
--- /dev/null
+++ b/pyload/plugins/hoster/WuploadCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class WuploadCom(DeadHoster):
+ __name__ = "WuploadCom"
+ __type__ = "hoster"
+ __version__ = "0.23"
+
+ __pattern__ = r'http://(?:www\.)?wupload\..*?/file/(([a-z][0-9]+/)?[0-9]+)(/.*)?'
+
+ __description__ = """Wupload.com hoster plugin"""
+ __author_name__ = ("jeix", "Paul King")
+ __author_mail__ = ("jeix@hasnomail.de", "")
+
+
+getInfo = create_getInfo(WuploadCom)
diff --git a/pyload/plugins/hoster/X7To.py b/pyload/plugins/hoster/X7To.py
new file mode 100644
index 000000000..8df1d0ab3
--- /dev/null
+++ b/pyload/plugins/hoster/X7To.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class X7To(DeadHoster):
+ __name__ = "X7To"
+ __type__ = "hoster"
+ __version__ = "0.41"
+
+ __pattern__ = r'http://(?:www\.)?x7.to/'
+
+ __description__ = """X7.to hoster plugin"""
+ __author_name__ = "ernieb"
+ __author_mail__ = "ernieb"
+
+
+getInfo = create_getInfo(X7To)
diff --git a/pyload/plugins/hoster/XFileSharingPro.py b/pyload/plugins/hoster/XFileSharingPro.py
new file mode 100644
index 000000000..c7733600b
--- /dev/null
+++ b/pyload/plugins/hoster/XFileSharingPro.py
@@ -0,0 +1,324 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import FOLLOWLOCATION, LOW_SPEED_TIME
+from random import random
+from urllib import unquote
+from urlparse import urlparse
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.CaptchaService import ReCaptcha, SolveMedia
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, PluginParseError, replace_patterns
+from pyload.utils import html_unescape
+
+
+class XFileSharingPro(SimpleHoster):
+ """
+ Common base for XFileSharingPro hosters like EasybytezCom, CramitIn, FiledinoCom...
+ Some hosters may work straight away when added to __pattern__
+ However, most of them will NOT work because they are either down or running a customized version
+ """
+ __name__ = "XFileSharingPro"
+ __type__ = "hoster"
+ __version__ = "0.32"
+
+ __pattern__ = r'^unmatchable$'
+
+ __description__ = """XFileSharingPro base hoster plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ FILE_INFO_PATTERN = r'<tr><td align=right><b>Filename:</b></td><td nowrap>(?P<N>[^<]+)</td></tr>\s*.*?<small>\((?P<S>[^<]+)\)</small>'
+ FILE_NAME_PATTERN = r'<input type="hidden" name="fname" value="(?P<N>[^"]+)"'
+ FILE_SIZE_PATTERN = r'You have requested .*\((?P<S>[\d\.\,]+) ?(?P<U>\w+)?\)</font>'
+ OFFLINE_PATTERN = r'>\w+ (Not Found|file (was|has been) removed)'
+
+ WAIT_PATTERN = r'<span id="countdown_str">.*?>(\d+)</span>'
+
+ OVR_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)'
+
+ CAPTCHA_URL_PATTERN = r'(http://[^"\']+?/captchas?/[^"\']+)'
+ RECAPTCHA_URL_PATTERN = r'http://[^"\']+?recaptcha[^"\']+?\?k=([^"\']+)"'
+ CAPTCHA_DIV_PATTERN = r'>Enter code.*?<div.*?>(.*?)</div>'
+ SOLVEMEDIA_PATTERN = r'http:\/\/api\.solvemedia\.com\/papi\/challenge\.script\?k=(.*?)"'
+
+ ERROR_PATTERN = r'class=["\']err["\'][^>]*>(.*?)</'
+
+
+ def setup(self):
+ if self.__name__ == "XFileSharingPro":
+ self.__pattern__ = self.core.pluginManager.hosterPlugins[self.__name__]['pattern']
+ self.multiDL = True
+ else:
+ self.resumeDownload = self.multiDL = self.premium
+
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ self.prepare()
+
+ pyfile.url = replace_patterns(pyfile.url, self.FILE_URL_REPLACEMENTS)
+
+ if not re.match(self.__pattern__, pyfile.url):
+ if self.premium:
+ self.handleOverriden()
+ else:
+ self.fail("Only premium users can download from other hosters with %s" % self.HOSTER_NAME)
+ else:
+ try:
+ # Due to a 0.4.9 core bug self.load would use cookies even if
+ # cookies=False. Workaround using getURL to avoid cookies.
+ # Can be reverted in 0.5 as the cookies bug has been fixed.
+ self.html = getURL(pyfile.url, decode=True)
+ self.file_info = self.getFileInfo()
+ except PluginParseError:
+ self.file_info = None
+
+ self.location = self.getDirectDownloadLink()
+
+ if not self.file_info:
+ pyfile.name = html_unescape(unquote(urlparse(
+ self.location if self.location else pyfile.url).path.split("/")[-1]))
+
+ if self.location:
+ self.startDownload(self.location)
+ elif self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def prepare(self):
+ """ Initialize important variables """
+ if not hasattr(self, "HOSTER_NAME"):
+ self.HOSTER_NAME = re.match(self.__pattern__, self.pyfile.url).group(1)
+ if not hasattr(self, "LINK_PATTERN"):
+ self.LINK_PATTERN = r'(http://([^/]*?%s|\d+\.\d+\.\d+\.\d+)(:\d+)?(/d/|(?:/files)?/\d+/\w+/)[^"\'<]+)' % self.HOSTER_NAME
+
+ self.captcha = self.errmsg = None
+ self.passwords = self.getPassword().splitlines()
+
+ def getDirectDownloadLink(self):
+ """ Get download link for premium users with direct download enabled """
+ self.req.http.lastURL = self.pyfile.url
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+ self.html = self.load(self.pyfile.url, cookies=True, decode=True)
+ self.header = self.req.http.header
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+
+ location = None
+ m = re.search(r"Location\s*:\s*(.*)", self.header, re.I)
+ if m and re.match(self.LINK_PATTERN, m.group(1)):
+ location = m.group(1).strip()
+
+ return location
+
+ def handleFree(self):
+ url = self.getDownloadLink()
+ self.logDebug("Download URL: %s" % url)
+ self.startDownload(url)
+
+ def getDownloadLink(self):
+ for i in xrange(5):
+ self.logDebug("Getting download link: #%d" % i)
+ data = self.getPostParameters()
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+ self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
+ self.header = self.req.http.header
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+
+ m = re.search(r"Location\s*:\s*(.*)", self.header, re.I)
+ if m:
+ break
+
+ m = re.search(self.LINK_PATTERN, self.html, re.S)
+ if m:
+ break
+
+ else:
+ if self.errmsg and 'captcha' in self.errmsg:
+ self.fail("No valid captcha code entered")
+ else:
+ self.fail("Download link not found")
+
+ return m.group(1)
+
+ def handlePremium(self):
+ self.html = self.load(self.pyfile.url, post=self.getPostParameters())
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError('DIRECT LINK')
+ self.startDownload(m.group(1))
+
+ def handleOverriden(self):
+ #only tested with easybytez.com
+ self.html = self.load("http://www.%s/" % self.HOSTER_NAME)
+ action, inputs = self.parseHtmlForm('')
+ upload_id = "%012d" % int(random() * 10 ** 12)
+ action += upload_id + "&js_on=1&utype=prem&upload_type=url"
+ inputs['tos'] = '1'
+ inputs['url_mass'] = self.pyfile.url
+ inputs['up1oad_type'] = 'url'
+
+ self.logDebug(self.HOSTER_NAME, action, inputs)
+ #wait for file to upload to easybytez.com
+ self.req.http.c.setopt(LOW_SPEED_TIME, 600)
+ self.html = self.load(action, post=inputs)
+
+ action, inputs = self.parseHtmlForm('F1')
+ if not inputs:
+ self.parseError('TEXTAREA')
+ self.logDebug(self.HOSTER_NAME, inputs)
+ if inputs['st'] == 'OK':
+ self.html = self.load(action, post=inputs)
+ elif inputs['st'] == 'Can not leech file':
+ self.retry(max_tries=20, wait_time=3 * 60, reason=inputs['st'])
+ else:
+ self.fail(inputs['st'])
+
+ #get easybytez.com link for uploaded file
+ m = re.search(self.OVR_LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError('DIRECT LINK (OVR)')
+ self.pyfile.url = m.group(1)
+ header = self.load(self.pyfile.url, just_header=True)
+ if 'location' in header: # Direct link
+ self.startDownload(self.pyfile.url)
+ else:
+ self.retry()
+
+ def startDownload(self, link):
+ link = link.strip()
+ if self.captcha:
+ self.correctCaptcha()
+ self.logDebug('DIRECT LINK: %s' % link)
+ self.download(link, disposition=True)
+
+ def checkErrors(self):
+ m = re.search(self.ERROR_PATTERN, self.html)
+ if m:
+ self.errmsg = m.group(1)
+ self.logWarning(re.sub(r"<.*?>", " ", self.errmsg))
+
+ if 'wait' in self.errmsg:
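+                # Convert phrases like "1 hour, 20 minutes" from the error message into seconds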
+ wait_time = sum([int(v) * {"hour": 3600, "minute": 60, "second": 1}[u] for v, u in
+ re.findall(r'(\d+)\s*(hour|minute|second)', self.errmsg)])
+ self.wait(wait_time, True)
+ elif 'captcha' in self.errmsg:
+ self.invalidCaptcha()
+ elif 'premium' in self.errmsg and 'require' in self.errmsg:
+ self.fail("File can be downloaded by premium users only")
+ elif 'limit' in self.errmsg:
+ self.wait(1 * 60 * 60, True)
+ self.retry(25)
+ elif 'countdown' in self.errmsg or 'Expired' in self.errmsg:
+ self.retry()
+ elif 'maintenance' in self.errmsg:
+ self.tempOffline()
+ elif 'download files up to' in self.errmsg:
+ self.fail("File too large for free download")
+ else:
+ self.fail(self.errmsg)
+
+ else:
+ self.errmsg = None
+
+ return self.errmsg
+
+ def getPostParameters(self):
+ for _ in xrange(3):
+ if not self.errmsg:
+ self.checkErrors()
+
+ if hasattr(self, "FORM_PATTERN"):
+ action, inputs = self.parseHtmlForm(self.FORM_PATTERN)
+ else:
+ action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})
+
+ if not inputs:
+ action, inputs = self.parseHtmlForm('F1')
+ if not inputs:
+ if self.errmsg:
+ self.retry()
+ else:
+ self.parseError("Form not found")
+
+ self.logDebug(self.HOSTER_NAME, inputs)
+
+ if 'op' in inputs and inputs['op'] in ("download2", "download3"):
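+                # "download2"/"download3" marks the final download form; otherwise the first form (free/premium choice) still has to be submitted below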
+ if "password" in inputs:
+ if self.passwords:
+ inputs['password'] = self.passwords.pop(0)
+ else:
+ self.fail("No or invalid passport")
+
+ if not self.premium:
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ wait_time = int(m.group(1)) + 1
+ self.setWait(wait_time, False)
+ else:
+ wait_time = 0
+
+ self.captcha = self.handleCaptcha(inputs)
+
+ if wait_time:
+ self.wait()
+
+ self.errmsg = None
+ return inputs
+
+ else:
+ inputs['referer'] = self.pyfile.url
+
+ if self.premium:
+ inputs['method_premium'] = "Premium Download"
+ if 'method_free' in inputs:
+ del inputs['method_free']
+ else:
+ inputs['method_free'] = "Free Download"
+ if 'method_premium' in inputs:
+ del inputs['method_premium']
+
+ self.html = self.load(self.pyfile.url, post=inputs, ref=True)
+ self.errmsg = None
+
+ else:
+ self.parseError('FORM: %s' % (inputs['op'] if 'op' in inputs else 'UNKNOWN'))
+
+ def handleCaptcha(self, inputs):
+ m = re.search(self.RECAPTCHA_URL_PATTERN, self.html)
+ if m:
+ recaptcha_key = unquote(m.group(1))
+ self.logDebug("RECAPTCHA KEY: %s" % recaptcha_key)
+ recaptcha = ReCaptcha(self)
+ inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(recaptcha_key)
+ return 1
+ else:
+ m = re.search(self.CAPTCHA_URL_PATTERN, self.html)
+ if m:
+ captcha_url = m.group(1)
+ inputs['code'] = self.decryptCaptcha(captcha_url)
+ return 2
+ else:
+ m = re.search(self.CAPTCHA_DIV_PATTERN, self.html, re.DOTALL)
+ if m:
+ captcha_div = m.group(1)
+ self.logDebug(captcha_div)
+ numerals = re.findall(r'<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', html_unescape(captcha_div))
+ inputs['code'] = "".join([a[1] for a in sorted(numerals, key=lambda num: int(num[0]))])
+ self.logDebug("CAPTCHA", inputs['code'], numerals)
+ return 3
+ else:
+ m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ captcha = SolveMedia(self)
+ inputs['adcopy_challenge'], inputs['adcopy_response'] = captcha.challenge(captcha_key)
+ return 4
+ return 0
+
+
+getInfo = create_getInfo(XFileSharingPro)
diff --git a/pyload/plugins/hoster/XHamsterCom.py b/pyload/plugins/hoster/XHamsterCom.py
new file mode 100644
index 000000000..0afb94b74
--- /dev/null
+++ b/pyload/plugins/hoster/XHamsterCom.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.Hoster import Hoster
+
+
+def clean_json(json_expr):
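+    # The embedded flashvars block is not strict JSON (single quotes, stray whitespace), so normalize it before json_loads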
+ json_expr = re.sub('[\n\r]', '', json_expr)
+ json_expr = re.sub(' +', '', json_expr)
+ json_expr = re.sub('\'', '"', json_expr)
+
+ return json_expr
+
+
+class XHamsterCom(Hoster):
+ __name__ = "XHamsterCom"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'http://(?:www\.)?xhamster\.com/movies/.+'
+ __config__ = [("type", ".mp4;.flv", "Preferred type", ".mp4")]
+
+ __description__ = """XHamster.com hoster plugin"""
+ __author_name__ = None
+ __author_mail__ = None
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+
+ if not self.file_exists():
+ self.offline()
+
+ if self.getConfig("type"):
+ self.desired_fmt = self.getConfig("type")
+
+ pyfile.name = self.get_file_name() + self.desired_fmt
+ self.download(self.get_file_url())
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ flashvar_pattern = re.compile('flashvars = ({.*?});', re.DOTALL)
+ json_flashvar = flashvar_pattern.search(self.html)
+
+ if not json_flashvar:
+ self.fail("Parse error (flashvars)")
+
+ j = clean_json(json_flashvar.group(1))
+ flashvars = json_loads(j)
+
+ if flashvars['srv']:
+ srv_url = flashvars['srv'] + '/'
+ else:
+ self.fail("Parse error (srv_url)")
+
+ if flashvars['url_mode']:
+ url_mode = flashvars['url_mode']
+ else:
+ self.fail("Parse error (url_mode)")
+
+ if self.desired_fmt == ".mp4":
+ file_url = re.search(r"<a href=\"" + srv_url + "(.+?)\"", self.html)
+ if file_url is None:
+ self.fail("Parse error (file_url)")
+ file_url = file_url.group(1)
+ long_url = srv_url + file_url
+ self.logDebug("long_url: %s" % long_url)
+ else:
+ if flashvars['file']:
+ file_url = unquote(flashvars['file'])
+ else:
+ self.fail("Parse error (file_url)")
+
+ if url_mode == '3':
+ long_url = file_url
+ self.logDebug("long_url: %s" % long_url)
+ else:
+ long_url = srv_url + "key=" + file_url
+ self.logDebug("long_url: %s" % long_url)
+
+ return long_url
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ pattern = r"<title>(.*?) - xHamster\.com</title>"
+ name = re.search(pattern, self.html)
+ if name is None:
+ pattern = r"<h1 >(.*)</h1>"
+ name = re.search(pattern, self.html)
+ if name is None:
+ pattern = r"http://[www.]+xhamster\.com/movies/.*/(.*?)\.html?"
+ name = re.match(file_name_pattern, self.pyfile.url)
+ if name is None:
+ pattern = r"<div id=\"element_str_id\" style=\"display:none;\">(.*)</div>"
+ name = re.search(pattern, self.html)
+ if name is None:
+ return "Unknown"
+
+ return name.group(1)
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+ if re.search(r"(.*Video not found.*)", self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/XVideosCom.py b/pyload/plugins/hoster/XVideosCom.py
new file mode 100644
index 000000000..75162955a
--- /dev/null
+++ b/pyload/plugins/hoster/XVideosCom.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.Hoster import Hoster
+
+
+class XVideosCom(Hoster):
+ __name__ = "XVideos.com"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?xvideos\.com/video([0-9]+)/.*'
+
+ __description__ = """XVideos.com hoster plugin"""
+ __author_name__ = None
+ __author_mail__ = None
+
+
+ def process(self, pyfile):
+ site = self.load(pyfile.url)
+ pyfile.name = "%s (%s).flv" % (
+ re.search(r"<h2>([^<]+)<span", site).group(1),
+ re.match(self.__pattern__, pyfile.url).group(1),
+ )
+ self.download(unquote(re.search(r"flv_url=([^&]+)&", site).group(1)))
diff --git a/pyload/plugins/hoster/Xdcc.py b/pyload/plugins/hoster/Xdcc.py
new file mode 100644
index 000000000..8b427cfdc
--- /dev/null
+++ b/pyload/plugins/hoster/Xdcc.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+
+import re
+import socket
+import struct
+import sys
+import time
+
+from os import makedirs
+from os.path import exists, join
+from select import select
+
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import safe_join
+
+
+class Xdcc(Hoster):
+ __name__ = "Xdcc"
+ __type__ = "hoster"
+ __version__ = "0.32"
+
+ __config__ = [("nick", "str", "Nickname", "pyload"),
+ ("ident", "str", "Ident", "pyloadident"),
+ ("realname", "str", "Realname", "pyloadreal")]
+
+ __description__ = """Download from IRC XDCC bot"""
+ __author_name__ = "jeix"
+ __author_mail__ = "jeix@hasnomail.com"
+
+
+ def setup(self):
+ self.debug = 0 # 0,1,2
+ self.timeout = 30
+ self.multiDL = False
+
+ def process(self, pyfile):
+ # change request type
+ self.req = pyfile.m.core.requestFactory.getRequest(self.__name__, type="XDCC")
+
+ self.pyfile = pyfile
+ for _ in xrange(0, 3):
+ try:
+ nmn = self.doDownload(pyfile.url)
+ self.logDebug("%s: Download of %s finished." % (self.__name__, nmn))
+ return
+ except socket.error, e:
+ if hasattr(e, "errno"):
+ errno = e.errno
+ else:
+ errno = e.args[0]
+
+ if errno == 10054:
+ self.logDebug("XDCC: Server blocked our ip, retry in 5 min")
+ self.setWait(300)
+ self.wait()
+ continue
+
+ self.fail("Failed due to socket errors. Code: %d" % errno)
+
+ self.fail("Server blocked our ip, retry again later manually")
+
+ def doDownload(self, url):
+ self.pyfile.setStatus("waiting") # real link
+
+ m = re.match(r'xdcc://(.*?)/#?(.*?)/(.*?)/#?(\d+)/?', url)
+ server = m.group(1)
+ chan = m.group(2)
+ bot = m.group(3)
+ pack = m.group(4)
+ nick = self.getConfig('nick')
+ ident = self.getConfig('ident')
+ real = self.getConfig('realname')
+
+ temp = server.split(':')
+ ln = len(temp)
+ if ln == 2:
+ host, port = temp
+ elif ln == 1:
+ host, port = temp[0], 6667
+ else:
+ self.fail("Invalid hostname for IRC Server (%s)" % server)
+
+ #######################
+ # CONNECT TO IRC AND IDLE FOR REAL LINK
+ dl_time = time.time()
+
+ sock = socket.socket()
+ sock.connect((host, int(port)))
+ if nick == "pyload":
+ nick = "pyload-%d" % (time.time() % 1000) # last 3 digits
+ sock.send("NICK %s\r\n" % nick)
+ sock.send("USER %s %s bla :%s\r\n" % (ident, host, real))
+ time.sleep(3)
+ sock.send("JOIN #%s\r\n" % chan)
+ sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
+
+ # IRC recv loop
+ readbuffer = ""
+ done = False
+ retry = None
+ m = None
+ while True:
+
+ # done is set if we got our real link
+ if done:
+ break
+
+ if retry:
+ if time.time() > retry:
+ retry = None
+ dl_time = time.time()
+ sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
+
+ else:
+ if (dl_time + self.timeout) < time.time(): # todo: add in config
+ sock.send("QUIT :byebye\r\n")
+ sock.close()
+ self.fail("XDCC Bot did not answer")
+
+ fdset = select([sock], [], [], 0)
+ if sock not in fdset[0]:
+ continue
+
+ readbuffer += sock.recv(1024)
+ temp = readbuffer.split("\n")
+ readbuffer = temp.pop()
+
+ for line in temp:
+                if self.debug == 2:
+ print "*> " + unicode(line, errors='ignore')
+ line = line.rstrip()
+ first = line.split()
+
+ if first[0] == "PING":
+ sock.send("PONG %s\r\n" % first[1])
+
+ if first[0] == "ERROR":
+ self.fail("IRC-Error: %s" % line)
+
+ msg = line.split(None, 3)
+ if len(msg) != 4:
+ continue
+
+ msg = {
+ "origin": msg[0][1:],
+ "action": msg[1],
+ "target": msg[2],
+ "text": msg[3][1:]
+ }
+
+ if nick == msg['target'][0:len(nick)] and "PRIVMSG" == msg['action']:
+ if msg['text'] == "\x01VERSION\x01":
+ self.logDebug("XDCC: Sending CTCP VERSION.")
+ sock.send("NOTICE %s :%s\r\n" % (msg['origin'], "pyLoad! IRC Interface"))
+ elif msg['text'] == "\x01TIME\x01":
+ self.logDebug("Sending CTCP TIME.")
+ sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
+ elif msg['text'] == "\x01LAG\x01":
+ pass # don't know how to answer
+
+ if not (bot == msg['origin'][0:len(bot)]
+ and nick == msg['target'][0:len(nick)]
+ and msg['action'] in ("PRIVMSG", "NOTICE")):
+ continue
+
+                if self.debug == 1:
+ print "%s: %s" % (msg['origin'], msg['text'])
+
+ if "You already requested that pack" in msg['text']:
+ retry = time.time() + 300
+
+ if "you must be on a known channel to request a pack" in msg['text']:
+ self.fail("Wrong channel")
+
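+                # XDCC bots offer the file via CTCP: "\x01DCC SEND <filename> <ip-as-integer> <port> [<filesize>]\x01"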
+ m = re.match('\x01DCC SEND (.*?) (\d+) (\d+)(?: (\d+))?\x01', msg['text'])
+ if m:
+ done = True
+
+ # get connection data
+                    ip = socket.inet_ntoa(struct.pack('!I', int(m.group(2))))  # IP is sent as a 32-bit integer in network byte order
+ port = int(m.group(3))
+ packname = m.group(1)
+
+ if len(m.groups()) > 3:
+ self.req.filesize = int(m.group(4))
+
+ self.pyfile.name = packname
+
+ download_folder = self.config['general']['download_folder']
+ filename = safe_join(download_folder, packname)
+
+ self.logInfo("XDCC: Downloading %s from %s:%d" % (packname, ip, port))
+
+ self.pyfile.setStatus("downloading")
+ newname = self.req.download(ip, port, filename, sock, self.pyfile.setProgress)
+ if newname and newname != filename:
+ self.logInfo("%(name)s saved as %(newname)s" % {"name": self.pyfile.name, "newname": newname})
+ filename = newname
+
+ # kill IRC socket
+ # sock.send("QUIT :byebye\r\n")
+ sock.close()
+
+ self.lastDownload = filename
+ return self.lastDownload
diff --git a/pyload/plugins/hoster/YibaishiwuCom.py b/pyload/plugins/hoster/YibaishiwuCom.py
new file mode 100644
index 000000000..b6d06d234
--- /dev/null
+++ b/pyload/plugins/hoster/YibaishiwuCom.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.common.json_layer import json_loads
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class YibaishiwuCom(SimpleHoster):
+ __name__ = "YibaishiwuCom"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'http://(?:www\.)?(?:u\.)?115.com/file/(?P<ID>\w+)'
+
+ __description__ = """115.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ FILE_NAME_PATTERN = r"file_name: '(?P<N>[^']+)'"
+ FILE_SIZE_PATTERN = r"file_size: '(?P<S>[^']+)'"
+ OFFLINE_PATTERN = ur'<h3><i style="color:red;">哎呀提取码䞍存圚䞍劚搜搜看吧</i></h3>'
+
+ LINK_PATTERN = r'(/\?ct=(pickcode|download)[^"\']+)'
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.parseError("AJAX URL")
+ url = m.group(1)
+ self.logDebug(('FREEUSER' if m.group(2) == 'download' else 'GUEST') + ' URL', url)
+
+ response = json_loads(self.load("http://115.com" + url, decode=False))
+ if "urls" in response:
+ mirrors = response['urls']
+ elif "data" in response:
+ mirrors = response['data']
+ else:
+            mirrors = []
+
+ for mr in mirrors:
+ try:
+ url = mr['url'].replace("\\", "")
+ self.logDebug("Trying URL: " + url)
+ self.download(url)
+ break
+            except Exception:
+ continue
+ else:
+ self.fail('No working link found')
+
+
+getInfo = create_getInfo(YibaishiwuCom)
diff --git a/pyload/plugins/hoster/YoupornCom.py b/pyload/plugins/hoster/YoupornCom.py
new file mode 100644
index 000000000..de23780c3
--- /dev/null
+++ b/pyload/plugins/hoster/YoupornCom.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hoster import Hoster
+
+
+class YoupornCom(Hoster):
+ __name__ = "YoupornCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?youporn\.com/watch/.+'
+
+ __description__ = """Youporn.com hoster plugin"""
+ __author_name__ = "willnix"
+ __author_mail__ = "willnix@pyload.org"
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url, post={"user_choice": "Enter"}, cookies=False)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ return re.search(r'(http://download\.youporn\.com/download/\d+\?save=1)">', self.html).group(1)
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ file_name_pattern = r"<title>(.*) - Free Porn Videos - YouPorn</title>"
+ return re.search(file_name_pattern, self.html).group(1).replace("&amp;", "&").replace("/", "") + '.flv'
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+ if re.search(r"(.*invalid video_id.*)", self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/YourfilesTo.py b/pyload/plugins/hoster/YourfilesTo.py
new file mode 100644
index 000000000..2de636b4b
--- /dev/null
+++ b/pyload/plugins/hoster/YourfilesTo.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.Hoster import Hoster
+
+
+class YourfilesTo(Hoster):
+ __name__ = "YourfilesTo"
+ __type__ = "hoster"
+ __version__ = "0.21"
+
+ __pattern__ = r'(http://)?(?:www\.)?yourfiles\.(to|biz)/\?d=[a-zA-Z0-9]+'
+
+ __description__ = """Youfiles.to hoster plugin"""
+ __author_name__ = ("jeix", "skydancer")
+ __author_mail__ = ("jeix@hasnomail.de", "skydancer@hasnomail.de")
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.prepare()
+ self.download(self.get_file_url())
+
+ def prepare(self):
+ if not self.file_exists():
+ self.offline()
+
+ self.pyfile.name = self.get_file_name()
+
+ wait_time = self.get_waiting_time()
+ self.setWait(wait_time)
+ self.logDebug("%s: Waiting %d seconds." % (self.__name__, wait_time))
+ self.wait()
+
+ def get_waiting_time(self):
+ if not self.html:
+ self.download_html()
+
+ #var zzipitime = 15;
+ m = re.search(r'var zzipitime = (\d+);', self.html)
+ if m:
+ sec = int(m.group(1))
+ else:
+ sec = 0
+
+ return sec
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ url = re.search(r"var bla = '(.*?)';", self.html)
+ if url:
+ url = url.group(1)
+ url = unquote(url.replace("http://http:/http://", "http://").replace("dumdidum", ""))
+ return url
+ else:
+ self.fail("absolute filepath could not be found. offline? ")
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ return re.search("<title>(.*)</title>", self.html).group(1)
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if re.search(r"HTTP Status 404", self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/YoutubeCom.py b/pyload/plugins/hoster/YoutubeCom.py
new file mode 100644
index 000000000..6869d8b86
--- /dev/null
+++ b/pyload/plugins/hoster/YoutubeCom.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+
+import os
+import re
+import subprocess
+
+from urllib import unquote
+
+from pyload.plugins.Hoster import Hoster
+from pyload.plugins.internal.SimpleHoster import replace_patterns
+from pyload.utils import html_unescape
+
+
+def which(program):
+ """Works exactly like the unix command which
+
+ Courtesy of http://stackoverflow.com/a/377028/675646"""
+
+ def is_exe(fpath):
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+ fpath, fname = os.path.split(program)
+ if fpath:
+ if is_exe(program):
+ return program
+ else:
+ for path in os.environ['PATH'].split(os.pathsep):
+ path = path.strip('"')
+ exe_file = os.path.join(path, program)
+ if is_exe(exe_file):
+ return exe_file
+
+ return None
+
+
+class YoutubeCom(Hoster):
+ __name__ = "YoutubeCom"
+ __type__ = "hoster"
+ __version__ = "0.40"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?(?:youtube\.com|youtu\.be)/watch.*?[?&]v=.*'
+ __config__ = [("quality", "sd;hd;fullhd;240p;360p;480p;720p;1080p;3072p", "Quality Setting", "hd"),
+ ("fmt", "int", "FMT/ITAG Number (5-102, 0 for auto)", 0),
+ (".mp4", "bool", "Allow .mp4", True),
+ (".flv", "bool", "Allow .flv", True),
+ (".webm", "bool", "Allow .webm", False),
+ (".3gp", "bool", "Allow .3gp", False),
+ ("3d", "bool", "Prefer 3D", False)]
+
+ __description__ = """Youtube.com hoster plugin"""
+ __author_name__ = ("spoob", "zoidberg")
+ __author_mail__ = ("spoob@pyload.org", "zoidberg@mujmail.cz")
+
+ FILE_URL_REPLACEMENTS = [(r'youtu\.be/', 'youtube.com/')]
+
+ # Invalid characters that must be removed from the file name
+ invalidChars = u'\u2605:?><"|\\'
+
+ # name, width, height, quality ranking, 3D
+ formats = {5: (".flv", 400, 240, 1, False),
+ 6: (".flv", 640, 400, 4, False),
+ 17: (".3gp", 176, 144, 0, False),
+ 18: (".mp4", 480, 360, 2, False),
+ 22: (".mp4", 1280, 720, 8, False),
+ 43: (".webm", 640, 360, 3, False),
+ 34: (".flv", 640, 360, 4, False),
+ 35: (".flv", 854, 480, 6, False),
+ 36: (".3gp", 400, 240, 1, False),
+ 37: (".mp4", 1920, 1080, 9, False),
+ 38: (".mp4", 4096, 3072, 10, False),
+ 44: (".webm", 854, 480, 5, False),
+ 45: (".webm", 1280, 720, 7, False),
+ 46: (".webm", 1920, 1080, 9, False),
+ 82: (".mp4", 640, 360, 3, True),
+ 83: (".mp4", 400, 240, 1, True),
+ 84: (".mp4", 1280, 720, 8, True),
+ 85: (".mp4", 1920, 1080, 9, True),
+ 100: (".webm", 640, 360, 3, True),
+ 101: (".webm", 640, 360, 4, True),
+ 102: (".webm", 1280, 720, 8, True)}
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+ def process(self, pyfile):
+ pyfile.url = replace_patterns(pyfile.url, self.FILE_URL_REPLACEMENTS)
+ html = self.load(pyfile.url, decode=True)
+
+ if re.search(r'<div id="player-unavailable" class="\s*player-width player-height\s*">', html):
+ self.offline()
+
+ if "We have been receiving a large volume of requests from your network." in html:
+ self.tempOffline()
+
+ #get config
+ use3d = self.getConfig("3d")
+ if use3d:
+ quality = {"sd": 82, "hd": 84, "fullhd": 85, "240p": 83, "360p": 82,
+ "480p": 82, "720p": 84, "1080p": 85, "3072p": 85}
+ else:
+ quality = {"sd": 18, "hd": 22, "fullhd": 37, "240p": 5, "360p": 18,
+ "480p": 35, "720p": 22, "1080p": 37, "3072p": 38}
+ desired_fmt = self.getConfig("fmt")
+ if desired_fmt and desired_fmt not in self.formats:
+ self.logWarning("FMT %d unknown - using default." % desired_fmt)
+ desired_fmt = 0
+ if not desired_fmt:
+ desired_fmt = quality.get(self.getConfig("quality"), 18)
+
+ #parse available streams
+ streams = re.search(r'"url_encoded_fmt_stream_map": "(.*?)",', html).group(1)
+ streams = [x.split('\u0026') for x in streams.split(',')]
+ streams = [dict((y.split('=', 1)) for y in x) for x in streams]
+ streams = [(int(x['itag']), unquote(x['url'])) for x in streams]
+ #self.logDebug("Found links: %s" % streams)
+ self.logDebug("AVAILABLE STREAMS: %s" % [x[0] for x in streams])
+
+ #build dictionary of supported itags (3D/2D)
+ allowed = lambda x: self.getConfig(self.formats[x][0])
+ streams = [x for x in streams if x[0] in self.formats and allowed(x[0])]
+ if not streams:
+ self.fail("No available stream meets your preferences")
+ fmt_dict = dict([x for x in streams if self.formats[x[0]][4] == use3d] or streams)
+
+ self.logDebug("DESIRED STREAM: ITAG:%d (%s) %sfound, %sallowed" %
+ (desired_fmt, "%s %dx%d Q:%d 3D:%s" % self.formats[desired_fmt],
+ "" if desired_fmt in fmt_dict else "NOT ", "" if allowed(desired_fmt) else "NOT "))
+
+ #return fmt nearest to quality index
+ if desired_fmt in fmt_dict and allowed(desired_fmt):
+ fmt = desired_fmt
+ else:
+ sel = lambda x: self.formats[x][3] # select quality index
+ comp = lambda x, y: abs(sel(x) - sel(y))
+
+ self.logDebug("Choosing nearest fmt: %s" % [(x, allowed(x), comp(x, desired_fmt)) for x in fmt_dict.keys()])
+ fmt = reduce(lambda x, y: x if comp(x, desired_fmt) <= comp(y, desired_fmt) and
+ sel(x) > sel(y) else y, fmt_dict.keys())
+
+ self.logDebug("Chosen fmt: %s" % fmt)
+ url = fmt_dict[fmt]
+ self.logDebug("URL: %s" % url)
+
+ #set file name
+ file_suffix = self.formats[fmt][0] if fmt in self.formats else ".flv"
+ file_name_pattern = '<meta name="title" content="(.+?)">'
+ name = re.search(file_name_pattern, html).group(1).replace("/", "")
+
+ # Cleaning invalid characters from the file name
+ name = name.encode('ascii', 'replace')
+ for c in self.invalidChars:
+ name = name.replace(c, '_')
+
+ pyfile.name = html_unescape(name)
+
+ time = re.search(r"t=((\d+)m)?(\d+)s", pyfile.url)
+ ffmpeg = which("ffmpeg")
+ if ffmpeg and time:
+ m, s = time.groups()[1:]
+ if m is None:
+ m = "0"
+
+ pyfile.name += " (starting at %s:%s)" % (m, s)
+ pyfile.name += file_suffix
+
+ filename = self.download(url)
+
+ if ffmpeg and time:
+ inputfile = filename + "_"
+ os.rename(filename, inputfile)
+
+ subprocess.call([
+ ffmpeg,
+ "-ss", "00:%s:%s" % (m, s),
+ "-i", inputfile,
+ "-vcodec", "copy",
+ "-acodec", "copy",
+ filename])
+ os.remove(inputfile)
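
The stream choice in process() above boils down to: among the allowed itags, take the one whose quality rank is closest to the desired one. A standalone sketch of that selection with made-up toy values (not the plugin's real stream parsing):

# Sketch of the "nearest quality rank" selection used in YoutubeCom.process().
# formats maps itag -> (ext, width, height, quality_rank, is_3d); values are illustrative.
formats = {18: (".mp4", 480, 360, 2, False),
           22: (".mp4", 1280, 720, 8, False),
           37: (".mp4", 1920, 1080, 9, False)}
available = {18: "http://example/18", 37: "http://example/37"}  # stand-in for fmt_dict
desired = 22  # e.g. the itag behind the "hd" preset

sel = lambda x: formats[x][3]              # quality rank of an itag
comp = lambda x, y: abs(sel(x) - sel(y))   # rank distance between two itags

fmt = reduce(lambda x, y: x if comp(x, desired) <= comp(y, desired) and sel(x) > sel(y) else y,
             available.keys())
print fmt  # -> 37: rank 9 is closer to the desired rank 8 than rank 2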
diff --git a/pyload/plugins/hoster/ZDF.py b/pyload/plugins/hoster/ZDF.py
new file mode 100644
index 000000000..d7bd5469a
--- /dev/null
+++ b/pyload/plugins/hoster/ZDF.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from xml.etree.ElementTree import fromstring
+
+from pyload.plugins.Hoster import Hoster
+
+
+# Based on zdfm by Roland Beermann (http://github.com/enkore/zdfm/)
+class ZDF(Hoster):
+ __name__ = "ZDF Mediathek"
+ __type__ = "hoster"
+ __version__ = "0.8"
+
+ __pattern__ = r'http://(?:www\.)?zdf\.de/ZDFmediathek/[^0-9]*([0-9]+)[^0-9]*'
+
+ __description__ = """ZDF.de hoster plugin"""
+ __author_name__ = None
+ __author_mail__ = None
+
+ XML_API = "http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?id=%i"
+
+
+ @staticmethod
+ def video_key(video):
+ return (
+ int(video.findtext("videoBitrate", "0")),
+ any(f.text == "progressive" for f in video.iter("facet")),
+ )
+
+ @staticmethod
+ def video_valid(video):
+ return video.findtext("url").startswith("http") and video.findtext("url").endswith(".mp4") and \
+ video.findtext("facets/facet").startswith("progressive")
+
+ @staticmethod
+ def get_id(url):
+ return int(re.search(r"[^0-9]*([0-9]{4,})[^0-9]*", url).group(1))
+
+ def process(self, pyfile):
+ xml = fromstring(self.load(self.XML_API % self.get_id(pyfile.url)))
+
+ status = xml.findtext("./status/statuscode")
+ if status != "ok":
+ self.fail("Error retrieving manifest.")
+
+ video = xml.find("video")
+ title = video.findtext("information/title")
+
+ pyfile.name = title
+
+ target_url = sorted((v for v in video.iter("formitaet") if self.video_valid(v)),
+ key=self.video_key)[-1].findtext("url")
+
+ self.download(target_url)
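
The selection here sorts every "formitaet" node by (bitrate, progressive) and takes the last one, i.e. the highest-bitrate progressive stream. A minimal sketch with a hand-made XML snippet that mirrors the element names assumed by the code above (not the real ZDF API response):

from xml.etree.ElementTree import fromstring

xml = fromstring("""<video>
  <formitaet><url>http://cdn/low.mp4</url><videoBitrate>800</videoBitrate>
    <facets><facet>progressive</facet></facets></formitaet>
  <formitaet><url>http://cdn/high.mp4</url><videoBitrate>2000</videoBitrate>
    <facets><facet>progressive</facet></facets></formitaet>
</video>""")

def video_key(video):
    # same key as ZDF.video_key: bitrate first, progressive facet as tie-breaker
    return (int(video.findtext("videoBitrate", "0")),
            any(f.text == "progressive" for f in video.iter("facet")))

best = sorted(xml.iter("formitaet"), key=video_key)[-1]
print best.findtext("url")  # -> http://cdn/high.mp4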
diff --git a/pyload/plugins/hoster/ZeveraCom.py b/pyload/plugins/hoster/ZeveraCom.py
new file mode 100644
index 000000000..f76290ea5
--- /dev/null
+++ b/pyload/plugins/hoster/ZeveraCom.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Hoster import Hoster
+
+
+class ZeveraCom(Hoster):
+ __name__ = "ZeveraCom"
+ __type__ = "hoster"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?zevera.com/.*'
+
+ __description__ = """Zevera.com hoster plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "zevera.com")
+ self.fail("No zevera.com account provided")
+
+ self.logDebug("zevera.com: Old URL: %s" % pyfile.url)
+
+ if self.account.getAPIData(self.req, cmd="checklink", olink=pyfile.url) != "Alive":
+ self.fail("Offline or not downloadable - contact Zevera support")
+
+ header = self.account.getAPIData(self.req, just_header=True, cmd="generatedownloaddirect", olink=pyfile.url)
+ if not "location" in header:
+ self.fail("Unable to initialize download - contact Zevera support")
+
+ self.download(header['location'], disposition=True)
+
+ check = self.checkDownload({"error": 'action="ErrorDownload.aspx'})
+ if check == "error":
+ self.fail("Error response received - contact Zevera support")
+
+ # BitAPI not used - defunct, probably abandoned by Zevera
+ #
+ # api_url = "http://zevera.com/API.ashx"
+ #
+ # def process(self, pyfile):
+ # if not self.account:
+ # self.logError(_("Please enter your zevera.com account or deactivate this plugin"))
+ # self.fail("No zevera.com account provided")
+ #
+ # self.logDebug("zevera.com: Old URL: %s" % pyfile.url)
+ #
+ # last_size = retries = 0
+ # olink = pyfile.url #quote(pyfile.url.encode('utf_8'))
+ #
+ # for _ in xrange(100):
+ # self.retData = self.account.loadAPIRequest(self.req, cmd = 'download_request', olink = olink)
+ # self.checkAPIErrors(self.retData)
+ #
+ # if self.retData['FileInfo']['StatusID'] == 100:
+ # break
+ # elif self.retData['FileInfo']['StatusID'] == 99:
+ # self.fail('Failed to initialize download (99)')
+ # else:
+ # if self.retData['FileInfo']['Progress']['BytesReceived'] <= last_size:
+ # if retries >= 6:
+ # self.fail('Failed to initialize download (%d)' % self.retData['FileInfo']['StatusID'] )
+ # retries += 1
+ # else:
+ # retries = 0
+ #
+ # last_size = self.retData['FileInfo']['Progress']['BytesReceived']
+ #
+ # self.setWait(self.retData['Update_Wait'])
+ # self.wait()
+ #
+ # pyfile.name = self.retData['FileInfo']['RealFileName']
+ # pyfile.size = self.retData['FileInfo']['FileSizeInBytes']
+ #
+ # self.retData = self.account.loadAPIRequest(self.req, cmd = 'download_start',
+ # FileID = self.retData['FileInfo']['FileID'])
+ # self.checkAPIErrors(self.retData)
+ #
+ # self.download(self.api_url, get = {
+ # 'cmd': "open_stream",
+ # 'login': self.account.loginname,
+ # 'pass': self.account.password,
+ # 'FileID': self.retData['FileInfo']['FileID'],
+ # 'startBytes': 0
+ # }
+ # )
+ #
+ # def checkAPIErrors(self, retData):
+ # if not retData:
+ # self.fail('Unknown API response')
+ #
+ # if retData['ErrorCode']:
+ # self.logError(retData['ErrorCode'], retData['ErrorMessage'])
+ # #self.fail('ERROR: ' + retData['ErrorMessage'])
+ #
+ # if pyfile.size / 1024000 > retData['AccountInfo']['AvailableTODAYTrafficForUseInMBytes']:
+ # self.logWarning("Not enough data left to download the file")
+ #
+ # def crazyDecode(self, ustring):
+ # # accepts decoded ie. unicode string - API response is double-quoted, double-utf8-encoded
+ # # no idea what the proper order of calling these functions would be :-/
+ # return html_unescape(unquote(unquote(ustring.replace(
+ # '@DELIMITER@','#'))).encode('raw_unicode_escape').decode('utf-8'))
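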
diff --git a/pyload/plugins/hoster/ZippyshareCom.py b/pyload/plugins/hoster/ZippyshareCom.py
new file mode 100644
index 000000000..d6b7375e2
--- /dev/null
+++ b/pyload/plugins/hoster/ZippyshareCom.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://www13.zippyshare.com/v/18665333/file.html
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class ZippyshareCom(SimpleHoster):
+ __name__ = "ZippyshareCom"
+ __type__ = "hoster"
+ __version__ = "0.49"
+
+ __pattern__ = r'(?P<HOST>http://www\d{0,2}\.zippyshare.com)/v(?:/|iew.jsp.*key=)(?P<KEY>\d+)'
+
+ __description__ = """Zippyshare.com hoster plugin"""
+ __author_name__ = ("spoob", "zoidberg", "stickell", "skylab")
+ __author_mail__ = ("spoob@pyload.org", "zoidberg@mujmail.cz", "l.stickell@yahoo.it", "development@sky-lab.de")
+
+ FILE_NAME_PATTERN = r'<title>Zippyshare\.com - (?P<N>[^<]+)</title>'
+ FILE_SIZE_PATTERN = r'>Size:</font>\s*<font [^>]*>(?P<S>[0-9.,]+) (?P<U>[kKMG]+)i?B</font><br />'
+ FILE_INFO_PATTERN = r'document\.getElementById\(\'dlbutton\'\)\.href = "[^;]*/(?P<N>[^"]+)";'
+ OFFLINE_PATTERN = r'>File does not exist on this server</div>'
+
+ SH_COOKIES = [(".zippyshare.com", "ziplocale", "en")]
+
+
+ def setup(self):
+ self.multiDL = True
+
+ def handleFree(self):
+ url = self.get_file_url()
+ if not url:
+ self.fail("Download URL not found.")
+ self.logDebug("Download URL: %s" % url)
+ self.download(url)
+
+ def get_file_url(self):
+ """returns the absolute downloadable filepath"""
+ url_parts = re.search(r'(addthis:url="(http://www(\d+).zippyshare.com/v/(\d*)/file.html))', self.html)
+ number = url_parts.group(4)
+ check = re.search(r'<script type="text/javascript">([^<]*?)(var a = (\d*);)', self.html)
+ if check:
+ a = int(check.group(3))
+ k = int(re.search(r'<script type="text/javascript">([^<]*?)(\d*%(\d*))', self.html).group(3))
+ checksum = ((a + 3) % k) * ((a + 3) % 3) + 18
+ else:
+ # This might work but is insecure
+ # checksum = eval(re.search("((\d*)\s\%\s(\d*)\s\+\s(\d*)\s\%\s(\d*))", self.html).group(0))
+
+ m = re.search(r"((?P<a>\d*)\s%\s(?P<b>\d*)\s\+\s(?P<c>\d*)\s%\s(?P<k>\d*))", self.html)
+ if m is None:
+ self.parseError("Unable to detect values to calculate direct link")
+ a = int(m.group("a"))
+ b = int(m.group("b"))
+ c = int(m.group("c"))
+ k = int(m.group("k"))
+ if a == c:
+ checksum = ((a % b) + (a % k))
+ else:
+ checksum = ((a % b) + (c % k))
+
+ self.logInfo('Checksum: %s' % checksum)
+
+ filename = re.search(r'>Name:</font>\s*<font [^>]*>(?P<N>[^<]+)</font><br />', self.html).group('N')
+
+ url = "/d/%s/%s/%s" % (number, checksum, filename)
+ self.logInfo(self.file_info['HOST'] + url)
+ return self.file_info['HOST'] + url
+
+
+getInfo = create_getInfo(ZippyshareCom)
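
The direct link above is assembled from numbers scraped out of the page's JavaScript; the checksum is simple modular arithmetic. A worked sketch of both code paths with made-up numbers:

# Path 1: the page contains "var a = <a>;" plus a "... % <k>" expression.
a, k = 735, 3
checksum1 = ((a + 3) % k) * ((a + 3) % 3) + 18
print checksum1  # -> 18, because 738 % 3 == 0

# Path 2: the page contains an "<a> % <b> + <c> % <k>" expression instead.
a, b, c, k = 735, 293, 800, 3
if a == c:
    checksum2 = (a % b) + (a % k)
else:
    checksum2 = (a % b) + (c % k)
print checksum2  # -> 151: 735 % 293 == 149 and 800 % 3 == 2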
diff --git a/pyload/plugins/hoster/__init__.py b/pyload/plugins/hoster/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/hoster/__init__.py
diff --git a/pyload/plugins/internal/AbstractExtractor.py b/pyload/plugins/internal/AbstractExtractor.py
new file mode 100644
index 000000000..d1d1a09cb
--- /dev/null
+++ b/pyload/plugins/internal/AbstractExtractor.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+
+class ArchiveError(Exception):
+ pass
+
+
+class CRCError(Exception):
+ pass
+
+
+class WrongPassword(Exception):
+ pass
+
+
+class AbtractExtractor:
+ __name__ = "AbtractExtractor"
+ __version__ = "0.1"
+
+ __description__ = """Abtract extractor plugin"""
+ __author_name__ = "pyLoad Team"
+ __author_mail__ = "admin@pyload.org"
+
+
+ @staticmethod
+ def checkDeps():
+ """ Check if system statisfy dependencies
+ :return: boolean
+ """
+ return True
+
+ @staticmethod
+ def getTargets(files_ids):
+ """ Filter suited targets from list of filename id tuple list
+ :param files_ids: List of filepathes
+ :return: List of targets, id tuple list
+ """
+ raise NotImplementedError
+
+ def __init__(self, m, file, out, fullpath, overwrite, excludefiles, renice):
+ """Initialize extractor for specific file
+
+ :param m: ExtractArchive Hook plugin
+ :param file: Absolute filepath
+ :param out: Absolute path to destination directory
+ :param fullpath: extract to fullpath
+ :param overwrite: Overwrite existing archives
+ :param renice: Renice value
+ """
+ self.m = m
+ self.file = file
+ self.out = out
+ self.fullpath = fullpath
+ self.overwrite = overwrite
+ self.excludefiles = excludefiles
+ self.renice = renice
+ self.files = [] #: Store extracted files here
+
+ def init(self):
+ """ Initialize additional data structures """
+ pass
+
+ def checkArchive(self):
+ """Check if password if needed. Raise ArchiveError if integrity is
+ questionable.
+
+ :return: boolean
+ :raises ArchiveError
+ """
+ return False
+
+ def checkPassword(self, password):
+ """ Check if the given password is/might be correct.
+ If it can not be decided at this point return true.
+
+ :param password:
+ :return: boolean
+ """
+ return True
+
+ def extract(self, progress, password=None):
+ """Extract the archive. Raise specific errors in case of failure.
+
+ :param progress: Progress function, call this to update status
+ :param password: password to use
+ :raises WrongPassword
+ :raises CRCError
+ :raises ArchiveError
+ :return:
+ """
+ raise NotImplementedError
+
+ def getDeleteFiles(self):
+ """Return list of files to delete, do *not* delete them here.
+
+ :return: List with paths of files to delete
+ """
+ raise NotImplementedError
+
+ def getExtractedFiles(self):
+ """Populate self.files at some point while extracting"""
+ return self.files
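
As a reading aid, a hypothetical minimal extractor built on this interface (the UnRar and UnZip plugins further down are the real reference implementations; the tar support here is only an illustration):

import os
import tarfile

from pyload.plugins.internal.AbstractExtractor import AbtractExtractor, ArchiveError


class UnTar(AbtractExtractor):
    __name__ = "UnTar"
    __version__ = "0.1"

    @staticmethod
    def checkDeps():
        return True  # tarfile is stdlib, nothing to check

    @staticmethod
    def getTargets(files_ids):
        # keep only .tar archives from the (filepath, id) tuples
        return [(f, id) for f, id in files_ids if f.endswith(".tar")]

    def extract(self, progress, password=None):
        progress(0)
        try:
            archive = tarfile.open(self.file)
            self.files = [os.path.join(self.out, name) for name in archive.getnames()]
            archive.extractall(self.out)
        except tarfile.TarError, e:
            raise ArchiveError(str(e))
        progress(100)

    def getDeleteFiles(self):
        return [self.file]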
diff --git a/pyload/plugins/internal/CaptchaService.py b/pyload/plugins/internal/CaptchaService.py
new file mode 100644
index 000000000..b247ba654
--- /dev/null
+++ b/pyload/plugins/internal/CaptchaService.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import random
+
+
+class CaptchaService:
+ __name__ = "CaptchaService"
+ __version__ = "0.05"
+
+ __description__ = """Captcha service plugin"""
+ __author_name__ = "pyLoad Team"
+ __author_mail__ = "admin@pyload.org"
+
+
+ def __init__(self, plugin):
+ self.plugin = plugin
+
+
+class ReCaptcha:
+ RECAPTCHA_KEY_PATTERN = r"https?://(?:www\.)?google\.com/recaptcha/api/challenge\?k=(?P<key>\w+)"
+ RECAPTCHA_KEY_AJAX_PATTERN = r"Recaptcha\.create\s*\(\s*[\"'](?P<key>\w+)[\"']\s*,"
+
+ recaptcha_key = None
+
+
+ def __init__(self, plugin):
+ self.plugin = plugin
+
+ def detect_key(self, html):
+ m = re.search(self.RECAPTCHA_KEY_PATTERN, html)
+ if m is None:
+ m = re.search(self.RECAPTCHA_KEY_AJAX_PATTERN, html)
+ if m:
+ self.recaptcha_key = m.group('key')
+ return self.recaptcha_key
+ else:
+ return None
+
+ def challenge(self, key=None):
+ # use the detected key if none was passed explicitly
+ if key is None:
+ key = self.recaptcha_key
+ if not key:
+ raise TypeError("ReCaptcha key not found")
+
+ js = self.plugin.req.load("http://www.google.com/recaptcha/api/challenge", get={"k": key}, cookies=True)
+
+ try:
+ challenge = re.search("challenge : '(.*?)',", js).group(1)
+ server = re.search("server : '(.*?)',", js).group(1)
+ except:
+ self.plugin.fail("recaptcha error")
+ result = self.result(server, challenge)
+
+ return challenge, result
+
+ def result(self, server, challenge):
+ return self.plugin.decryptCaptcha("%simage" % server, get={"c": challenge},
+ cookies=True, forceUser=True, imgtype="jpg")
+
+
+class AdsCaptcha(CaptchaService):
+
+ def challenge(self, src):
+ js = self.plugin.req.load(src, cookies=True)
+
+ try:
+ challenge = re.search("challenge: '(.*?)',", js).group(1)
+ server = re.search("server: '(.*?)',", js).group(1)
+ except:
+ self.plugin.fail("adscaptcha error")
+ result = self.result(server, challenge)
+
+ return challenge, result
+
+ def result(self, server, challenge):
+ return self.plugin.decryptCaptcha("%sChallenge.aspx" % server, get={"cid": challenge, "dummy": random()},
+ cookies=True, imgtype="jpg")
+
+
+class SolveMedia(CaptchaService):
+
+ def challenge(self, src):
+ html = self.plugin.req.load("http://api.solvemedia.com/papi/challenge.noscript?k=%s" % src, cookies=True)
+ try:
+ challenge = re.search(r'<input type=hidden name="adcopy_challenge" id="adcopy_challenge" value="([^"]+)">',
+ html).group(1)
+ except:
+ self.plugin.fail("solvemedia error")
+ result = self.result(challenge)
+
+ return challenge, result
+
+ def result(self, challenge):
+ return self.plugin.decryptCaptcha("http://api.solvemedia.com/papi/media?c=%s" % challenge, imgtype="gif")
diff --git a/pyload/plugins/internal/DeadCrypter.py b/pyload/plugins/internal/DeadCrypter.py
new file mode 100644
index 000000000..ea9c414cb
--- /dev/null
+++ b/pyload/plugins/internal/DeadCrypter.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Crypter import Crypter as _Crypter
+
+
+class DeadCrypter(_Crypter):
+ __name__ = "DeadCrypter"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = None
+
+ __description__ = """Crypter is no longer available"""
+ __author_name__ = "stickell"
+ __author_mail__ = "l.stickell@yahoo.it"
+
+
+ def setup(self):
+ self.fail("Crypter is no longer available")
diff --git a/pyload/plugins/internal/DeadHoster.py b/pyload/plugins/internal/DeadHoster.py
new file mode 100644
index 000000000..0b2398020
--- /dev/null
+++ b/pyload/plugins/internal/DeadHoster.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Hoster import Hoster as _Hoster
+
+
+def create_getInfo(plugin):
+
+ def getInfo(urls):
+ yield [('#N/A: ' + url, 0, 1, url) for url in urls]
+
+ return getInfo
+
+
+class DeadHoster(_Hoster):
+ __name__ = "DeadHoster"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = None
+
+ __description__ = """Hoster is no longer available"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+
+ def setup(self):
+ self.fail("Hoster is no longer available")
diff --git a/pyload/plugins/internal/MultiHoster.py b/pyload/plugins/internal/MultiHoster.py
new file mode 100644
index 000000000..d99ae6ff9
--- /dev/null
+++ b/pyload/plugins/internal/MultiHoster.py
@@ -0,0 +1,192 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Hook import Hook
+from pyload.utils import remove_chars
+
+
+class MultiHoster(Hook):
+ __name__ = "MultiHoster"
+ __type__ = "hook"
+ __version__ = "0.20"
+
+ __description__ = """Generic MultiHoster plugin"""
+ __author_name__ = "pyLoad Team"
+ __author_mail__ = "admin@pyload.org"
+
+ replacements = [("2shared.com", "twoshared.com"), ("4shared.com", "fourshared.com"), ("cloudnator.com", "shragle.com"),
+ ("ifile.it", "filecloud.io"), ("easy-share.com", "crocko.com"), ("freakshare.net", "freakshare.com"),
+ ("hellshare.com", "hellshare.cz"), ("share-rapid.cz", "sharerapid.com"), ("sharerapid.cz", "sharerapid.com"),
+ ("ul.to", "uploaded.to"), ("uploaded.net", "uploaded.to"), ("1fichier.com", "onefichier.com")]
+ ignored = []
+ interval = 24 * 60 * 60 #: reload hosters daily
+
+
+ def setup(self):
+ self.hosters = []
+ self.supported = []
+ self.new_supported = []
+
+ def getConfig(self, option, default=''):
+ """getConfig with default value - subclass may not implements all config options"""
+ try:
+ # Fixed loop due to getConf deprecation in 0.4.10
+ return super(MultiHoster, self).getConfig(option)
+ except KeyError:
+ return default
+
+ def getHosterCached(self):
+ if not self.hosters:
+ try:
+ hosterSet = self.toHosterSet(self.getHoster()) - set(self.ignored)
+ except Exception, e:
+ self.logError("%s" % str(e))
+ return []
+
+ try:
+ configMode = self.getConfig('hosterListMode', 'all')
+ if configMode in ("listed", "unlisted"):
+ configSet = self.toHosterSet(self.getConfig('hosterList', '').replace('|', ',').replace(';', ',').split(','))
+
+ if configMode == "listed":
+ hosterSet &= configSet
+ else:
+ hosterSet -= configSet
+
+ except Exception, e:
+ self.logError("%s" % str(e))
+
+ self.hosters = list(hosterSet)
+
+ return self.hosters
+
+ def toHosterSet(self, hosters):
+ hosters = set((str(x).strip().lower() for x in hosters))
+
+ for rep in self.replacements:
+ if rep[0] in hosters:
+ hosters.remove(rep[0])
+ hosters.add(rep[1])
+
+ hosters.discard('')
+ return hosters
+
+ def getHoster(self):
+ """Load list of supported hoster
+
+ :return: List of domain names
+ """
+ raise NotImplementedError
+
+ def coreReady(self):
+ if self.cb:
+ self.core.scheduler.removeJob(self.cb)
+
+ self.setConfig("activated", True) #: config not in sync after plugin reload
+
+ cfg_interval = self.getConfig("interval", None) #: reload interval in hours
+ if cfg_interval is not None:
+ self.interval = cfg_interval * 60 * 60
+
+ if self.interval:
+ self._periodical()
+ else:
+ self.periodical()
+
+ def initPeriodical(self):
+ pass
+
+ def periodical(self):
+ """reload hoster list periodically"""
+ self.logInfo("Reloading supported hoster list")
+
+ old_supported = self.supported
+ self.supported, self.new_supported, self.hosters = [], [], []
+
+ self.overridePlugins()
+
+ old_supported = [hoster for hoster in old_supported if hoster not in self.supported]
+ if old_supported:
+ self.logDebug("UNLOAD: %s" % ", ".join(old_supported))
+ for hoster in old_supported:
+ self.unloadHoster(hoster)
+
+ def overridePlugins(self):
+ pluginMap = {}
+ for name in self.core.pluginManager.hosterPlugins.keys():
+ pluginMap[name.lower()] = name
+
+ accountList = [name.lower() for name, data in self.core.accountManager.accounts.items() if data]
+ excludedList = []
+
+ for hoster in self.getHosterCached():
+ name = remove_chars(hoster.lower(), "-.")
+
+ if name in accountList:
+ excludedList.append(hoster)
+ else:
+ if name in pluginMap:
+ self.supported.append(pluginMap[name])
+ else:
+ self.new_supported.append(hoster)
+
+ if not self.supported and not self.new_supported:
+ self.logError(_("No Hoster loaded"))
+ return
+
+ module = self.core.pluginManager.getPlugin(self.__name__)
+ klass = getattr(module, self.__name__)
+
+ # inject plugin override
+ self.logDebug("Overwritten Hosters: %s" % ", ".join(sorted(self.supported)))
+ for hoster in self.supported:
+ dict = self.core.pluginManager.hosterPlugins[hoster]
+ dict['new_module'] = module
+ dict['new_name'] = self.__name__
+
+ if excludedList:
+ self.logInfo("The following hosters were not overwritten - account exists: %s" % ", ".join(sorted(excludedList)))
+
+ if self.new_supported:
+ self.logDebug("New Hosters: %s" % ", ".join(sorted(self.new_supported)))
+
+ # create new regexp
+ regexp = r".*(%s).*" % "|".join([x.replace(".", "\\.") for x in self.new_supported])
+ if hasattr(klass, "__pattern__") and isinstance(klass.__pattern__, basestring) and '://' in klass.__pattern__:
+ regexp = r"%s|%s" % (klass.__pattern__, regexp)
+
+ self.logDebug("Regexp: %s" % regexp)
+
+ dict = self.core.pluginManager.hosterPlugins[self.__name__]
+ dict['pattern'] = regexp
+ dict['re'] = re.compile(regexp)
+
+ def unloadHoster(self, hoster):
+ dict = self.core.pluginManager.hosterPlugins[hoster]
+ if "module" in dict:
+ del dict['module']
+
+ if "new_module" in dict:
+ del dict['new_module']
+ del dict['new_name']
+
+ def unload(self):
+ """Remove override for all hosters. Scheduler job is removed by hookmanager"""
+ for hoster in self.supported:
+ self.unloadHoster(hoster)
+
+ # reset pattern
+ klass = getattr(self.core.pluginManager.getPlugin(self.__name__), self.__name__)
+ dict = self.core.pluginManager.hosterPlugins[self.__name__]
+ dict['pattern'] = getattr(klass, "__pattern__", r'^unmatchable$')
+ dict['re'] = re.compile(dict['pattern'])
+
+ def downloadFailed(self, pyfile):
+ """remove plugin override if download fails but not if file is offline/temp.offline"""
+ if pyfile.hasStatus("failed") and self.getConfig("unloadFailing", True):
+ hdict = self.core.pluginManager.hosterPlugins[pyfile.pluginname]
+ if "new_name" in hdict and hdict['new_name'] == self.__name__:
+ self.logDebug("Unload MultiHoster", pyfile.pluginname, hdict)
+ self.unloadHoster(pyfile.pluginname)
+ pyfile.setStatus("queued")
diff --git a/pyload/plugins/internal/SimpleCrypter.py b/pyload/plugins/internal/SimpleCrypter.py
new file mode 100644
index 000000000..6e639c946
--- /dev/null
+++ b/pyload/plugins/internal/SimpleCrypter.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Crypter import Crypter
+from pyload.plugins.internal.SimpleHoster import PluginParseError, replace_patterns, set_cookies
+from pyload.utils import html_unescape
+
+
+class SimpleCrypter(Crypter):
+ __name__ = "SimpleCrypter"
+ __type__ = "crypter"
+ __version__ = "0.10"
+
+ __pattern__ = None
+
+ __description__ = """Simple decrypter plugin"""
+ __author_name__ = ("stickell", "zoidberg", "Walter Purcaro")
+ __author_mail__ = ("l.stickell@yahoo.it", "zoidberg@mujmail.cz", "vuolter@gmail.com")
+
+ """
+ Following patterns should be defined by each crypter:
+
+ LINK_PATTERN: group(1) must be a download link or a regex to catch more links
+ example: LINK_PATTERN = r'<div class="link"><a href="(http://speedload.org/\w+)'
+
+ TITLE_PATTERN: (optional) The group defined by 'title' should be the title
+ example: TITLE_PATTERN = r'<title>Files of: (?P<title>[^<]+) folder</title>'
+
+ OFFLINE_PATTERN: (optional) Matches when the file is no longer available online
+ example: OFFLINE_PATTERN = r'File (deleted|not found)'
+
+ TEMP_OFFLINE_PATTERN: (optional) Checks if the file is temporarily offline
+ example: TEMP_OFFLINE_PATTERN = r'Server maintainance'
+
+
+ If the links cannot be extracted with LINK_PATTERN alone, you can override the getLinks method.
+
+ If the links are spread across multiple pages you need to define a pattern:
+
+ PAGES_PATTERN: The group defined by 'pages' must be the total number of pages
+ example: PAGES_PATTERN = r'Pages: (?P<pages>\d+)'
+
+ and a function:
+
+ loadPage(self, page_n):
+ return the html of the page number 'page_n'
+ """
+
+ URL_REPLACEMENTS = []
+
+ SH_COOKIES = True # or False or list of tuples [(domain, name, value)]
+
+
+ def setup(self):
+ if isinstance(self.SH_COOKIES, list):
+ set_cookies(self.req.cj, self.SH_COOKIES)
+
+ def decrypt(self, pyfile):
+ pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
+
+ self.html = self.load(pyfile.url, decode=True)
+
+ self.checkOnline()
+
+ package_name, folder_name = self.getPackageNameAndFolder()
+
+ self.package_links = self.getLinks()
+
+ if hasattr(self, 'PAGES_PATTERN') and hasattr(self, 'loadPage'):
+ self.handleMultiPages()
+
+ self.logDebug('Package has %d links' % len(self.package_links))
+
+ if self.package_links:
+ self.packages = [(package_name, self.package_links, folder_name)]
+ else:
+ self.fail('Could not extract any links')
+
+ def getLinks(self):
+ """
+ Returns the links extracted from self.html
+ You should override this only if it's impossible to extract links using only the LINK_PATTERN.
+ """
+ return re.findall(self.LINK_PATTERN, self.html)
+
+ def checkOnline(self):
+ if hasattr(self, "OFFLINE_PATTERN") and re.search(self.OFFLINE_PATTERN, self.html):
+ self.offline()
+ elif hasattr(self, "TEMP_OFFLINE_PATTERN") and re.search(self.TEMP_OFFLINE_PATTERN, self.html):
+ self.tempOffline()
+
+ def getPackageNameAndFolder(self):
+ if hasattr(self, 'TITLE_PATTERN'):
+ m = re.search(self.TITLE_PATTERN, self.html)
+ if m:
+ name = folder = html_unescape(m.group('title').strip())
+ self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+ return name, folder
+
+ name = self.pyfile.package().name
+ folder = self.pyfile.package().folder
+ self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+ return name, folder
+
+ def handleMultiPages(self):
+ pages = re.search(self.PAGES_PATTERN, self.html)
+ if pages:
+ pages = int(pages.group('pages'))
+ else:
+ pages = 1
+
+ for p in xrange(2, pages + 1):
+ self.html = self.loadPage(p)
+ self.package_links += self.getLinks()
+
+ def parseError(self, msg):
+ raise PluginParseError(msg)
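
A minimal crypter realizing the patterns documented above would look like this; the host name and regexes are invented for illustration, and decrypt() itself comes entirely from SimpleCrypter:

from pyload.plugins.internal.SimpleCrypter import SimpleCrypter


class ExampleFolderCom(SimpleCrypter):
    __name__ = "ExampleFolderCom"
    __type__ = "crypter"
    __version__ = "0.01"

    __pattern__ = r'http://(?:www\.)?examplefolder\.com/folder/\w+'

    LINK_PATTERN = r'<a class="file" href="(http://examplefolder\.com/file/\w+)"'
    TITLE_PATTERN = r'<h1>Folder: (?P<title>[^<]+)</h1>'
    OFFLINE_PATTERN = r'>Folder not found<'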
diff --git a/pyload/plugins/internal/SimpleHoster.py b/pyload/plugins/internal/SimpleHoster.py
new file mode 100644
index 000000000..ca320732f
--- /dev/null
+++ b/pyload/plugins/internal/SimpleHoster.py
@@ -0,0 +1,292 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+from urlparse import urlparse
+
+from pyload.network.CookieJar import CookieJar
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import fixup, html_unescape, parseFileSize
+
+
+def replace_patterns(string, ruleslist):
+ for r in ruleslist:
+ rf, rt = r
+ string = re.sub(rf, rt, string)
+ #self.logDebug(rf, rt, string)
+ return string
+
+
+def set_cookies(cj, cookies):
+ for cookie in cookies:
+ if isinstance(cookie, tuple) and len(cookie) == 3:
+ domain, name, value = cookie
+ cj.setCookie(domain, name, value)
+
+
+def parseHtmlTagAttrValue(attr_name, tag):
+ m = re.search(r"%s\s*=\s*([\"']?)((?<=\")[^\"]+|(?<=')[^']+|[^>\s\"'][^>\s]*)\1" % attr_name, tag, re.I)
+ return m.group(2) if m else None
+
+
+def parseHtmlForm(attr_str, html, input_names=None):
+ for form in re.finditer(r"(?P<tag><form[^>]*%s[^>]*>)(?P<content>.*?)</?(form|body|html)[^>]*>" % attr_str,
+ html, re.S | re.I):
+ inputs = {}
+ action = parseHtmlTagAttrValue("action", form.group('tag'))
+ for inputtag in re.finditer(r'(<(input|textarea)[^>]*>)([^<]*(?=</\2)|)', form.group('content'), re.S | re.I):
+ name = parseHtmlTagAttrValue("name", inputtag.group(1))
+ if name:
+ value = parseHtmlTagAttrValue("value", inputtag.group(1))
+ if not value:
+ inputs[name] = inputtag.group(3) or ''
+ else:
+ inputs[name] = value
+
+ if isinstance(input_names, dict):
+ # check input attributes
+ for key, val in input_names.items():
+ if key in inputs:
+ if isinstance(val, basestring) and inputs[key] == val:
+ continue
+ elif isinstance(val, tuple) and inputs[key] in val:
+ continue
+ elif hasattr(val, "search") and re.match(val, inputs[key]):
+ continue
+ break # attribute value does not match
+ else:
+ break # attribute name does not match
+ else:
+ return action, inputs # passed attribute check
+ else:
+ # no attribute check
+ return action, inputs
+
+ return {}, None # no matching form found
+
+
+def parseFileInfo(self, url='', html=''):
+ info = {"name": url, "size": 0, "status": 3}
+
+ if hasattr(self, "pyfile"):
+ url = self.pyfile.url
+
+ if hasattr(self, "req") and self.req.http.code == '404':
+ info['status'] = 1
+ else:
+ if not html and hasattr(self, "html"):
+ html = self.html
+ if isinstance(self.SH_BROKEN_ENCODING, (str, unicode)):
+ html = unicode(html, self.SH_BROKEN_ENCODING)
+ if hasattr(self, "html"):
+ self.html = html
+
+ if hasattr(self, "OFFLINE_PATTERN") and re.search(self.OFFLINE_PATTERN, html):
+ info['status'] = 1
+ elif hasattr(self, "FILE_OFFLINE_PATTERN") and re.search(self.FILE_OFFLINE_PATTERN, html): #@TODO: Remove in 0.4.10
+ info['status'] = 1
+ elif hasattr(self, "TEMP_OFFLINE_PATTERN") and re.search(self.TEMP_OFFLINE_PATTERN, html):
+ info['status'] = 6
+ else:
+ online = False
+ try:
+ info.update(re.match(self.__pattern__, url).groupdict())
+ except:
+ pass
+
+ for pattern in ("FILE_INFO_PATTERN", "FILE_NAME_PATTERN", "FILE_SIZE_PATTERN"):
+ try:
+ info.update(re.search(getattr(self, pattern), html).groupdict())
+ online = True
+ except AttributeError:
+ continue
+
+ if online:
+ # File online, return name and size
+ info['status'] = 2
+ if 'N' in info:
+ info['name'] = replace_patterns(info['N'], self.FILE_NAME_REPLACEMENTS)
+ if 'S' in info:
+ size = replace_patterns(info['S'] + info['U'] if 'U' in info else info['S'],
+ self.FILE_SIZE_REPLACEMENTS)
+ info['size'] = parseFileSize(size)
+ elif isinstance(info['size'], (str, unicode)):
+ if 'units' in info:
+ info['size'] += info['units']
+ info['size'] = parseFileSize(info['size'])
+
+ if hasattr(self, "file_info"):
+ self.file_info = info
+
+ return info['name'], info['size'], info['status'], url
+
+
+def create_getInfo(plugin):
+
+ def getInfo(urls):
+ for url in urls:
+ cj = CookieJar(plugin.__name__)
+ if isinstance(plugin.SH_COOKIES, list):
+ set_cookies(cj, plugin.SH_COOKIES)
+ file_info = parseFileInfo(plugin, url, getURL(replace_patterns(url, plugin.FILE_URL_REPLACEMENTS),
+ decode=not plugin.SH_BROKEN_ENCODING, cookies=cj))
+ yield file_info
+
+ return getInfo
+
+
+def timestamp():
+ return int(time() * 1000)
+
+
+class PluginParseError(Exception):
+
+ def __init__(self, msg):
+ Exception.__init__(self)
+ self.value = 'Parse error (%s) - plugin may be out of date' % msg
+
+ def __str__(self):
+ return repr(self.value)
+
+
+class SimpleHoster(Hoster):
+ __name__ = "SimpleHoster"
+ __type__ = "hoster"
+ __version__ = "0.35"
+
+ __pattern__ = None
+
+ __description__ = """Simple hoster plugin"""
+ __author_name__ = ("zoidberg", "stickell")
+ __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ """
+ Following patterns should be defined by each hoster:
+
+ FILE_INFO_PATTERN: Name and Size of the file
+ example: FILE_INFO_PATTERN = r'(?P<N>file_name) (?P<S>file_size) (?P<U>size_unit)'
+ or
+ FILE_NAME_PATTERN: Name that will be set for the file
+ example: FILE_NAME_PATTERN = r'(?P<N>file_name)'
+ FILE_SIZE_PATTERN: Size that will be checked for the file
+ example: FILE_SIZE_PATTERN = r'(?P<S>file_size) (?P<U>size_unit)'
+
+ OFFLINE_PATTERN: Matches when the file is no longer available online
+ example: OFFLINE_PATTERN = r'File (deleted|not found)'
+
+ TEMP_OFFLINE_PATTERN: Checks if the file is temporarily offline
+ example: TEMP_OFFLINE_PATTERN = r'Server maintainance'
+
+ PREMIUM_ONLY_PATTERN: (optional) Checks if the file can be downloaded only with a premium account
+ example: PREMIUM_ONLY_PATTERN = r'Premium account required'
+ """
+
+ FILE_NAME_REPLACEMENTS = [("&#?\w+;", fixup)]
+ FILE_SIZE_REPLACEMENTS = []
+ FILE_URL_REPLACEMENTS = []
+
+ SH_BROKEN_ENCODING = False # Set to True or encoding name if encoding in http header is not correct
+ SH_COOKIES = True # or False or list of tuples [(domain, name, value)]
+ SH_CHECK_TRAFFIC = False # True = force check traffic left for a premium account
+
+
+ def init(self):
+ self.file_info = {}
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = self.premium
+ if isinstance(self.SH_COOKIES, list):
+ set_cookies(self.req.cj, self.SH_COOKIES)
+
+ def process(self, pyfile):
+ pyfile.url = replace_patterns(pyfile.url, self.FILE_URL_REPLACEMENTS)
+ self.req.setOption("timeout", 120)
+ # Due to a 0.4.9 core bug self.load would keep previous cookies even if overridden by cookies parameter.
+ # Workaround using getURL. Can be reverted in 0.5 as the cookies bug has been fixed.
+ self.html = getURL(pyfile.url, decode=not self.SH_BROKEN_ENCODING, cookies=self.SH_COOKIES)
+ premium_only = hasattr(self, 'PREMIUM_ONLY_PATTERN') and re.search(self.PREMIUM_ONLY_PATTERN, self.html)
+ if not premium_only: # Usually premium-only pages don't show the file information
+ self.getFileInfo()
+
+ if self.premium and (not self.SH_CHECK_TRAFFIC or self.checkTrafficLeft()):
+ self.handlePremium()
+ elif premium_only:
+ self.fail("This link require a premium account")
+ else:
+ # This line is required due to the getURL workaround. Can be removed in 0.5
+ self.html = self.load(pyfile.url, decode=not self.SH_BROKEN_ENCODING, cookies=self.SH_COOKIES)
+ self.handleFree()
+
+ def load(self, url, get={}, post={}, ref=True, cookies=True, just_header=False, decode=False):
+ if type(url) == unicode:
+ url = url.encode('utf8')
+ return Hoster.load(self, url=url, get=get, post=post, ref=ref, cookies=cookies,
+ just_header=just_header, decode=decode)
+
+ def getFileInfo(self):
+ self.logDebug("URL: %s" % self.pyfile.url)
+
+ name, size, status = parseFileInfo(self)[:3]
+
+ if status == 1:
+ self.offline()
+ elif status == 6:
+ self.tempOffline()
+ elif status != 2:
+ self.logDebug(self.file_info)
+ self.parseError('File info')
+
+ if name:
+ self.pyfile.name = name
+ else:
+ self.pyfile.name = html_unescape(urlparse(self.pyfile.url).path.split("/")[-1])
+
+ if size:
+ self.pyfile.size = size
+ else:
+ self.logError("File size not parsed")
+
+ self.logDebug("FILE NAME: %s FILE SIZE: %s" % (self.pyfile.name, self.pyfile.size))
+ return self.file_info
+
+ def handleFree(self):
+ self.fail("Free download not implemented")
+
+ def handlePremium(self):
+ self.fail("Premium download not implemented")
+
+ def parseError(self, msg):
+ raise PluginParseError(msg)
+
+ def longWait(self, wait_time=None, max_tries=3):
+ if wait_time and isinstance(wait_time, (int, long, float)):
+ time_str = "%dh %dm" % divmod(wait_time / 60, 60)
+ else:
+ wait_time = 900
+ time_str = "(unknown time)"
+ max_tries = 100
+
+ self.logInfo("Download limit reached, reconnect or wait %s" % time_str)
+
+ self.setWait(wait_time, True)
+ self.wait()
+ self.retry(max_tries=max_tries, reason="Download limit reached")
+
+ def parseHtmlForm(self, attr_str='', input_names=None):
+ return parseHtmlForm(attr_str, self.html, input_names)
+
+ def checkTrafficLeft(self):
+ traffic = self.account.getAccountInfo(self.user, True)['trafficleft']
+ if traffic == -1:
+ return True
+ size = self.pyfile.size / 1024
+ self.logInfo("Filesize: %i KiB, Traffic left for user %s: %i KiB" % (size, self.user, traffic))
+ return size <= traffic
+
+ # TODO: Remove in 0.5
+ def wait(self, seconds=False, reconnect=False):
+ if seconds:
+ self.setWait(seconds, reconnect)
+ super(SimpleHoster, self).wait()
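
Analogously to SimpleCrypter, a minimal hoster on top of this base class only declares its patterns and a handleFree(); the host and regexes below are invented for illustration:

import re

from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo


class ExampleShareCom(SimpleHoster):
    __name__ = "ExampleShareCom"
    __type__ = "hoster"
    __version__ = "0.01"

    __pattern__ = r'http://(?:www\.)?exampleshare\.com/file/\w+'

    FILE_NAME_PATTERN = r'<h1>(?P<N>[^<]+)</h1>'
    FILE_SIZE_PATTERN = r'Size: (?P<S>[\d.]+) (?P<U>\w+)'
    OFFLINE_PATTERN = r'>File not found<'

    def handleFree(self):
        m = re.search(r'href="(http://dl\.exampleshare\.com/[^"]+)"', self.html)
        if m is None:
            self.parseError("Download link")
        self.download(m.group(1))


getInfo = create_getInfo(ExampleShareCom)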
diff --git a/pyload/plugins/internal/UnRar.py b/pyload/plugins/internal/UnRar.py
new file mode 100644
index 000000000..ed8478a3a
--- /dev/null
+++ b/pyload/plugins/internal/UnRar.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+
+import os
+import re
+
+from glob import glob
+from os.path import join
+from string import digits
+from subprocess import Popen, PIPE
+
+from pyload.plugins.internal.AbstractExtractor import AbtractExtractor, WrongPassword, ArchiveError, CRCError
+from pyload.utils import safe_join, decode
+
+
+class UnRar(AbtractExtractor):
+ __name__ = "UnRar"
+ __version__ = "0.16"
+
+ __description__ = """Rar extractor plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+ CMD = "unrar"
+
+ # there are some more uncovered rar formats
+ re_version = re.compile(r"(UNRAR 5[\.\d]+(.*?)freeware)")
+ re_splitfile = re.compile(r"(.*)\.part(\d+)\.rar$", re.I)
+ re_partfiles = re.compile(r".*\.(rar|r[0-9]+)", re.I)
+ re_filelist = re.compile(r"(.+)\s+(\d+)\s+(\d+)\s+")
+ re_filelist5 = re.compile(r"(.+)\s+(\d+)\s+\d\d-\d\d-\d\d\s+\d\d:\d\d\s+(.+)")
+ re_wrongpwd = re.compile("(Corrupt file or wrong password|password incorrect)", re.I)
+
+
+ @staticmethod
+ def checkDeps():
+ if os.name == "nt":
+ UnRar.CMD = join(pypath, "UnRAR.exe")
+ p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE)
+ p.communicate()
+ else:
+ try:
+ p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE)
+ p.communicate()
+ except OSError:
+
+ # fallback to rar
+ UnRar.CMD = "rar"
+ p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE)
+ p.communicate()
+
+ return True
+
+ @staticmethod
+ def getTargets(files_ids):
+ result = []
+
+ for file, id in files_ids:
+ if not file.endswith(".rar"):
+ continue
+
+ match = UnRar.re_splitfile.findall(file)
+ if match:
+ # only add first parts
+ if int(match[0][1]) == 1:
+ result.append((file, id))
+ else:
+ result.append((file, id))
+
+ return result
+
+ def init(self):
+ self.passwordProtected = False
+ self.headerProtected = False #: list files will not work without password
+ self.smallestFile = None #: small file to test passwords
+ self.password = "" #: save the correct password
+
+ def checkArchive(self):
+ p = self.call_unrar("l", "-v", self.file)
+ out, err = p.communicate()
+ if self.re_wrongpwd.search(err):
+ self.passwordProtected = True
+ self.headerProtected = True
+ return True
+
+ # output only used to check if passworded files are present
+ if self.re_version.search(out):
+ for attr, size, name in self.re_filelist5.findall(out):
+ if attr.startswith("*"):
+ self.passwordProtected = True
+ return True
+ else:
+ for name, size, packed in self.re_filelist.findall(out):
+ if name.startswith("*"):
+ self.passwordProtected = True
+ return True
+
+ self.listContent()
+ if not self.files:
+ raise ArchiveError("Empty Archive")
+
+ return False
+
+ def checkPassword(self, password):
+ # at this point we can only verify header protected files
+ if self.headerProtected:
+ p = self.call_unrar("l", "-v", self.file, password=password)
+ out, err = p.communicate()
+ if self.re_wrongpwd.search(err):
+ return False
+
+ return True
+
+ def extract(self, progress, password=None):
+ command = "x" if self.fullpath else "e"
+
+ p = self.call_unrar(command, self.file, self.out, password=password)
+ renice(p.pid, self.renice)
+
+ progress(0)
+ progressstring = ""
+ while True:
+ c = p.stdout.read(1)
+ # quit loop on eof
+ if not c:
+ break
+ # reading a percentage sign -> set progress and restart
+ if c == '%':
+ progress(int(progressstring))
+ progressstring = ""
+ # not reading a digit -> therefore restart
+ elif c not in digits:
+ progressstring = ""
+ # add digit to progressstring
+ else:
+ progressstring = progressstring + c
+ progress(100)
+
+ # retrieve stderr
+ err = p.stderr.read()
+
+ if "CRC failed" in err and not password and not self.passwordProtected:
+ raise CRCError
+ elif "CRC failed" in err:
+ raise WrongPassword
+ if err.strip(): #: raise error if anything is on stderr
+ raise ArchiveError(err.strip())
+ if p.returncode:
+ raise ArchiveError("Process terminated")
+
+ if not self.files:
+ self.password = password
+ self.listContent()
+
+ def getDeleteFiles(self):
+ if ".part" in self.file:
+ return glob(re.sub("(?<=\.part)([01]+)", "*", self.file, re.IGNORECASE))
+ # get files which matches .r* and filter unsuited files out
+ parts = glob(re.sub(r"(?<=\.r)ar$", "*", self.file, re.IGNORECASE))
+ return filter(lambda x: self.re_partfiles.match(x), parts)
+
+ def listContent(self):
+ command = "vb" if self.fullpath else "lb"
+ p = self.call_unrar(command, "-v", self.file, password=self.password)
+ out, err = p.communicate()
+
+ if "Cannot open" in err:
+ raise ArchiveError("Cannot open file")
+
+ if err.strip(): #: only log error at this point
+ self.m.logError(err.strip())
+
+ result = set()
+
+ for f in decode(out).splitlines():
+ f = f.strip()
+ result.add(safe_join(self.out, f))
+
+ self.files = result
+
+ def call_unrar(self, command, *xargs, **kwargs):
+ args = []
+ # overwrite flag
+ args.append("-o+") if self.overwrite else args.append("-o-")
+
+ if self.excludefiles:
+ for word in self.excludefiles.split(';'):
+ args.append("-x%s" % word)
+
+ # assume yes on all queries
+ args.append("-y")
+
+ # set a password
+ if "password" in kwargs and kwargs['password']:
+ args.append("-p%s" % kwargs['password'])
+ else:
+ args.append("-p-")
+
+ # NOTE: return codes are not reliable; there is some kind of threading/cleanup issue
+ call = [self.CMD, command] + args + list(xargs)
+ self.m.logDebug(" ".join(call))
+
+ p = Popen(call, stdout=PIPE, stderr=PIPE)
+
+ return p
+
+
+def renice(pid, value):
+ if os.name != "nt" and value:
+ try:
+ Popen(["renice", str(value), str(pid)], stdout=PIPE, stderr=PIPE, bufsize=-1)
+ except:
+ print "Renice failed"
diff --git a/pyload/plugins/internal/UnZip.py b/pyload/plugins/internal/UnZip.py
new file mode 100644
index 000000000..65a5a82bb
--- /dev/null
+++ b/pyload/plugins/internal/UnZip.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+import sys
+import zipfile
+
+from pyload.plugins.internal.AbstractExtractor import AbtractExtractor
+
+
+class UnZip(AbtractExtractor):
+ __name__ = "UnZip"
+ __version__ = "0.1"
+
+ __description__ = """Zip extractor plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ @staticmethod
+ def checkDeps():
+ return sys.version_info[:2] >= (2, 6)
+
+ @staticmethod
+ def getTargets(files_ids):
+ result = []
+
+ for file, id in files_ids:
+ if file.endswith(".zip"):
+ result.append((file, id))
+
+ return result
+
+ def extract(self, progress, password=None):
+ z = zipfile.ZipFile(self.file)
+ self.files = z.namelist()
+ z.extractall(self.out)
+
+ def getDeleteFiles(self):
+ return [self.file]
diff --git a/pyload/plugins/internal/XFSPAccount.py b/pyload/plugins/internal/XFSPAccount.py
new file mode 100644
index 000000000..aec9b7dbc
--- /dev/null
+++ b/pyload/plugins/internal/XFSPAccount.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import mktime, strptime
+
+from pyload.plugins.Account import Account
+from pyload.plugins.internal.SimpleHoster import parseHtmlForm
+from pyload.utils import parseFileSize
+
+
+class XFSPAccount(Account):
+ __name__ = "XFSPAccount"
+ __type__ = "account"
+ __version__ = "0.06"
+
+ __description__ = """XFileSharingPro base account plugin"""
+ __author_name__ = "zoidberg"
+ __author_mail__ = "zoidberg@mujmail.cz"
+
+ MAIN_PAGE = None
+
+ VALID_UNTIL_PATTERN = r'>Premium.[Aa]ccount expire:</TD><TD><b>([^<]+)</b>'
+ TRAFFIC_LEFT_PATTERN = r'>Traffic available today:</TD><TD><b>([^<]+)</b>'
+ LOGIN_FAIL_PATTERN = r'Incorrect Login or Password|>Error<'
+ PREMIUM_PATTERN = r'>Renew premium<'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load(self.MAIN_PAGE + "?op=my_account", decode=True)
+
+ validuntil = trafficleft = None
+ premium = True if re.search(self.PREMIUM_PATTERN, html) else False
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ premium = True
+ trafficleft = -1
+ try:
+ self.logDebug(m.group(1))
+ validuntil = mktime(strptime(m.group(1), "%d %B %Y"))
+ except Exception, e:
+ self.logError(e)
+ else:
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if m:
+ trafficleft = m.group(1)
+ if "Unlimited" in trafficleft:
+ premium = True
+ else:
+ trafficleft = parseFileSize(trafficleft) / 1024
+
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+ def login(self, user, data, req):
+ html = req.load('%slogin.html' % self.MAIN_PAGE, decode=True)
+
+ action, inputs = parseHtmlForm('name="FL"', html)
+ if not inputs:
+ inputs = {"op": "login",
+ "redirect": self.MAIN_PAGE}
+
+ inputs.update({"login": user,
+ "password": data['password']})
+
+ html = req.load(self.MAIN_PAGE, post=inputs, decode=True)
+
+ if re.search(self.LOGIN_FAIL_PATTERN, html):
+ self.wrongPassword()
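
Concrete XFileSharingPro account plugins are usually nothing more than this base class plus a MAIN_PAGE; a hypothetical example (domain made up):

from pyload.plugins.internal.XFSPAccount import XFSPAccount


class ExampleHostCom(XFSPAccount):
    __name__ = "ExampleHostCom"
    __type__ = "account"
    __version__ = "0.01"

    MAIN_PAGE = "http://examplehost.com/"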
diff --git a/pyload/plugins/internal/__init__.py b/pyload/plugins/internal/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/internal/__init__.py
diff --git a/pyload/plugins/ocr/GigasizeCom.py b/pyload/plugins/ocr/GigasizeCom.py
new file mode 100644
index 000000000..b139c304e
--- /dev/null
+++ b/pyload/plugins/ocr/GigasizeCom.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.OCR import OCR
+
+
+class GigasizeCom(OCR):
+ __name__ = "GigasizeCom"
+ __type__ = "ocr"
+ __version__ = "0.1"
+
+ __description__ = """Gigasize.com ocr plugin"""
+ __author_name__ = "pyLoad Team"
+ __author_mail__ = "admin@pyload.org"
+
+
+ def __init__(self):
+ OCR.__init__(self)
+
+ def get_captcha(self, image):
+ self.load_image(image)
+ self.threshold(2.8)
+ self.run_tesser(True, False, False, True)
+ return self.result_captcha
diff --git a/pyload/plugins/ocr/LinksaveIn.py b/pyload/plugins/ocr/LinksaveIn.py
new file mode 100644
index 000000000..1eb8bd796
--- /dev/null
+++ b/pyload/plugins/ocr/LinksaveIn.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+
+from glob import glob
+from PIL import Image
+from os import sep
+from os.path import abspath, dirname
+
+from pyload.plugins.OCR import OCR
+
+
+class LinksaveIn(OCR):
+ __name__ = "LinksaveIn"
+ __type__ = "ocr"
+ __version__ = "0.1"
+
+ __description__ = """Linksave.in ocr plugin"""
+ __author_name__ = "pyLoad Team"
+ __author_mail__ = "admin@pyload.org"
+
+
+ def __init__(self):
+ OCR.__init__(self)
+ self.data_dir = dirname(abspath(__file__)) + sep + "LinksaveIn" + sep
+
+ def load_image(self, image):
+ im = Image.open(image)
+ frame_nr = 0
+
+ lut = im.resize((256, 1))
+ lut.putdata(range(256))
+ lut = list(lut.convert("RGB").getdata())
+
+ new = Image.new("RGB", im.size)
+ npix = new.load()
+ while True:
+ try:
+ im.seek(frame_nr)
+ except EOFError:
+ break
+ frame = im.copy()
+ pix = frame.load()
+ for x in xrange(frame.size[0]):
+ for y in xrange(frame.size[1]):
+ if lut[pix[x, y]] != (0,0,0):
+ npix[x, y] = lut[pix[x, y]]
+ frame_nr += 1
+ new.save(self.data_dir+"unblacked.png")
+ self.image = new.copy()
+ self.pixels = self.image.load()
+ self.result_captcha = ''
+
+ def get_bg(self):
+ stat = {}
+ cstat = {}
+ img = self.image.convert("P")
+ for bgpath in glob(self.data_dir+"bg/*.gif"):
+ stat[bgpath] = 0
+ bg = Image.open(bgpath)
+
+ bglut = bg.resize((256, 1))
+ bglut.putdata(range(256))
+ bglut = list(bglut.convert("RGB").getdata())
+
+ lut = img.resize((256, 1))
+ lut.putdata(range(256))
+ lut = list(lut.convert("RGB").getdata())
+
+ bgpix = bg.load()
+ pix = img.load()
+ for x in xrange(bg.size[0]):
+ for y in xrange(bg.size[1]):
+ rgb_bg = bglut[bgpix[x, y]]
+ rgb_c = lut[pix[x, y]]
+ try:
+ cstat[rgb_c] += 1
+ except:
+ cstat[rgb_c] = 1
+ if rgb_bg == rgb_c:
+ stat[bgpath] += 1
+ max_p = 0
+ bg = ""
+ for bgpath, value in stat.items():
+ if max_p < value:
+ bg = bgpath
+ max_p = value
+ return bg
+
+ def substract_bg(self, bgpath):
+ bg = Image.open(bgpath)
+ img = self.image.convert("P")
+
+ bglut = bg.resize((256, 1))
+ bglut.putdata(range(256))
+ bglut = list(bglut.convert("RGB").getdata())
+
+ lut = img.resize((256, 1))
+ lut.putdata(range(256))
+ lut = list(lut.convert("RGB").getdata())
+
+ bgpix = bg.load()
+ pix = img.load()
+ orgpix = self.image.load()
+ for x in xrange(bg.size[0]):
+ for y in xrange(bg.size[1]):
+ rgb_bg = bglut[bgpix[x, y]]
+ rgb_c = lut[pix[x, y]]
+ if rgb_c == rgb_bg:
+ orgpix[x, y] = (255,255,255)
+
+ def eval_black_white(self):
+ new = Image.new("RGB", (140, 75))
+ pix = new.load()
+ orgpix = self.image.load()
+ thresh = 4
+ for x in xrange(new.size[0]):
+ for y in xrange(new.size[1]):
+ rgb = orgpix[x, y]
+ r, g, b = rgb
+ pix[x, y] = (255,255,255)
+ if r > max(b, g)+thresh:
+ pix[x, y] = (0,0,0)
+ if g < min(r, b):
+ pix[x, y] = (0,0,0)
+ if g > max(r, b)+thresh:
+ pix[x, y] = (0,0,0)
+ if b > max(r, g)+thresh:
+ pix[x, y] = (0,0,0)
+ self.image = new
+ self.pixels = self.image.load()
+
+ def get_captcha(self, image):
+ self.load_image(image)
+ bg = self.get_bg()
+ self.substract_bg(bg)
+ self.eval_black_white()
+ self.to_greyscale()
+ self.image.save(self.data_dir+"cleaned_pass1.png")
+ self.clean(4)
+ self.clean(4)
+ self.image.save(self.data_dir+"cleaned_pass2.png")
+ letters = self.split_captcha_letters()
+ final = ""
+ for n, letter in enumerate(letters):
+ self.image = letter
+ self.image.save(self.data_dir + "letter%d.png" % n)
+ self.run_tesser(True, True, False, False)
+ final += self.result_captcha
+
+ return final
diff --git a/pyload/plugins/ocr/NetloadIn.py b/pyload/plugins/ocr/NetloadIn.py
new file mode 100644
index 000000000..d31c30989
--- /dev/null
+++ b/pyload/plugins/ocr/NetloadIn.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.OCR import OCR
+
+class NetloadIn(OCR):
+ __name__ = "NetloadIn"
+ __type__ = "ocr"
+ __version__ = "0.1"
+
+ __description__ = """Netload.in ocr plugin"""
+ __author_name__ = "pyLoad Team"
+ __author_mail__ = "admin@pyload.org"
+
+
+ def __init__(self):
+ OCR.__init__(self)
+
+ def get_captcha(self, image):
+ self.load_image(image)
+ self.to_greyscale()
+ self.clean(3)
+ self.clean(3)
+ self.run_tesser(True, True, False, False)
+
+ self.result_captcha = self.result_captcha.replace(" ", "")[:4] # cut to 4 numbers
+
+ return self.result_captcha
diff --git a/pyload/plugins/ocr/ShareonlineBiz.py b/pyload/plugins/ocr/ShareonlineBiz.py
new file mode 100644
index 000000000..3cee0348e
--- /dev/null
+++ b/pyload/plugins/ocr/ShareonlineBiz.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.OCR import OCR
+
+
+class ShareonlineBiz(OCR):
+ __name__ = "ShareonlineBiz"
+ __type__ = "ocr"
+ __version__ = "0.1"
+
+ __description__ = """Shareonline.biz ocr plugin"""
+ __author_name__ = "RaNaN"
+ __author_mail__ = "RaNaN@pyload.org"
+
+
+ def __init__(self):
+ OCR.__init__(self)
+
+ def get_captcha(self, image):
+ self.load_image(image)
+ self.to_greyscale()
+ self.image = self.image.resize((160, 50))
+ self.pixels = self.image.load()
+ self.threshold(1.85)
+ #self.eval_black_white(240)
+ #self.derotate_by_average()
+
+ letters = self.split_captcha_letters()
+
+ final = ""
+ for letter in letters:
+ self.image = letter
+ self.run_tesser(True, True, False, False)
+ final += self.result_captcha
+
+ return final
+
+ #tesseract at 60%
diff --git a/pyload/plugins/ocr/__init__.py b/pyload/plugins/ocr/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/ocr/__init__.py