Diffstat (limited to 'pyload/plugins')
-rw-r--r--pyload/plugins/Plugin.py751
-rw-r--r--pyload/plugins/README.md16
-rw-r--r--pyload/plugins/__init__.py0
-rw-r--r--pyload/plugins/account/AlldebridCom.py59
-rw-r--r--pyload/plugins/account/BayfilesCom.py37
-rw-r--r--pyload/plugins/account/BillionuploadsCom.py16
-rw-r--r--pyload/plugins/account/BitshareCom.py32
-rw-r--r--pyload/plugins/account/CatShareNet.py56
-rw-r--r--pyload/plugins/account/CramitIn.py16
-rw-r--r--pyload/plugins/account/CzshareCom.py44
-rw-r--r--pyload/plugins/account/DebridItaliaCom.py40
-rw-r--r--pyload/plugins/account/DepositfilesCom.py35
-rw-r--r--pyload/plugins/account/DevhostSt.py48
-rw-r--r--pyload/plugins/account/DevhostStFolder.py58
-rw-r--r--pyload/plugins/account/DropboxCom.py42
-rw-r--r--pyload/plugins/account/EasybytezCom.py22
-rw-r--r--pyload/plugins/account/EuroshareEu.py41
-rw-r--r--pyload/plugins/account/FastixRu.py37
-rw-r--r--pyload/plugins/account/FastshareCz.py53
-rw-r--r--pyload/plugins/account/File4safeCom.py18
-rw-r--r--pyload/plugins/account/FileParadoxIn.py16
-rw-r--r--pyload/plugins/account/FilecloudIo.py59
-rw-r--r--pyload/plugins/account/FilefactoryCom.py49
-rw-r--r--pyload/plugins/account/FilejungleCom.py49
-rw-r--r--pyload/plugins/account/FileomCom.py16
-rw-r--r--pyload/plugins/account/FilerNet.py50
-rw-r--r--pyload/plugins/account/FilerioCom.py16
-rw-r--r--pyload/plugins/account/FilesMailRu.py28
-rw-r--r--pyload/plugins/account/FileserveCom.py44
-rw-r--r--pyload/plugins/account/FourSharedCom.py33
-rw-r--r--pyload/plugins/account/FreakshareCom.py43
-rw-r--r--pyload/plugins/account/FreeWayMe.py55
-rw-r--r--pyload/plugins/account/FshareVn.py63
-rw-r--r--pyload/plugins/account/Ftp.py17
-rw-r--r--pyload/plugins/account/HellshareCz.py76
-rw-r--r--pyload/plugins/account/Http.py17
-rw-r--r--pyload/plugins/account/HugefilesNet.py16
-rw-r--r--pyload/plugins/account/HundredEightyUploadCom.py16
-rw-r--r--pyload/plugins/account/JunocloudMe.py16
-rw-r--r--pyload/plugins/account/Keep2shareCc.py69
-rw-r--r--pyload/plugins/account/KingfilesNet.py82
-rw-r--r--pyload/plugins/account/LetitbitNet.py34
-rw-r--r--pyload/plugins/account/LinestorageCom.py16
-rw-r--r--pyload/plugins/account/LinksnappyCom.py50
-rw-r--r--pyload/plugins/account/LomafileCom.py16
-rw-r--r--pyload/plugins/account/MegaDebridEu.py39
-rw-r--r--pyload/plugins/account/MegaRapidCz.py59
-rw-r--r--pyload/plugins/account/MegasharesCom.py48
-rw-r--r--pyload/plugins/account/MovReelCom.py19
-rw-r--r--pyload/plugins/account/MultishareCz.py44
-rw-r--r--pyload/plugins/account/MyfastfileCom.py35
-rw-r--r--pyload/plugins/account/NetloadIn.py40
-rw-r--r--pyload/plugins/account/NosuploadCom.py16
-rw-r--r--pyload/plugins/account/NovafileCom.py16
-rw-r--r--pyload/plugins/account/NowVideoAt.py56
-rw-r--r--pyload/plugins/account/OboomCom.py62
-rw-r--r--pyload/plugins/account/OneFichierCom.py55
-rw-r--r--pyload/plugins/account/OverLoadMe.py36
-rw-r--r--pyload/plugins/account/PremiumTo.py34
-rw-r--r--pyload/plugins/account/PremiumizeMe.py48
-rw-r--r--pyload/plugins/account/QuickshareCz.py43
-rw-r--r--pyload/plugins/account/RPNetBiz.py51
-rw-r--r--pyload/plugins/account/RapidfileshareNet.py18
-rw-r--r--pyload/plugins/account/RapidgatorNet.py58
-rw-r--r--pyload/plugins/account/RapidshareCom.py55
-rw-r--r--pyload/plugins/account/RarefileNet.py16
-rw-r--r--pyload/plugins/account/RealdebridCom.py36
-rw-r--r--pyload/plugins/account/RehostTo.py38
-rw-r--r--pyload/plugins/account/RyushareCom.py25
-rw-r--r--pyload/plugins/account/SecureUploadEu.py16
-rw-r--r--pyload/plugins/account/SendmywayCom.py16
-rw-r--r--pyload/plugins/account/ShareonlineBiz.py45
-rw-r--r--pyload/plugins/account/SimplyPremiumCom.py46
-rw-r--r--pyload/plugins/account/SimplydebridCom.py34
-rw-r--r--pyload/plugins/account/StahnuTo.py34
-rw-r--r--pyload/plugins/account/StreamcloudEu.py16
-rw-r--r--pyload/plugins/account/TurbobitNet.py42
-rw-r--r--pyload/plugins/account/TusfilesNet.py23
-rw-r--r--pyload/plugins/account/UlozTo.py52
-rw-r--r--pyload/plugins/account/UnrestrictLi.py44
-rw-r--r--pyload/plugins/account/UploadcCom.py16
-rw-r--r--pyload/plugins/account/UploadedTo.py60
-rw-r--r--pyload/plugins/account/UploadheroCom.py41
-rw-r--r--pyload/plugins/account/UploadingCom.py63
-rw-r--r--pyload/plugins/account/UptoboxCom.py17
-rw-r--r--pyload/plugins/account/VidPlayNet.py16
-rw-r--r--pyload/plugins/account/XFileSharingPro.py30
-rw-r--r--pyload/plugins/account/YibaishiwuCom.py40
-rw-r--r--pyload/plugins/account/ZeveraCom.py56
-rw-r--r--pyload/plugins/account/__init__.py0
-rw-r--r--pyload/plugins/addon/Checksum.py181
-rw-r--r--pyload/plugins/addon/ClickAndLoad.py71
-rw-r--r--pyload/plugins/addon/DeleteFinished.py79
-rw-r--r--pyload/plugins/addon/DownloadScheduler.py77
-rw-r--r--pyload/plugins/addon/ExternalScripts.py141
-rw-r--r--pyload/plugins/addon/ExtractArchive.py361
-rw-r--r--pyload/plugins/addon/HotFolder.py65
-rw-r--r--pyload/plugins/addon/IRCInterface.py432
-rw-r--r--pyload/plugins/addon/MergeFiles.py83
-rw-r--r--pyload/plugins/addon/MultiHome.py82
-rw-r--r--pyload/plugins/addon/RestartFailed.py45
-rw-r--r--pyload/plugins/addon/UnSkipOnFail.py87
-rw-r--r--pyload/plugins/addon/UpdateManager.py300
-rw-r--r--pyload/plugins/addon/WindowsPhoneToastNotify.py58
-rw-r--r--pyload/plugins/addon/XMPPInterface.py253
-rw-r--r--pyload/plugins/addon/__init__.py0
-rw-r--r--pyload/plugins/captcha/AdsCaptcha.py70
-rw-r--r--pyload/plugins/captcha/ReCaptcha.py65
-rw-r--r--pyload/plugins/captcha/SolveMedia.py44
-rw-r--r--pyload/plugins/captcha/__init__.py0
-rw-r--r--pyload/plugins/container/CCF.py42
-rw-r--r--pyload/plugins/container/LinkList.py71
-rw-r--r--pyload/plugins/container/RSDF.py54
-rw-r--r--pyload/plugins/container/__init__.py0
-rw-r--r--pyload/plugins/crypter/BitshareCom.py21
-rw-r--r--pyload/plugins/crypter/C1neonCom.py16
-rw-r--r--pyload/plugins/crypter/ChipDe.py29
-rw-r--r--pyload/plugins/crypter/CrockoCom.py20
-rw-r--r--pyload/plugins/crypter/CryptItCom.py16
-rw-r--r--pyload/plugins/crypter/CzshareCom.py32
-rw-r--r--pyload/plugins/crypter/DDLMusicOrg.py51
-rw-r--r--pyload/plugins/crypter/DailymotionBatch.py106
-rw-r--r--pyload/plugins/crypter/DataHu.py40
-rw-r--r--pyload/plugins/crypter/DdlstorageCom.py20
-rw-r--r--pyload/plugins/crypter/DepositfilesCom.py20
-rw-r--r--pyload/plugins/crypter/Dereferer.py26
-rw-r--r--pyload/plugins/crypter/DlProtectCom.py65
-rw-r--r--pyload/plugins/crypter/DontKnowMe.py29
-rw-r--r--pyload/plugins/crypter/DuckCryptInfo.py59
-rw-r--r--pyload/plugins/crypter/DuploadOrg.py16
-rw-r--r--pyload/plugins/crypter/EasybytezCom.py22
-rw-r--r--pyload/plugins/crypter/EmbeduploadCom.py60
-rw-r--r--pyload/plugins/crypter/FilebeerInfo.py16
-rw-r--r--pyload/plugins/crypter/FilecloudIo.py21
-rw-r--r--pyload/plugins/crypter/FilecryptCc.py148
-rw-r--r--pyload/plugins/crypter/FilefactoryCom.py28
-rw-r--r--pyload/plugins/crypter/FilerNet.py26
-rw-r--r--pyload/plugins/crypter/FileserveCom.py38
-rw-r--r--pyload/plugins/crypter/FilesonicCom.py15
-rw-r--r--pyload/plugins/crypter/FilestubeCom.py21
-rw-r--r--pyload/plugins/crypter/FiletramCom.py22
-rw-r--r--pyload/plugins/crypter/FiredriveCom.py16
-rw-r--r--pyload/plugins/crypter/FourChanOrg.py27
-rw-r--r--pyload/plugins/crypter/FreakhareCom.py38
-rw-r--r--pyload/plugins/crypter/FreetexthostCom.py27
-rw-r--r--pyload/plugins/crypter/FshareVn.py20
-rw-r--r--pyload/plugins/crypter/GooGl.py32
-rw-r--r--pyload/plugins/crypter/HoerbuchIn.py62
-rw-r--r--pyload/plugins/crypter/HotfileCom.py16
-rw-r--r--pyload/plugins/crypter/ILoadTo.py16
-rw-r--r--pyload/plugins/crypter/ImgurComAlbum.py27
-rw-r--r--pyload/plugins/crypter/JunocloudMe.py20
-rw-r--r--pyload/plugins/crypter/LetitbitNet.py33
-rw-r--r--pyload/plugins/crypter/LinkCryptWs.py326
-rw-r--r--pyload/plugins/crypter/LinkSaveIn.py246
-rw-r--r--pyload/plugins/crypter/LinkdecrypterCom.py92
-rw-r--r--pyload/plugins/crypter/LixIn.py62
-rw-r--r--pyload/plugins/crypter/LofCc.py16
-rw-r--r--pyload/plugins/crypter/MBLinkInfo.py17
-rw-r--r--pyload/plugins/crypter/MediafireCom.py56
-rw-r--r--pyload/plugins/crypter/MegaRapidCz.py20
-rw-r--r--pyload/plugins/crypter/MegauploadCom.py15
-rw-r--r--pyload/plugins/crypter/Movie2kTo.py16
-rw-r--r--pyload/plugins/crypter/MultiUpOrg.py38
-rw-r--r--pyload/plugins/crypter/MultiloadCz.py42
-rw-r--r--pyload/plugins/crypter/MultiuploadCom.py15
-rw-r--r--pyload/plugins/crypter/NCryptIn.py315
-rw-r--r--pyload/plugins/crypter/NetfolderIn.py70
-rw-r--r--pyload/plugins/crypter/NosvideoCom.py21
-rw-r--r--pyload/plugins/crypter/OneKhDe.py40
-rw-r--r--pyload/plugins/crypter/OronCom.py16
-rw-r--r--pyload/plugins/crypter/PastebinCom.py21
-rw-r--r--pyload/plugins/crypter/QuickshareCz.py31
-rw-r--r--pyload/plugins/crypter/RSLayerCom.py16
-rw-r--r--pyload/plugins/crypter/RapidfileshareNet.py20
-rw-r--r--pyload/plugins/crypter/RelinkUs.py282
-rw-r--r--pyload/plugins/crypter/SafelinkingNet.py81
-rw-r--r--pyload/plugins/crypter/SecuredIn.py16
-rw-r--r--pyload/plugins/crypter/SexuriaCom.py94
-rw-r--r--pyload/plugins/crypter/ShareLinksBiz.py286
-rw-r--r--pyload/plugins/crypter/SharingmatrixCom.py15
-rw-r--r--pyload/plugins/crypter/SpeedLoadOrg.py16
-rw-r--r--pyload/plugins/crypter/StealthTo.py16
-rw-r--r--pyload/plugins/crypter/TnyCz.py27
-rw-r--r--pyload/plugins/crypter/TrailerzoneInfo.py16
-rw-r--r--pyload/plugins/crypter/TurbobitNet.py44
-rw-r--r--pyload/plugins/crypter/TusfilesNet.py45
-rw-r--r--pyload/plugins/crypter/UlozTo.py46
-rw-r--r--pyload/plugins/crypter/UploadableCh.py24
-rw-r--r--pyload/plugins/crypter/UploadedTo.py34
-rw-r--r--pyload/plugins/crypter/WiiReloadedOrg.py16
-rw-r--r--pyload/plugins/crypter/WuploadCom.py15
-rw-r--r--pyload/plugins/crypter/XFileSharingPro.py47
-rw-r--r--pyload/plugins/crypter/XupPl.py25
-rw-r--r--pyload/plugins/crypter/YoutubeBatch.py148
-rw-r--r--pyload/plugins/crypter/__init__.py0
-rw-r--r--pyload/plugins/hook/AlldebridCom.py27
-rw-r--r--pyload/plugins/hook/BypassCaptcha.py133
-rw-r--r--pyload/plugins/hook/Captcha9kw.py250
-rw-r--r--pyload/plugins/hook/CaptchaBrotherhood.py166
-rw-r--r--pyload/plugins/hook/DeathByCaptcha.py213
-rw-r--r--pyload/plugins/hook/DebridItaliaCom.py28
-rw-r--r--pyload/plugins/hook/EasybytezCom.py36
-rw-r--r--pyload/plugins/hook/ExpertDecoders.py92
-rw-r--r--pyload/plugins/hook/FastixRu.py27
-rw-r--r--pyload/plugins/hook/FreeWayMe.py25
-rw-r--r--pyload/plugins/hook/ImageTyperz.py151
-rw-r--r--pyload/plugins/hook/LinkdecrypterCom.py60
-rw-r--r--pyload/plugins/hook/LinksnappyCom.py27
-rw-r--r--pyload/plugins/hook/MegaDebridEu.py30
-rw-r--r--pyload/plugins/hook/MultishareCz.py27
-rw-r--r--pyload/plugins/hook/MyfastfileCom.py30
-rw-r--r--pyload/plugins/hook/OverLoadMe.py30
-rw-r--r--pyload/plugins/hook/PremiumTo.py38
-rw-r--r--pyload/plugins/hook/PremiumizeMe.py54
-rw-r--r--pyload/plugins/hook/RPNetBiz.py52
-rw-r--r--pyload/plugins/hook/RealdebridCom.py27
-rw-r--r--pyload/plugins/hook/RehostTo.py40
-rw-r--r--pyload/plugins/hook/SimplyPremiumCom.py29
-rw-r--r--pyload/plugins/hook/SimplydebridCom.py22
-rw-r--r--pyload/plugins/hook/UnrestrictLi.py30
-rw-r--r--pyload/plugins/hook/XFileSharingPro.py96
-rw-r--r--pyload/plugins/hook/ZeveraCom.py22
-rw-r--r--pyload/plugins/hook/__init__.py0
-rw-r--r--pyload/plugins/hoster/AlldebridCom.py89
-rw-r--r--pyload/plugins/hoster/BayfilesCom.py87
-rw-r--r--pyload/plugins/hoster/BezvadataCz.py91
-rw-r--r--pyload/plugins/hoster/BillionuploadsCom.py24
-rw-r--r--pyload/plugins/hoster/BitshareCom.py155
-rw-r--r--pyload/plugins/hoster/BoltsharingCom.py18
-rw-r--r--pyload/plugins/hoster/CatShareNet.py67
-rw-r--r--pyload/plugins/hoster/CloudzerNet.py20
-rw-r--r--pyload/plugins/hoster/CramitIn.py24
-rw-r--r--pyload/plugins/hoster/CrockoCom.py70
-rw-r--r--pyload/plugins/hoster/CyberlockerCh.py18
-rw-r--r--pyload/plugins/hoster/CzshareCom.py152
-rw-r--r--pyload/plugins/hoster/DailymotionCom.py116
-rw-r--r--pyload/plugins/hoster/DataHu.py45
-rw-r--r--pyload/plugins/hoster/DataportCz.py57
-rw-r--r--pyload/plugins/hoster/DateiTo.py78
-rw-r--r--pyload/plugins/hoster/DdlstorageCom.py19
-rw-r--r--pyload/plugins/hoster/DebridItaliaCom.py50
-rw-r--r--pyload/plugins/hoster/DepositfilesCom.py123
-rw-r--r--pyload/plugins/hoster/DlFreeFr.py213
-rw-r--r--pyload/plugins/hoster/DuploadOrg.py18
-rw-r--r--pyload/plugins/hoster/EasybytezCom.py26
-rw-r--r--pyload/plugins/hoster/EdiskCz.py56
-rw-r--r--pyload/plugins/hoster/EgoFilesCom.py18
-rw-r--r--pyload/plugins/hoster/EnteruploadCom.py18
-rw-r--r--pyload/plugins/hoster/EpicShareNet.py18
-rw-r--r--pyload/plugins/hoster/EuroshareEu.py67
-rw-r--r--pyload/plugins/hoster/ExtabitCom.py79
-rw-r--r--pyload/plugins/hoster/FastixRu.py73
-rw-r--r--pyload/plugins/hoster/FastshareCz.py86
-rw-r--r--pyload/plugins/hoster/FileApeCom.py18
-rw-r--r--pyload/plugins/hoster/FileParadoxIn.py25
-rw-r--r--pyload/plugins/hoster/FileSharkPl.py137
-rw-r--r--pyload/plugins/hoster/FileStoreTo.py36
-rw-r--r--pyload/plugins/hoster/FilebeerInfo.py18
-rw-r--r--pyload/plugins/hoster/FilecloudIo.py124
-rw-r--r--pyload/plugins/hoster/FilefactoryCom.py90
-rw-r--r--pyload/plugins/hoster/FilejungleCom.py29
-rw-r--r--pyload/plugins/hoster/FileomCom.py35
-rw-r--r--pyload/plugins/hoster/FilepostCom.py130
-rw-r--r--pyload/plugins/hoster/FilepupNet.py51
-rw-r--r--pyload/plugins/hoster/FilerNet.py99
-rw-r--r--pyload/plugins/hoster/FilerioCom.py24
-rw-r--r--pyload/plugins/hoster/FilesMailRu.py106
-rw-r--r--pyload/plugins/hoster/FileserveCom.py217
-rw-r--r--pyload/plugins/hoster/FileshareInUa.py18
-rw-r--r--pyload/plugins/hoster/FilesonicCom.py19
-rw-r--r--pyload/plugins/hoster/FilezyNet.py18
-rw-r--r--pyload/plugins/hoster/FiredriveCom.py18
-rw-r--r--pyload/plugins/hoster/FlyFilesNet.py45
-rw-r--r--pyload/plugins/hoster/FourSharedCom.py61
-rw-r--r--pyload/plugins/hoster/FreakshareCom.py180
-rw-r--r--pyload/plugins/hoster/FreeWayMe.py36
-rw-r--r--pyload/plugins/hoster/FreevideoCz.py18
-rw-r--r--pyload/plugins/hoster/FshareVn.py123
-rw-r--r--pyload/plugins/hoster/Ftp.py79
-rw-r--r--pyload/plugins/hoster/GamefrontCom.py89
-rw-r--r--pyload/plugins/hoster/GigapetaCom.py65
-rw-r--r--pyload/plugins/hoster/GooIm.py38
-rw-r--r--pyload/plugins/hoster/HellshareCz.py48
-rw-r--r--pyload/plugins/hoster/HellspyCz.py18
-rw-r--r--pyload/plugins/hoster/HotfileCom.py21
-rw-r--r--pyload/plugins/hoster/HugefilesNet.py26
-rw-r--r--pyload/plugins/hoster/HundredEightyUploadCom.py27
-rw-r--r--pyload/plugins/hoster/IFileWs.py18
-rw-r--r--pyload/plugins/hoster/IcyFilesCom.py18
-rw-r--r--pyload/plugins/hoster/IfileIt.py64
-rw-r--r--pyload/plugins/hoster/IfolderRu.py76
-rw-r--r--pyload/plugins/hoster/JumbofilesCom.py37
-rw-r--r--pyload/plugins/hoster/JunocloudMe.py29
-rw-r--r--pyload/plugins/hoster/Keep2shareCc.py118
-rw-r--r--pyload/plugins/hoster/KickloadCom.py18
-rw-r--r--pyload/plugins/hoster/LemUploadsCom.py18
-rw-r--r--pyload/plugins/hoster/LetitbitNet.py145
-rw-r--r--pyload/plugins/hoster/LinksnappyCom.py76
-rw-r--r--pyload/plugins/hoster/LoadTo.py72
-rw-r--r--pyload/plugins/hoster/LomafileCom.py30
-rw-r--r--pyload/plugins/hoster/LuckyShareNet.py76
-rw-r--r--pyload/plugins/hoster/MediafireCom.py122
-rw-r--r--pyload/plugins/hoster/MegaCoNz.py144
-rw-r--r--pyload/plugins/hoster/MegaDebridEu.py94
-rw-r--r--pyload/plugins/hoster/MegaFilesSe.py18
-rw-r--r--pyload/plugins/hoster/MegaRapidCz.py71
-rw-r--r--pyload/plugins/hoster/MegacrypterCom.py56
-rw-r--r--pyload/plugins/hoster/MegareleaseOrg.py19
-rw-r--r--pyload/plugins/hoster/MegasharesCom.py110
-rw-r--r--pyload/plugins/hoster/MegauploadCom.py18
-rw-r--r--pyload/plugins/hoster/MegavideoCom.py19
-rw-r--r--pyload/plugins/hoster/MovReelCom.py26
-rw-r--r--pyload/plugins/hoster/MultishareCz.py77
-rw-r--r--pyload/plugins/hoster/MyfastfileCom.py47
-rw-r--r--pyload/plugins/hoster/MyvideoDe.py49
-rw-r--r--pyload/plugins/hoster/NahrajCz.py18
-rw-r--r--pyload/plugins/hoster/NarodRu.py60
-rw-r--r--pyload/plugins/hoster/NetloadIn.py267
-rw-r--r--pyload/plugins/hoster/NosuploadCom.py43
-rw-r--r--pyload/plugins/hoster/NovafileCom.py31
-rw-r--r--pyload/plugins/hoster/NowDownloadEu.py63
-rw-r--r--pyload/plugins/hoster/NowVideoAt.py44
-rw-r--r--pyload/plugins/hoster/OboomCom.py145
-rw-r--r--pyload/plugins/hoster/OneFichierCom.py71
-rw-r--r--pyload/plugins/hoster/OronCom.py19
-rw-r--r--pyload/plugins/hoster/OverLoadMe.py84
-rw-r--r--pyload/plugins/hoster/PandaplaNet.py18
-rw-r--r--pyload/plugins/hoster/PornhostCom.py80
-rw-r--r--pyload/plugins/hoster/PornhubCom.py89
-rw-r--r--pyload/plugins/hoster/PotloadCom.py18
-rw-r--r--pyload/plugins/hoster/PremiumTo.py77
-rw-r--r--pyload/plugins/hoster/PremiumizeMe.py55
-rw-r--r--pyload/plugins/hoster/PromptfileCom.py46
-rw-r--r--pyload/plugins/hoster/PrzeklejPl.py18
-rw-r--r--pyload/plugins/hoster/QuickshareCz.py95
-rw-r--r--pyload/plugins/hoster/RPNetBiz.py85
-rw-r--r--pyload/plugins/hoster/RapidfileshareNet.py31
-rw-r--r--pyload/plugins/hoster/RapidgatorNet.py201
-rw-r--r--pyload/plugins/hoster/RapidshareCom.py228
-rw-r--r--pyload/plugins/hoster/RarefileNet.py28
-rw-r--r--pyload/plugins/hoster/RealdebridCom.py93
-rw-r--r--pyload/plugins/hoster/RedtubeCom.py62
-rw-r--r--pyload/plugins/hoster/RehostTo.py43
-rw-r--r--pyload/plugins/hoster/RemixshareCom.py61
-rw-r--r--pyload/plugins/hoster/RgHostNet.py33
-rw-r--r--pyload/plugins/hoster/RyushareCom.py81
-rw-r--r--pyload/plugins/hoster/SecureUploadEu.py23
-rw-r--r--pyload/plugins/hoster/SendmywayCom.py24
-rw-r--r--pyload/plugins/hoster/SendspaceCom.py60
-rw-r--r--pyload/plugins/hoster/Share4webCom.py22
-rw-r--r--pyload/plugins/hoster/Share76Com.py18
-rw-r--r--pyload/plugins/hoster/ShareFilesCo.py18
-rw-r--r--pyload/plugins/hoster/SharebeesCom.py18
-rw-r--r--pyload/plugins/hoster/ShareonlineBiz.py196
-rw-r--r--pyload/plugins/hoster/ShareplaceCom.py89
-rw-r--r--pyload/plugins/hoster/SharingmatrixCom.py19
-rw-r--r--pyload/plugins/hoster/ShragleCom.py19
-rw-r--r--pyload/plugins/hoster/SimplyPremiumCom.py82
-rw-r--r--pyload/plugins/hoster/SimplydebridCom.py63
-rw-r--r--pyload/plugins/hoster/SockshareCom.py20
-rw-r--r--pyload/plugins/hoster/SoundcloudCom.py57
-rw-r--r--pyload/plugins/hoster/SpeedLoadOrg.py18
-rw-r--r--pyload/plugins/hoster/SpeedfileCz.py18
-rw-r--r--pyload/plugins/hoster/SpeedyshareCom.py51
-rw-r--r--pyload/plugins/hoster/StorageTo.py18
-rw-r--r--pyload/plugins/hoster/StreamCz.py71
-rw-r--r--pyload/plugins/hoster/StreamcloudEu.py31
-rw-r--r--pyload/plugins/hoster/TurbobitNet.py173
-rw-r--r--pyload/plugins/hoster/TurbouploadCom.py18
-rw-r--r--pyload/plugins/hoster/TusfilesNet.py35
-rw-r--r--pyload/plugins/hoster/TwoSharedCom.py40
-rw-r--r--pyload/plugins/hoster/UlozTo.py164
-rw-r--r--pyload/plugins/hoster/UloziskoSk.py72
-rw-r--r--pyload/plugins/hoster/UnibytesCom.py74
-rw-r--r--pyload/plugins/hoster/UnrestrictLi.py91
-rw-r--r--pyload/plugins/hoster/UpleaCom.py60
-rw-r--r--pyload/plugins/hoster/UploadStationCom.py19
-rw-r--r--pyload/plugins/hoster/UploadboxCom.py18
-rw-r--r--pyload/plugins/hoster/UploadedTo.py245
-rw-r--r--pyload/plugins/hoster/UploadhereCom.py18
-rw-r--r--pyload/plugins/hoster/UploadheroCom.py79
-rw-r--r--pyload/plugins/hoster/UploadingCom.py104
-rw-r--r--pyload/plugins/hoster/UploadkingCom.py18
-rw-r--r--pyload/plugins/hoster/UpstoreNet.py73
-rw-r--r--pyload/plugins/hoster/UptoboxCom.py34
-rw-r--r--pyload/plugins/hoster/VeehdCom.py81
-rw-r--r--pyload/plugins/hoster/VeohCom.py52
-rw-r--r--pyload/plugins/hoster/VidPlayNet.py26
-rw-r--r--pyload/plugins/hoster/VimeoCom.py74
-rw-r--r--pyload/plugins/hoster/Vipleech4uCom.py18
-rw-r--r--pyload/plugins/hoster/WarserverCz.py18
-rw-r--r--pyload/plugins/hoster/WebshareCz.py61
-rw-r--r--pyload/plugins/hoster/WrzucTo.py52
-rw-r--r--pyload/plugins/hoster/WuploadCom.py19
-rw-r--r--pyload/plugins/hoster/X7To.py18
-rw-r--r--pyload/plugins/hoster/XHamsterCom.py129
-rw-r--r--pyload/plugins/hoster/XVideosCom.py28
-rw-r--r--pyload/plugins/hoster/Xdcc.py207
-rw-r--r--pyload/plugins/hoster/YibaishiwuCom.py55
-rw-r--r--pyload/plugins/hoster/YoupornCom.py60
-rw-r--r--pyload/plugins/hoster/YourfilesTo.py87
-rw-r--r--pyload/plugins/hoster/YoutubeCom.py184
-rw-r--r--pyload/plugins/hoster/ZDF.py59
-rw-r--r--pyload/plugins/hoster/ZeveraCom.py41
-rw-r--r--pyload/plugins/hoster/ZippyshareCom.py72
-rw-r--r--pyload/plugins/hoster/__init__.py0
-rw-r--r--pyload/plugins/internal/AbstractExtractor.py109
-rw-r--r--pyload/plugins/internal/Account.py305
-rw-r--r--pyload/plugins/internal/Addon.py169
-rw-r--r--pyload/plugins/internal/BasePlugin.py108
-rw-r--r--pyload/plugins/internal/Captcha.py51
-rw-r--r--pyload/plugins/internal/Container.py64
-rw-r--r--pyload/plugins/internal/Crypter.py107
-rw-r--r--pyload/plugins/internal/DeadCrypter.py19
-rw-r--r--pyload/plugins/internal/DeadHoster.py27
-rw-r--r--pyload/plugins/internal/Hoster.py21
-rw-r--r--pyload/plugins/internal/MultiHoster.py205
-rw-r--r--pyload/plugins/internal/OCR.py314
-rw-r--r--pyload/plugins/internal/SimpleCrypter.py152
-rw-r--r--pyload/plugins/internal/SimpleHoster.py473
-rw-r--r--pyload/plugins/internal/UnRar.py221
-rw-r--r--pyload/plugins/internal/UnZip.py41
-rw-r--r--pyload/plugins/internal/UpdateManager.py300
-rw-r--r--pyload/plugins/internal/XFSAccount.py120
-rw-r--r--pyload/plugins/internal/XFSCrypter.py29
-rw-r--r--pyload/plugins/internal/XFSHoster.py344
-rw-r--r--pyload/plugins/internal/__init__.py0
-rw-r--r--pyload/plugins/ocr/GigasizeCom.py24
-rw-r--r--pyload/plugins/ocr/LinksaveIn.py158
-rw-r--r--pyload/plugins/ocr/NetloadIn.py29
-rw-r--r--pyload/plugins/ocr/ShareonlineBiz.py39
-rw-r--r--pyload/plugins/ocr/__init__.py0
433 files changed, 27618 insertions, 0 deletions
diff --git a/pyload/plugins/Plugin.py b/pyload/plugins/Plugin.py
new file mode 100644
index 000000000..cf4debd37
--- /dev/null
+++ b/pyload/plugins/Plugin.py
@@ -0,0 +1,751 @@
+# -*- coding: utf-8 -*-
+
+from time import time, sleep
+from random import randint
+
+import os
+from os import remove, makedirs, chmod, stat
+from os.path import exists, join
+
+if os.name != "nt":
+ from os import chown
+ from pwd import getpwnam
+ from grp import getgrnam
+
+from itertools import islice
+from traceback import print_exc
+from urlparse import urlparse
+
+from pyload.utils import encode, fs_decode, fs_encode, safe_join, safe_path
+
+
+def chunks(iterable, size):
+ it = iter(iterable)
+ item = list(islice(it, size))
+ while item:
+ yield item
+ item = list(islice(it, size))
+
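+# Illustrative usage: chunks() yields successive lists of at most `size` items,
+# e.g. list(chunks(range(5), 2)) == [[0, 1], [2, 3], [4]].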
+
+class Abort(Exception):
+ """ raised when aborted """
+
+
+class Fail(Exception):
+ """ raised when failed """
+
+
+class Reconnect(Exception):
+ """ raised when reconnected """
+
+
+class Retry(Exception):
+ """ raised when start again from beginning """
+
+
+class SkipDownload(Exception):
+ """ raised when download should be skipped """
+
+
+class Base(object):
+ """
+ A Base class with log/config/db methods *all* plugin types can use
+ """
+
+ def __init__(self, core):
+ #: Core instance
+ self.core = core
+
+
+ def _log(self, type, args):
+ msg = " | ".join([encode(a).strip() for a in args if a])
+ logger = getattr(self.core.log, type)
+ logger("%s: %s" % (self.__name__, msg or _("%s MARK" % type.upper())))
+
+
+ def logDebug(self, *args):
+ if self.core.debug:
+ return self._log("debug", args)
+
+
+ def logInfo(self, *args):
+ return self._log("info", args)
+
+
+ def logWarning(self, *args):
+ return self._log("warning", args)
+
+
+ def logError(self, *args):
+ return self._log("error", args)
+
+
+ def logCritical(self, *args):
+ return self._log("critical", args)
+
+
+ #: Deprecated method
+ def setConf(self, option, value):
+ """ see `setConfig` """
+ self.setConfig(option, value)
+
+
+ def setConfig(self, option, value):
+ """ Set config value for current plugin
+
+ :param option:
+ :param value:
+ :return:
+ """
+ self.core.config.setPlugin(self.__name__, option, value)
+
+
+ #: Deprecated method
+ def getConf(self, option):
+ """ see `getConfig` """
+ return self.getConfig(option)
+
+
+ def getConfig(self, option):
+ """ Returns config value for current plugin
+
+ :param option:
+ :return:
+ """
+ return self.core.config.getPlugin(self.__name__, option)
+
+
+ def setStorage(self, key, value):
+ """ Saves a value persistently to the database """
+ self.core.db.setStorage(self.__name__, key, value)
+
+
+ def store(self, key, value):
+ """ same as `setStorage` """
+ self.core.db.setStorage(self.__name__, key, value)
+
+
+ def getStorage(self, key=None, default=None):
+ """ Retrieves saved value or dict of all saved entries if key is None """
+ if key:
+ return self.core.db.getStorage(self.__name__, key) or default
+ return self.core.db.getStorage(self.__name__, key)
+
+
+ def retrieve(self, *args, **kwargs):
+ """ same as `getStorage` """
+ return self.getStorage(*args, **kwargs)
+
+
+ def delStorage(self, key):
+ """ Delete entry in db """
+ self.core.db.delStorage(self.__name__, key)
+
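+ # Illustrative usage from a subclass: persist a value across plugin runs,
+ # e.g. self.setStorage("last_run", time()) and later
+ # self.retrieve("last_run", default=0).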
+
+class Plugin(Base):
+ """
+ Base plugin for hoster/crypter.
+ Overwrite `process` / `decrypt` in your subclassed plugin.
+ """
+ __name__ = "Plugin"
+ __type__ = "hoster"
+ __version__ = "0.07"
+
+ __pattern__ = r'^unmatchable$'
+ __config__ = [] #: [("name", "type", "desc", "default")]
+
+ __description__ = """Base plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("spoob", "spoob@pyload.org"),
+ ("mkaay", "mkaay@mkaay.de")]
+
+
+ info = {} #: file info dict
+
+
+ def __init__(self, pyfile):
+ Base.__init__(self, pyfile.m.core)
+
+ #: engage wan reconnection
+ self.wantReconnect = False
+
+ #: enable simultaneous processing of multiple downloads
+ self.multiDL = True
+ self.limitDL = 0
+
+ #: chunk limit
+ self.chunkLimit = 1
+ self.resumeDownload = False
+
+ #: time() + wait in seconds
+ self.waitUntil = 0
+ self.waiting = False
+
+ #: captcha reader instance
+ self.ocr = None
+
+ #: account handler instance, see :py:class:`Account`
+ self.account = pyfile.m.core.accountManager.getAccountPlugin(self.__name__)
+
+ #: premium status
+ self.premium = False
+ #: username/login
+ self.user = None
+
+ if self.account and not self.account.canUse():
+ self.account = None
+
+ if self.account:
+ self.user, data = self.account.selectAccount()
+ #: Browser instance, see `network.Browser`
+ self.req = self.account.getAccountRequest(self.user)
+ self.chunkLimit = -1 # chunk limit, -1 for unlimited
+ #: enables resume (will be ignored if the server doesn't accept chunks)
+ self.resumeDownload = True
+ self.multiDL = True #: every hoster with an account should allow multiple downloads
+ #: premium status
+ self.premium = self.account.isPremium(self.user)
+ else:
+ self.req = pyfile.m.core.requestFactory.getRequest(self.__name__)
+
+ #: associated pyfile instance, see `PyFile`
+ self.pyfile = pyfile
+
+ self.thread = None # holds thread in future
+
+ #: location where the last call to download was saved
+ self.lastDownload = ""
+ #: re match of the last call to `checkDownload`
+ self.lastCheck = None
+
+ #: js engine, see `JsEngine`
+ self.js = self.core.js
+
+ #: captcha task
+ self.cTask = None
+
+ self.html = None #@TODO: Move to hoster class in 0.4.10
+ self.retries = 0
+
+ self.init()
+
+
+ def getChunkCount(self):
+ if self.chunkLimit <= 0:
+ return self.core.config['download']['chunks']
+ return min(self.core.config['download']['chunks'], self.chunkLimit)
+
+
+ def __call__(self):
+ return self.__name__
+
+
+ def init(self):
+ """initialize the plugin (in addition to `__init__`)"""
+ pass
+
+
+ def setup(self):
+ """ setup for enviroment and other things, called before downloading (possibly more than one time)"""
+ pass
+
+
+ def preprocessing(self, thread):
+ """ handles important things to do before starting """
+ self.thread = thread
+
+ if self.account:
+ self.account.checkLogin(self.user)
+ else:
+ self.req.clearCookies()
+
+ self.setup()
+
+ self.pyfile.setStatus("starting")
+
+ return self.process(self.pyfile)
+
+
+ def process(self, pyfile):
+ """the 'main' method of every plugin, you **have to** overwrite it"""
+ raise NotImplementedError
+
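+ # Illustrative override in a hoster subclass (names are examples only):
+ #   def process(self, pyfile):
+ #       self.html = self.load(pyfile.url, decode=True)
+ #       self.download(pyfile.url)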
+
+ def resetAccount(self):
+ """ dont use account and retry download """
+ self.account = None
+ self.req = self.core.requestFactory.getRequest(self.__name__)
+ self.retry()
+
+
+ def checksum(self, local_file=None):
+ """
+ return codes:
+ 0 - checksum ok
+ 1 - checksum wrong
+ 5 - can't get checksum
+ 10 - not implemented
+ 20 - unknown error
+ """
+ #@TODO checksum check addon
+
+ return True, 10
+
+
+ def setReconnect(self, reconnect):
+ reconnect = bool(reconnect)
+ self.logDebug("Set wantReconnect to: %s (previous: %s)" % (reconnect, self.wantReconnect))
+ self.wantReconnect = reconnect
+
+
+ def setWait(self, seconds, reconnect=None):
+ """Set a specific wait time later used with `wait`
+
+ :param seconds: wait time in seconds
+ :param reconnect: True if a reconnect would avoid wait time
+ """
+ wait_time = int(seconds) + 1
+ wait_until = time() + wait_time
+
+ self.logDebug("Set waitUntil to: %f (previous: %f)" % (wait_until, self.pyfile.waitUntil),
+ "Wait: %d seconds" % wait_time)
+
+ self.pyfile.waitUntil = wait_until
+
+ if reconnect is not None:
+ self.setReconnect(reconnect)
+
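+ # Illustrative usage: self.setWait(60, reconnect=True) followed by self.wait(),
+ # or simply self.wait(60) to set the wait time and wait in one call.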
+
+ def wait(self, seconds=None, reconnect=None):
+ """ waits the time previously set """
+
+ pyfile = self.pyfile
+
+ if seconds is not None:
+ self.setWait(seconds)
+
+ if reconnect is not None:
+ self.setReconnect(reconnect)
+
+ self.waiting = True
+
+ status = pyfile.status
+ pyfile.setStatus("waiting")
+
+ self.logInfo(_("Wait: %d seconds") % (pyfile.waitUntil - time()),
+ _("Reconnect: %s") % self.wantReconnect)
+
+ if self.account:
+ self.logDebug("Ignore reconnection due account logged")
+
+ while pyfile.waitUntil > time():
+ if pyfile.abort:
+ self.abort()
+
+ sleep(1)
+ else:
+ while pyfile.waitUntil > time():
+ self.thread.m.reconnecting.wait(2)
+
+ if pyfile.abort:
+ self.abort()
+
+ if self.thread.m.reconnecting.isSet():
+ self.waiting = False
+ self.wantReconnect = False
+ raise Reconnect
+
+ sleep(1)
+
+ self.waiting = False
+
+ pyfile.status = status
+
+
+ def fail(self, reason):
+ """ fail and give reason """
+ raise Fail(reason)
+
+
+ def abort(self, reason=""):
+ """ abort and give reason """
+ if reason:
+ self.pyfile.error = str(reason)
+ raise Abort
+
+
+ def error(self, reason="", type=""):
+ if not reason and not type:
+ type = "unknown"
+
+ msg = _("%s error") % _(type.strip().capitalize()) if type else _("Error")
+ msg += ": " + reason.strip() if reason else ""
+ msg += _(" | Plugin may be out of date")
+
+ raise Fail(msg)
+
+
+ def offline(self, reason=""):
+ """ fail and indicate file is offline """
+ if reason:
+ self.pyfile.error = str(reason)
+ raise Fail("offline")
+
+
+ def tempOffline(self, reason=""):
+ """ fail and indicates file ist temporary offline, the core may take consequences """
+ if reason:
+ self.pyfile.error = str(reason)
+ raise Fail("temp. offline")
+
+
+ def retry(self, max_tries=5, wait_time=1, reason=""):
+ """Retries and begin again from the beginning
+
+ :param max_tries: number of maximum retries
+ :param wait_time: time to wait in seconds
+ :param reason: reason for retrying, will be passed to fail if max_tries reached
+ """
+ if 0 < max_tries <= self.retries:
+ self.error(reason or _("Max retries reached"), "retry")
+
+ self.wait(wait_time, False)
+
+ self.retries += 1
+ raise Retry(reason)
+
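+ # Illustrative usage: self.retry(max_tries=3, wait_time=60, reason="Server busy")
+ # waits 60 seconds and raises Retry to restart process(); once self.retries
+ # reaches max_tries the call fails instead of retrying.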
+
+ def invalidCaptcha(self):
+ self.logError(_("Invalid captcha"))
+ if self.cTask:
+ self.cTask.invalid()
+
+
+ def correctCaptcha(self):
+ self.logInfo(_("Correct captcha"))
+ if self.cTask:
+ self.cTask.correct()
+
+
+ def decryptCaptcha(self, url, get={}, post={}, cookies=False, forceUser=False, imgtype='jpg',
+ result_type='textual', timeout=290):
+ """ Loads a captcha and decrypts it with ocr, plugin, user input
+
+ :param url: url of captcha image
+ :param get: get part for request
+ :param post: post part for request
+ :param cookies: True if cookies should be enabled
+ :param forceUser: if True, ocr is not used
+ :param imgtype: Type of the Image
+ :param result_type: 'textual' if text is written on the captcha\
+ or 'positional' for captchas where the user has to click\
+ on a specific region of the captcha
+
+ :return: result of decrypting
+ """
+
+ img = self.load(url, get=get, post=post, cookies=cookies)
+
+ id = ("%.2f" % time())[-6:].replace(".", "")
+
+ with open(join("tmp", "tmpCaptcha_%s_%s.%s" % (self.__name__, id, imgtype)), "wb") as tmpCaptcha:
+ tmpCaptcha.write(img)
+
+ has_plugin = self.__name__ in self.core.pluginManager.ocrPlugins
+
+ if self.core.captcha:
+ Ocr = self.core.pluginManager.loadClass("ocr", self.__name__)
+ else:
+ Ocr = None
+
+ if Ocr and not forceUser:
+ sleep(randint(3000, 5000) / 1000.0)
+ if self.pyfile.abort:
+ self.abort()
+
+ ocr = Ocr()
+ result = ocr.get_captcha(tmpCaptcha.name)
+ else:
+ captchaManager = self.core.captchaManager
+ task = captchaManager.newTask(img, imgtype, tmpCaptcha.name, result_type)
+ self.cTask = task
+ captchaManager.handleCaptcha(task, timeout)
+
+ while task.isWaiting():
+ if self.pyfile.abort:
+ captchaManager.removeTask(task)
+ self.abort()
+ sleep(1)
+
+ captchaManager.removeTask(task)
+
+ if task.error and has_plugin: #ignore default error message since the user could use OCR
+ self.fail(_("Pil and tesseract not installed and no Client connected for captcha decrypting"))
+ elif task.error:
+ self.fail(task.error)
+ elif not task.result:
+ self.fail(_("No captcha result obtained in appropiate time by any of the plugins"))
+
+ result = task.result
+ self.logDebug("Received captcha result: %s" % result)
+
+ if not self.core.debug:
+ try:
+ remove(tmpCaptcha.name)
+ except:
+ pass
+
+ return result
+
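+ # Illustrative usage (URL and parameters are examples only):
+ #   code = self.decryptCaptcha("http://example.com/captcha.php",
+ #                              post={'id': captcha_id}, imgtype='png')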
+
+ def load(self, url, get={}, post={}, ref=True, cookies=True, just_header=False, decode=False, follow_location=True, save_cookies=True):
+ """Load content at url and returns it
+
+ :param url:
+ :param get:
+ :param post:
+ :param ref:
+ :param cookies:
+ :param just_header: If True only the header will be retrieved and returned as dict
+ :param decode: Whether to decode the output according to the HTTP header; should be True in most cases
+ :param follow_location: If True follow location else not
+ :param save_cookies: If True saves received cookies else discard them
+ :return: Loaded content
+ """
+ if self.pyfile.abort:
+ self.abort()
+
+ if not url:
+ self.fail(_("No url given"))
+
+ url = encode(url).strip() #@NOTE: utf8 vs decode -> please use decode attribute in all future plugins
+
+ if self.core.debug:
+ self.logDebug("Load url: " + url, *["%s=%s" % (key, val) for key, val in locals().iteritems() if key not in ("self", "url")])
+
+ res = self.req.load(url, get, post, ref, cookies, just_header, decode=decode, follow_location=follow_location, save_cookies=save_cookies)
+
+ if decode:
+ res = encode(res)
+
+ if self.core.debug:
+ from inspect import currentframe
+
+ frame = currentframe()
+ framefile = safe_join("tmp", self.__name__, "%s_line%s.dump.html" % (frame.f_back.f_code.co_name, frame.f_back.f_lineno))
+ try:
+ if not exists(join("tmp", self.__name__)):
+ makedirs(join("tmp", self.__name__))
+
+ with open(framefile, "wb") as f:
+ del frame #: delete the frame or it won't be cleaned
+ f.write(res)
+ except IOError, e:
+ self.logError(e)
+
+ if just_header:
+ #parse header
+ header = {"code": self.req.code}
+ for line in res.splitlines():
+ line = line.strip()
+ if not line or ":" not in line: continue
+
+ key, none, value = line.partition(":")
+ key = key.strip().lower()
+ value = value.strip()
+
+ if key in header:
+ if type(header[key]) == list:
+ header[key].append(value)
+ else:
+ header[key] = [header[key], value]
+ else:
+ header[key] = value
+ res = header
+
+ return res
+
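+ # Illustrative usage:
+ #   self.html = self.load(pyfile.url, decode=True)
+ #   header = self.load(pyfile.url, just_header=True)  #: dict, includes "code"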
+
+ def download(self, url, get={}, post={}, ref=True, cookies=True, disposition=False):
+ """Downloads the content at url to download folder
+
+ :param url:
+ :param get:
+ :param post:
+ :param ref:
+ :param cookies:
+ :param disposition: if True and the server provides a content-disposition header\
+ the filename will be changed if needed
+ :return: The location where the file was saved
+ """
+ if self.pyfile.abort:
+ self.abort()
+
+ if not url:
+ self.fail(_("No url given"))
+
+ url = encode(url).strip()
+
+ if self.core.debug:
+ self.logDebug("Download url: " + url, *["%s=%s" % (key, val) for key, val in locals().iteritems() if key not in ("self", "url")])
+
+ self.checkForSameFiles()
+
+ self.pyfile.setStatus("downloading")
+
+ download_folder = self.core.config['general']['download_folder']
+
+ location = safe_join(download_folder, self.pyfile.package().folder)
+
+ if not exists(location):
+ try:
+ makedirs(location, int(self.core.config['permission']['folder'], 8))
+
+ if self.core.config['permission']['change_dl'] and os.name != "nt":
+ uid = getpwnam(self.core.config['permission']['user'])[2]
+ gid = getgrnam(self.core.config['permission']['group'])[2]
+ chown(location, uid, gid)
+
+ except Exception, e:
+ self.fail(e)
+
+ # convert back to unicode
+ location = fs_decode(location)
+ name = safe_filename(self.pyfile.name)
+
+ filename = join(location, name)
+
+ self.core.addonManager.dispatchEvent("downloadStarts", self.pyfile, url, filename)
+
+ try:
+ newname = self.req.httpDownload(url, filename, get=get, post=post, ref=ref, cookies=cookies,
+ chunks=self.getChunkCount(), resume=self.resumeDownload,
+ progressNotify=self.pyfile.setProgress, disposition=disposition)
+ finally:
+ self.pyfile.size = self.req.size
+
+ if newname:
+ newname = urlparse(newname).path.split("/")[-1]
+
+ if disposition and newname != name:
+ self.logInfo(_("%(name)s saved as %(newname)s") % {"name": name, "newname": newname})
+ self.pyfile.name = newname
+ filename = join(location, newname)
+
+ fs_filename = fs_encode(filename)
+
+ if self.core.config['permission']['change_file']:
+ try:
+ chmod(fs_filename, int(self.core.config['permission']['file'], 8))
+ except Exception, e:
+ self.logWarning(_("Setting file mode failed"), e)
+
+ if self.core.config['permission']['change_dl'] and os.name != "nt":
+ try:
+ uid = getpwnam(self.core.config['permission']['user'])[2]
+ gid = getgrnam(self.core.config['permission']['group'])[2]
+ chown(fs_filename, uid, gid)
+
+ except Exception, e:
+ self.logWarning(_("Setting User and Group failed"), e)
+
+ self.lastDownload = filename
+ return self.lastDownload
+
+
+ def checkDownload(self, rules, api_size=0, max_size=50000, delete=True, read_size=0):
+ """ checks the content of the last downloaded file, re match is saved to `lastCheck`
+
+ :param rules: dict with names and rules to match (compiled regexp or strings)
+ :param api_size: expected file size
+ :param max_size: if the file is larger than this it won't be checked
+ :param delete: delete if matched
+ :param read_size: amount of bytes to read from files larger than max_size
+ :return: dictionary key of the first rule that matched
+ """
+ lastDownload = fs_encode(self.lastDownload)
+ if not exists(lastDownload):
+ return None
+
+ size = stat(lastDownload)
+ size = size.st_size
+
+ if api_size and api_size <= size: return None
+ elif size > max_size and not read_size: return None
+ self.logDebug("Download Check triggered")
+
+ with open(lastDownload, "rb") as f:
+ content = f.read(read_size if read_size else -1)
+
+ #produces encoding errors, better log to other file in the future?
+ #self.logDebug("Content: %s" % content)
+ for name, rule in rules.iteritems():
+ if isinstance(rule, basestring):
+ if rule in content:
+ if delete:
+ remove(lastDownload)
+ return name
+ elif hasattr(rule, "search"):
+ m = rule.search(content)
+ if m:
+ if delete:
+ remove(lastDownload)
+ self.lastCheck = m
+ return name
+
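+ # Illustrative usage after self.download(...):
+ #   check = self.checkDownload({'html': re.compile(r'<html'),
+ #                               'limit': "Download limit exceeded"})
+ #   if check == "limit": self.retry(wait_time=3600)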
+
+ def getPassword(self):
+ """ get the password the user provided in the package"""
+ password = self.pyfile.package().password
+ if not password: return ""
+ return password
+
+
+ def checkForSameFiles(self, starting=False):
+ """ checks if same file was/is downloaded within same package
+
+ :param starting: indicates that the current download is going to start
+ :raises SkipDownload:
+ """
+
+ pack = self.pyfile.package()
+
+ for pyfile in self.core.files.cache.values():
+ if pyfile != self.pyfile and pyfile.name == self.pyfile.name and pyfile.package().folder == pack.folder:
+ if pyfile.status in (0, 12): #finished or downloading
+ raise SkipDownload(pyfile.pluginname)
+ elif pyfile.status in (5, 7) and starting: #: a download is waiting/starting and was apparently started before
+ raise SkipDownload(pyfile.pluginname)
+
+ download_folder = self.core.config['general']['download_folder']
+ location = safe_join(download_folder, pack.folder, self.pyfile.name)
+
+ if starting and self.core.config['download']['skip_existing'] and exists(location):
+ size = os.stat(location).st_size
+ if size >= self.pyfile.size:
+ raise SkipDownload("File exists")
+
+ pyfile = self.core.db.findDuplicates(self.pyfile.id, self.pyfile.package().folder, self.pyfile.name)
+ if pyfile:
+ if exists(location):
+ raise SkipDownload(pyfile[0])
+
+ self.logDebug("File %s not skipped, because it does not exists." % self.pyfile.name)
+
+
+ def clean(self):
+ """ clean everything and remove references """
+ if hasattr(self, "pyfile"):
+ del self.pyfile
+
+ if hasattr(self, "req"):
+ self.req.close()
+ del self.req
+
+ if hasattr(self, "thread"):
+ del self.thread
+
+ if hasattr(self, "html"):
+ del self.html
diff --git a/pyload/plugins/README.md b/pyload/plugins/README.md
new file mode 100644
index 000000000..fa2a4c5b2
--- /dev/null
+++ b/pyload/plugins/README.md
@@ -0,0 +1,16 @@
+Licensing
+---------
+
+According to the terms of the GNU General Public License,
+pyload's plugins must be treated as an extension of the main program.
+This means the plugins must be released under the GPL or a GPL-compatible
+free software license, and that the terms of the GPL must be followed when
+those plugins are distributed.
+
+ * Any plugin published **without a license notice** is considered published under the **GNU GPLv3**.
+ * A different license can be used, but it **must be GPL-compatible** and the license notice must be put in the plugin
+ file.
+ * Any plugin published **with a GPL-incompatible license** will be rejected.
+ This includes *copyright, all rights reserved*.
+ * It is recommended to put the license notice at the top of the plugin file, as in the example below.
+ * It is recommended **not** to add a license notice when the plugin is published under the GNU GPLv3.
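+
+For example, a plugin released under a GPL-compatible license other than the
+GNU GPLv3 could start with a notice like this (illustrative only):
+
+    # -*- coding: utf-8 -*-
+    # This plugin is released under the MIT License.
+    # See https://opensource.org/licenses/MIT for the full license text.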
diff --git a/pyload/plugins/__init__.py b/pyload/plugins/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/__init__.py
diff --git a/pyload/plugins/account/AlldebridCom.py b/pyload/plugins/account/AlldebridCom.py
new file mode 100644
index 000000000..d4770426b
--- /dev/null
+++ b/pyload/plugins/account/AlldebridCom.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+import re
+import xml.dom.minidom as dom
+
+from time import time
+from urllib import urlencode
+
+from BeautifulSoup import BeautifulSoup
+
+from pyload.plugins.internal.Account import Account
+
+
+class AlldebridCom(Account):
+ __name__ = "AlldebridCom"
+ __type__ = "account"
+ __version__ = "0.22"
+
+ __description__ = """AllDebrid.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Andy Voigt", "spamsales@online.de")]
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ page = req.load("http://www.alldebrid.com/account/")
+ soup = BeautifulSoup(page)
+ #Try to parse expiration date directly from the control panel page (better accuracy)
+ try:
+ time_text = soup.find('div', attrs={'class': 'remaining_time_text'}).strong.string
+ self.logDebug("Account expires in: %s" % time_text)
+ p = re.compile('\d+')
+ exp_data = p.findall(time_text)
+ exp_time = time() + int(exp_data[0]) * 24 * 60 * 60 + int(
+ exp_data[1]) * 60 * 60 + (int(exp_data[2]) - 1) * 60
+ except:
+ #: fall back to getting the expiration date from the API
+ data = self.getAccountData(user)
+ page = req.load("http://www.alldebrid.com/api.php?action=info_user&login=%s&pw=%s" % (user,
+ data['password']))
+ self.logDebug(page)
+ xml = dom.parseString(page)
+ exp_time = time() + int(xml.getElementsByTagName("date")[0].childNodes[0].nodeValue) * 24 * 60 * 60
+ account_info = {"validuntil": exp_time, "trafficleft": -1}
+ return account_info
+
+
+ def login(self, user, data, req):
+ urlparams = urlencode({'action': 'login', 'login_login': user, 'login_password': data['password']})
+ page = req.load("http://www.alldebrid.com/register/?%s" % urlparams)
+
+ if "This login doesn't exist" in page:
+ self.wrongPassword()
+
+ if "The password is not valid" in page:
+ self.wrongPassword()
+
+ if "Invalid captcha" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/BayfilesCom.py b/pyload/plugins/account/BayfilesCom.py
new file mode 100644
index 000000000..34163d0cc
--- /dev/null
+++ b/pyload/plugins/account/BayfilesCom.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+from time import time
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class BayfilesCom(Account):
+ __name__ = "BayfilesCom"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Bayfiles.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def loadAccountInfo(self, user, req):
+ for _i in xrange(2):
+ res = json_loads(req.load("http://api.bayfiles.com/v1/account/info"))
+ self.logDebug(res)
+ if not res['error']:
+ break
+ self.logWarning(res['error'])
+ self.relogin(user)
+
+ return {"premium": bool(res['premium']), "trafficleft": -1,
+ "validuntil": res['expires'] if res['expires'] >= int(time()) else -1}
+
+
+ def login(self, user, data, req):
+ res = json_loads(req.load("http://api.bayfiles.com/v1/account/login/%s/%s" % (user, data['password'])))
+ self.logDebug(res)
+ if res['error']:
+ self.logError(res['error'])
+ self.wrongPassword()
diff --git a/pyload/plugins/account/BillionuploadsCom.py b/pyload/plugins/account/BillionuploadsCom.py
new file mode 100644
index 000000000..b1f621ecb
--- /dev/null
+++ b/pyload/plugins/account/BillionuploadsCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class BillionuploadsCom(XFSAccount):
+ __name__ = "BillionuploadsCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Billionuploads.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "billionuploads.com"
diff --git a/pyload/plugins/account/BitshareCom.py b/pyload/plugins/account/BitshareCom.py
new file mode 100644
index 000000000..44b2818cf
--- /dev/null
+++ b/pyload/plugins/account/BitshareCom.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+
+
+class BitshareCom(Account):
+ __name__ = "BitshareCom"
+ __type__ = "account"
+ __version__ = "0.12"
+
+ __description__ = """Bitshare account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Paul King", None)]
+
+
+ def loadAccountInfo(self, user, req):
+ page = req.load("http://bitshare.com/mysettings.html")
+
+ if "\"http://bitshare.com/myupgrade.html\">Free" in page:
+ return {"validuntil": -1, "trafficleft": -1, "premium": False}
+
+ if not '<input type="checkbox" name="directdownload" checked="checked" />' in page:
+ self.logWarning(_("Activate direct Download in your Bitshare Account"))
+
+ return {"validuntil": -1, "trafficleft": -1, "premium": True}
+
+
+ def login(self, user, data, req):
+ page = req.load("http://bitshare.com/login.html",
+ post={"user": user, "password": data['password'], "submit": "Login"}, cookies=True)
+ if "login" in req.lastEffectiveURL:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/CatShareNet.py b/pyload/plugins/account/CatShareNet.py
new file mode 100644
index 000000000..30a2691da
--- /dev/null
+++ b/pyload/plugins/account/CatShareNet.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import mktime, strptime
+
+from pyload.plugins.internal.Account import Account
+
+
+class CatShareNet(Account):
+ __name__ = "CatShareNet"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """CatShareNet account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("prOq", None)]
+
+
+ PREMIUM_PATTERN = r'class="nav-collapse collapse pull-right">[\s\w<>=-."/:]*\sz.</a></li>\s*<li><a href="/premium">.*\s*<span style="color: red">(.*?)</span>[\s\w<>/]*href="/logout"'
+ VALID_UNTIL_PATTERN = r'<div class="span6 pull-right">[\s\w<>=-":;]*<span style="font-size:13px;">.*?<strong>(.*?)</strong></span>'
+
+
+ def loadAccountInfo(self, user, req):
+ premium = False
+ validuntil = -1
+
+ html = req.load("http://catshare.net/", decode=True)
+
+ try:
+ m = re.search(self.PREMIUM_PATTERN, html)
+ if "Premium" in m.group(1):
+ premium = True
+ except:
+ pass
+
+ try:
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ expiredate = m.group(1)
+ if "-" not in expiredate:
+ validuntil = mktime(strptime(expiredate, "%d.%m.%Y"))
+ except:
+ pass
+
+ return {'premium': premium, 'trafficleft': -1, 'validuntil': validuntil}
+
+
+ def login(self, user, data, req):
+ html = req.load("http://catshare.net/login",
+ post={'user_email': user,
+ 'user_password': data['password'],
+ 'remindPassword': 0,
+ 'user[submit]': "Login"})
+
+ if not '<a href="/logout">Wyloguj</a>' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/CramitIn.py b/pyload/plugins/account/CramitIn.py
new file mode 100644
index 000000000..42b7cc7b2
--- /dev/null
+++ b/pyload/plugins/account/CramitIn.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class CramitIn(XFSAccount):
+ __name__ = "CramitIn"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Cramit.in account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "cramit.in"
diff --git a/pyload/plugins/account/CzshareCom.py b/pyload/plugins/account/CzshareCom.py
new file mode 100644
index 000000000..786832e8d
--- /dev/null
+++ b/pyload/plugins/account/CzshareCom.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+import re
+
+from pyload.plugins.internal.Account import Account
+
+
+class CzshareCom(Account):
+ __name__ = "CzshareCom"
+ __type__ = "account"
+ __version__ = "0.14"
+
+ __description__ = """Czshare.com account plugin, now Sdilej.cz"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ CREDIT_LEFT_PATTERN = r'<tr class="active">\s*<td>([\d ,]+) (KiB|MiB|GiB)</td>\s*<td>([^<]*)</td>\s*</tr>'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://sdilej.cz/prehled_kreditu/")
+
+ m = re.search(self.CREDIT_LEFT_PATTERN, html)
+ if m is None:
+ return {"validuntil": 0, "trafficleft": 0}
+ else:
+ credits = float(m.group(1).replace(' ', '').replace(',', '.'))
+ credits = credits * 1024 ** {'KiB': 0, 'MiB': 1, 'GiB': 2}[m.group(2)]
+ validuntil = mktime(strptime(m.group(3), '%d.%m.%y %H:%M'))
+ return {"validuntil": validuntil, "trafficleft": credits}
+
+
+ def login(self, user, data, req):
+ html = req.load('https://sdilej.cz/index.php', post={
+ "Prihlasit": "Prihlasit",
+ "login-password": data['password'],
+ "login-name": user
+ })
+
+ if '<div class="login' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/DebridItaliaCom.py b/pyload/plugins/account/DebridItaliaCom.py
new file mode 100644
index 000000000..30ed9fb1c
--- /dev/null
+++ b/pyload/plugins/account/DebridItaliaCom.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import re
+import time
+
+from pyload.plugins.internal.Account import Account
+
+
+class DebridItaliaCom(Account):
+ __name__ = "DebridItaliaCom"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Debriditalia.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ WALID_UNTIL_PATTERN = r'Premium valid till: (?P<D>[^|]+) \|'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://debriditalia.com/")
+
+ if 'Account premium not activated' in html:
+ return {"premium": False, "validuntil": None, "trafficleft": None}
+
+ m = re.search(self.WALID_UNTIL_PATTERN, html)
+ if m:
+ validuntil = int(time.mktime(time.strptime(m.group('D'), "%d/%m/%Y %H:%M")))
+ return {"premium": True, "validuntil": validuntil, "trafficleft": -1}
+ else:
+ self.logError(_("Unable to retrieve account information"))
+
+
+ def login(self, user, data, req):
+ html = req.load("http://debriditalia.com/login.php",
+ get={"u": user, "p": data['password']})
+ if 'NO' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/DepositfilesCom.py b/pyload/plugins/account/DepositfilesCom.py
new file mode 100644
index 000000000..df3ebe4f2
--- /dev/null
+++ b/pyload/plugins/account/DepositfilesCom.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import strptime, mktime
+
+from pyload.plugins.internal.Account import Account
+
+
+class DepositfilesCom(Account):
+ __name__ = "DepositfilesCom"
+ __type__ = "account"
+ __version__ = "0.3"
+
+ __description__ = """Depositfiles.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de"),
+ ("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("https://dfiles.eu/de/gold/")
+ validuntil = re.search(r"Sie haben Gold Zugang bis: <b>(.*?)</b></div>", html).group(1)
+
+ validuntil = int(mktime(strptime(validuntil, "%Y-%m-%d %H:%M:%S")))
+
+ return {"validuntil": validuntil, "trafficleft": -1}
+
+
+ def login(self, user, data, req):
+ html = req.load("https://dfiles.eu/de/login.php", get={"return": "/de/gold/payment.php"},
+ post={"login": user, "password": data['password']})
+ if r'<div class="error_message">Sie haben eine falsche Benutzername-Passwort-Kombination verwendet.</div>' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/DevhostSt.py b/pyload/plugins/account/DevhostSt.py
new file mode 100644
index 000000000..07eaf339a
--- /dev/null
+++ b/pyload/plugins/account/DevhostSt.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://d-h.st/mM8
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DevhostSt(SimpleHoster):
+ __name__ = "DevhostSt"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?d-h\.st/(?!users/)\w{3}'
+
+ __description__ = """d-h.st hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de")]
+
+
+ NAME_PATTERN = r'>Filename:</span> <div title="(?P<N>.+?)"'
+ SIZE_PATTERN = r'>Size:</span> (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ OFFLINE_PATTERN = r'>File Not Found<'
+ LINK_PATTERN = r'id="downloadfile" href="(.+?)"'
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = 1
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Download link not found"))
+
+ dl_url = m.group(1)
+ self.download(dl_url, disposition=True)
+
+ check = self.checkDownload({'html': re.compile("html")})
+ if check == "html":
+ self.error(_("Downloaded file is an html page"))
+
+
+getInfo = create_getInfo(DevhostSt)
diff --git a/pyload/plugins/account/DevhostStFolder.py b/pyload/plugins/account/DevhostStFolder.py
new file mode 100644
index 000000000..51142fde9
--- /dev/null
+++ b/pyload/plugins/account/DevhostStFolder.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://d-h.st/users/shine/?fld_id=37263#files
+
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class DevhostStFolder(SimpleCrypter):
+ __name__ = "DevhostStFolder"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?d-h\.st/users/(?P<USER>\w+)(/\?fld_id=(?P<ID>\d+))?'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """d-h.st folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ LINK_PATTERN = r'(?:/> |;">)<a href="(.+?)"(?!>Back to \w+<)'
+ OFFLINE_PATTERN = r'"/cHP">test\.png<'
+
+
+ def getFileInfo(self):
+ if re.search(self.OFFLINE_PATTERN, self.html):
+ self.offline()
+
+ try:
+ id = re.match(self.__pattern__, self.pyfile.url).group('ID')
+ if id == "0":
+ raise
+
+ p = r'href="(.+?)">Back to \w+<'
+ m = re.search(p, self.html)
+ html = self.load(urljoin("http://d-h.st", m.group(1)),
+ cookies=False)
+
+ p = '\?fld_id=%s.*?">(.+?)<' % id
+ m = re.search(p, html)
+ name = folder = m.group(1)
+
+ except Exception, e:
+ self.logDebug(e)
+ name = folder = re.match(self.__pattern__, self.pyfile.url).group('USER')
+
+ return {'name': name, 'folder': folder}
+
+
+ def getLinks(self):
+ return [urljoin("http://d-h.st", link) for link in re.findall(self.LINK_PATTERN, self.html)]
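getLinks() above resolves the relative hrefs captured by LINK_PATTERN against the site root; the same urljoin behaviour in isolation (the paths are made up for illustration):

    from urlparse import urljoin   # urllib.parse.urljoin on Python 3

    links = ["/mM8", "/users/shine/?fld_id=37263"]            # hypothetical captured hrefs
    print([urljoin("http://d-h.st", link) for link in links])
    # ['http://d-h.st/mM8', 'http://d-h.st/users/shine/?fld_id=37263']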
diff --git a/pyload/plugins/account/DropboxCom.py b/pyload/plugins/account/DropboxCom.py
new file mode 100644
index 000000000..40d5584a4
--- /dev/null
+++ b/pyload/plugins/account/DropboxCom.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DropboxCom(SimpleHoster):
+ __name__ = "DropboxCom"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?dropbox\.com/.+'
+
+ __description__ = """Dropbox.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de")]
+
+
+ NAME_PATTERN = r'<title>Dropbox - (?P<N>.+?)<'
+ SIZE_PATTERN = r'&nbsp;&middot;&nbsp; (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ OFFLINE_PATTERN = r'<title>Dropbox - (404|Shared link error)<'
+
+ COOKIES = [("dropbox.com", "lang", "en")]
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = 1
+ self.resumeDownload = True
+
+
+ def handleFree(self):
+ self.download(self.pyfile.url, get={'dl': "1"})
+
+ check = self.checkDownload({'html': re.compile("html")})
+ if check == "html":
+ self.error(_("Downloaded file is an html page"))
+
+
+getInfo = create_getInfo(DropboxCom)
diff --git a/pyload/plugins/account/EasybytezCom.py b/pyload/plugins/account/EasybytezCom.py
new file mode 100644
index 000000000..b1fb6a213
--- /dev/null
+++ b/pyload/plugins/account/EasybytezCom.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import mktime, strptime, gmtime
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+from pyload.utils import parseFileSize
+
+
+class EasybytezCom(XFSAccount):
+ __name__ = "EasybytezCom"
+ __type__ = "account"
+ __version__ = "0.10"
+
+ __description__ = """EasyBytez.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "easybytez.com"
diff --git a/pyload/plugins/account/EuroshareEu.py b/pyload/plugins/account/EuroshareEu.py
new file mode 100644
index 000000000..5ec543433
--- /dev/null
+++ b/pyload/plugins/account/EuroshareEu.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+import re
+
+from pyload.plugins.internal.Account import Account
+
+
+class EuroshareEu(Account):
+ __name__ = "EuroshareEu"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Euroshare.eu account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def loadAccountInfo(self, user, req):
+ self.relogin(user)
+ html = req.load("http://euroshare.eu/customer-zone/settings/")
+
+ m = re.search('id="input_expire_date" value="(\d+\.\d+\.\d+ \d+:\d+)"', html)
+ if m is None:
+ premium, validuntil = False, -1
+ else:
+ premium = True
+ validuntil = mktime(strptime(m.group(1), "%d.%m.%Y %H:%M"))
+
+ return {"validuntil": validuntil, "trafficleft": -1, "premium": premium}
+
+
+ def login(self, user, data, req):
+ html = req.load('http://euroshare.eu/customer-zone/login/', post={
+ "trvale": "1",
+ "login": user,
+ "password": data['password']
+ }, decode=True)
+
+ if u">Nesprávne prihlasovacie meno alebo heslo" in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/FastixRu.py b/pyload/plugins/account/FastixRu.py
new file mode 100644
index 000000000..96db443b7
--- /dev/null
+++ b/pyload/plugins/account/FastixRu.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class FastixRu(Account):
+ __name__ = "FastixRu"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Fastix account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Massimo Rosamilia", "max@spiritix.eu")]
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ page = req.load("http://fastix.ru/api_v2/?apikey=%s&sub=getaccountdetails" % (data['api']))
+ page = json_loads(page)
+ points = page['points']
+ kb = float(points)
+ kb = kb * 1024 ** 2 / 1000
+ if points > 0:
+ account_info = {"validuntil": -1, "trafficleft": kb}
+ else:
+ account_info = {"validuntil": None, "trafficleft": None, "premium": False}
+ return account_info
+
+
+ def login(self, user, data, req):
+ page = req.load("http://fastix.ru/api_v2/?sub=get_apikey&email=%s&password=%s" % (user, data['password']))
+ api = json_loads(page)
+ api = api['apikey']
+ data['api'] = api
+ if "error_code" in page:
+ self.wrongPassword()
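The traffic conversion above is easiest to read as plain arithmetic: trafficleft is tracked in KiB here, so multiplying the API "points" by 1024**2 and dividing by 1000 effectively treats 1000 points as one GiB (an interpretation of the code, not documented API semantics):

    points = 1000.0                       # hypothetical value from the API
    kb = points * 1024 ** 2 / 1000        # 1048576.0 KiB, i.e. exactly 1 GiB
    print(kb)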
diff --git a/pyload/plugins/account/FastshareCz.py b/pyload/plugins/account/FastshareCz.py
new file mode 100644
index 000000000..32c897317
--- /dev/null
+++ b/pyload/plugins/account/FastshareCz.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import parseFileSize
+
+
+class FastshareCz(Account):
+ __name__ = "FastshareCz"
+ __type__ = "account"
+ __version__ = "0.05"
+
+ __description__ = """Fastshare.cz account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ CREDIT_PATTERN = r'My account\s*\((.+?)\)'
+
+
+ def loadAccountInfo(self, user, req):
+ validuntil = None
+ trafficleft = None
+ premium = None
+
+ html = req.load("http://www.fastshare.cz/user", decode=True)
+
+ m = re.search(self.CREDIT_PATTERN, html)
+ if m:
+ trafficleft = self.parseTraffic(m.group(1))
+
+ if trafficleft:
+ premium = True
+ validuntil = -1
+ else:
+ premium = False
+
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+
+ def login(self, user, data, req):
+ req.cj.setCookie("fastshare.cz", "lang", "en")
+
+ req.load('http://www.fastshare.cz/login') # Do not remove or it will not login
+
+ html = req.load("http://www.fastshare.cz/sql.php",
+ post={'login': user, 'heslo': data['password']},
+ decode=True)
+
+ if ">Wrong username or password" in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/File4safeCom.py b/pyload/plugins/account/File4safeCom.py
new file mode 100644
index 000000000..42b67e08f
--- /dev/null
+++ b/pyload/plugins/account/File4safeCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class File4safeCom(XFSAccount):
+ __name__ = "File4safeCom"
+ __type__ = "account"
+ __version__ = "0.04"
+
+ __description__ = """File4safe.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ HOSTER_DOMAIN = "file4safe.com"
+
+ LOGIN_FAIL_PATTERN = r'input_login'
diff --git a/pyload/plugins/account/FileParadoxIn.py b/pyload/plugins/account/FileParadoxIn.py
new file mode 100644
index 000000000..cee7bec2f
--- /dev/null
+++ b/pyload/plugins/account/FileParadoxIn.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class FileParadoxIn(XFSAccount):
+ __name__ = "FileParadoxIn"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """FileParadox.in account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "fileparadox.in"
diff --git a/pyload/plugins/account/FilecloudIo.py b/pyload/plugins/account/FilecloudIo.py
new file mode 100644
index 000000000..babef59a2
--- /dev/null
+++ b/pyload/plugins/account/FilecloudIo.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class FilecloudIo(Account):
+ __name__ = "FilecloudIo"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """FilecloudIo account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ def loadAccountInfo(self, user, req):
+        # The first API request often fails, so retry up to 5 times; it usually succeeds on the second attempt
+ for _i in xrange(5):
+ rep = req.load("https://secure.filecloud.io/api-fetch_apikey.api",
+ post={"username": user, "password": self.accounts[user]['password']})
+ rep = json_loads(rep)
+ if rep['status'] == 'ok':
+ break
+ elif rep['status'] == 'error' and rep['message'] == 'no such user or wrong password':
+ self.logError(_("Wrong username or password"))
+ return {"valid": False, "premium": False}
+ else:
+ return {"premium": False}
+
+ akey = rep['akey']
+ self.accounts[user]['akey'] = akey # Saved for hoster plugin
+ rep = req.load("http://api.filecloud.io/api-fetch_account_details.api",
+ post={"akey": akey})
+ rep = json_loads(rep)
+
+ if rep['is_premium'] == 1:
+ return {"validuntil": int(rep['premium_until']), "trafficleft": -1}
+ else:
+ return {"premium": False}
+
+
+ def login(self, user, data, req):
+ req.cj.setCookie("secure.filecloud.io", "lang", "en")
+ html = req.load('https://secure.filecloud.io/user-login.html')
+
+ if not hasattr(self, "form_data"):
+ self.form_data = {}
+
+ self.form_data['username'] = user
+ self.form_data['password'] = data['password']
+
+ html = req.load('https://secure.filecloud.io/user-login_p.html',
+ post=self.form_data,
+ multipart=True)
+
+ self.logged_in = True if "you have successfully logged in - filecloud.io" in html else False
+ self.form_data = {}
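The akey lookup above is just a bounded retry loop that stops on the first "ok" status and bails out early on bad credentials; the control flow, stripped of the pyLoad specifics (load() is a hypothetical stand-in for req.load):

    import json

    def fetch_apikey(load, retries=5):
        # load() returns the raw JSON text of api-fetch_apikey.api
        for _i in xrange(retries):
            rep = json.loads(load())
            if rep['status'] == 'ok':
                return rep['akey']
            if rep['status'] == 'error' and rep['message'] == 'no such user or wrong password':
                return None               # bad credentials: no point retrying
        return None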
diff --git a/pyload/plugins/account/FilefactoryCom.py b/pyload/plugins/account/FilefactoryCom.py
new file mode 100644
index 000000000..0d7c1ff0e
--- /dev/null
+++ b/pyload/plugins/account/FilefactoryCom.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime
+
+from pycurl import REFERER
+
+from pyload.plugins.internal.Account import Account
+
+
+class FilefactoryCom(Account):
+ __name__ = "FilefactoryCom"
+ __type__ = "account"
+ __version__ = "0.14"
+
+ __description__ = """Filefactory.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ VALID_UNTIL_PATTERN = r'Premium valid until: <strong>(?P<d>\d{1,2})\w{1,2} (?P<m>\w{3}), (?P<y>\d{4})</strong>'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.filefactory.com/account/")
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ premium = True
+ validuntil = re.sub(self.VALID_UNTIL_PATTERN, '\g<d> \g<m> \g<y>', m.group(0))
+ validuntil = mktime(strptime(validuntil, "%d %b %Y"))
+ else:
+ premium = False
+ validuntil = -1
+
+ return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}
+
+
+ def login(self, user, data, req):
+ req.http.c.setopt(REFERER, "http://www.filefactory.com/member/login.php")
+
+ html = req.load("http://www.filefactory.com/member/signin.php", post={
+ "loginEmail": user,
+ "loginPassword": data['password'],
+ "Submit": "Sign In"})
+
+ if req.lastEffectiveURL != "http://www.filefactory.com/account/":
+ self.wrongPassword()
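The date handling above uses re.sub with named-group backreferences to drop the ordinal suffix and reorder the parts into something strptime accepts; the same trick on a simplified pattern and a made-up date:

    import re
    from time import mktime, strptime

    pattern = r'(?P<d>\d{1,2})\w{1,2} (?P<m>\w{3}), (?P<y>\d{4})'
    cleaned = re.sub(pattern, r'\g<d> \g<m> \g<y>', "21st Jan, 2015")   # -> "21 Jan 2015"
    print(mktime(strptime(cleaned, "%d %b %Y")))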
diff --git a/pyload/plugins/account/FilejungleCom.py b/pyload/plugins/account/FilejungleCom.py
new file mode 100644
index 000000000..8abb89788
--- /dev/null
+++ b/pyload/plugins/account/FilejungleCom.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime
+
+from pyload.plugins.internal.Account import Account
+
+
+class FilejungleCom(Account):
+ __name__ = "FilejungleCom"
+ __type__ = "account"
+ __version__ = "0.11"
+
+ __description__ = """Filejungle.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ login_timeout = 60
+
+ URL = "http://filejungle.com/"
+ TRAFFIC_LEFT_PATTERN = r'"/extend_premium\.php">Until (\d+ \w+ \d+)<br'
+ LOGIN_FAILED_PATTERN = r'<span htmlfor="loginUser(Name|Password)" generated="true" class="fail_info">'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load(self.URL + "dashboard.php")
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if m:
+ premium = True
+ validuntil = mktime(strptime(m.group(1), "%d %b %Y"))
+ else:
+ premium = False
+ validuntil = -1
+
+ return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}
+
+
+ def login(self, user, data, req):
+ html = req.load(self.URL + "login.php", post={
+ "loginUserName": user,
+ "loginUserPassword": data['password'],
+ "loginFormSubmit": "Login",
+ "recaptcha_challenge_field": "",
+ "recaptcha_response_field": "",
+ "recaptcha_shortencode_field": ""})
+
+ if re.search(self.LOGIN_FAILED_PATTERN, html):
+ self.wrongPassword()
diff --git a/pyload/plugins/account/FileomCom.py b/pyload/plugins/account/FileomCom.py
new file mode 100644
index 000000000..04f503b3b
--- /dev/null
+++ b/pyload/plugins/account/FileomCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class FileomCom(XFSAccount):
+ __name__ = "FileomCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Fileom.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "fileom.com"
diff --git a/pyload/plugins/account/FilerNet.py b/pyload/plugins/account/FilerNet.py
new file mode 100644
index 000000000..f9b5efdd3
--- /dev/null
+++ b/pyload/plugins/account/FilerNet.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+import re
+import time
+
+from pyload.plugins.internal.Account import Account
+
+
+class FilerNet(Account):
+ __name__ = "FilerNet"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Filer.net account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ TOKEN_PATTERN = r'_csrf_token" value="([^"]+)" />'
+    VALID_UNTIL_PATTERN = r'Der Premium-Zugang ist gÃŒltig bis (.+)\.\s*</td>'
+ TRAFFIC_PATTERN = r'Traffic</th>\s*<td>([^<]+)</td>'
+ FREE_PATTERN = r'Account Status</th>\s*<td>\s*Free'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("https://filer.net/profile")
+
+ # Free user
+ if re.search(self.FREE_PATTERN, html):
+ return {"premium": False, "validuntil": None, "trafficleft": None}
+
+        until = re.search(self.VALID_UNTIL_PATTERN, html)
+ traffic = re.search(self.TRAFFIC_PATTERN, html)
+ if until and traffic:
+ validuntil = int(time.mktime(time.strptime(until.group(1), "%d.%m.%Y %H:%M:%S")))
+ trafficleft = self.parseTraffic(traffic.group(1))
+ return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
+ else:
+ self.logError(_("Unable to retrieve account information"))
+ return {"premium": False, "validuntil": None, "trafficleft": None}
+
+
+ def login(self, user, data, req):
+ html = req.load("https://filer.net/login")
+ token = re.search(self.TOKEN_PATTERN, html).group(1)
+ html = req.load("https://filer.net/login_check",
+ post={"_username": user, "_password": data['password'],
+ "_remember_me": "on", "_csrf_token": token, "_target_path": "https://filer.net/"})
+ if 'Logout' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/FilerioCom.py b/pyload/plugins/account/FilerioCom.py
new file mode 100644
index 000000000..27d8df13f
--- /dev/null
+++ b/pyload/plugins/account/FilerioCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class FilerioCom(XFSAccount):
+ __name__ = "FilerioCom"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """FileRio.in account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "filerio.in"
diff --git a/pyload/plugins/account/FilesMailRu.py b/pyload/plugins/account/FilesMailRu.py
new file mode 100644
index 000000000..bfd9d7ffb
--- /dev/null
+++ b/pyload/plugins/account/FilesMailRu.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+
+
+class FilesMailRu(Account):
+ __name__ = "FilesMailRu"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Filesmail.ru account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ def loadAccountInfo(self, user, req):
+ return {"validuntil": None, "trafficleft": None}
+
+
+ def login(self, user, data, req):
+ user, domain = user.split("@")
+
+ page = req.load("http://swa.mail.ru/cgi-bin/auth", None,
+ {"Domain": domain, "Login": user, "Password": data['password'],
+ "Page": "http://files.mail.ru/"}, cookies=True)
+
+ if "НеверМПе ОЌя пПльзПвателя ОлО парПль" in page: # @TODO seems not to work
+ self.wrongPassword()
diff --git a/pyload/plugins/account/FileserveCom.py b/pyload/plugins/account/FileserveCom.py
new file mode 100644
index 000000000..5a014ab03
--- /dev/null
+++ b/pyload/plugins/account/FileserveCom.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class FileserveCom(Account):
+ __name__ = "FileserveCom"
+ __type__ = "account"
+ __version__ = "0.2"
+
+ __description__ = """Fileserve.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+
+ page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
+ "submit": "Submit+Query"})
+ res = json_loads(page)
+
+ if res['type'] == "premium":
+ validuntil = mktime(strptime(res['expireTime'], "%Y-%m-%d %H:%M:%S"))
+ return {"trafficleft": res['traffic'], "validuntil": validuntil}
+ else:
+ return {"premium": False, "trafficleft": None, "validuntil": None}
+
+
+ def login(self, user, data, req):
+ page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
+ "submit": "Submit+Query"})
+ res = json_loads(page)
+
+ if not res['type']:
+ self.wrongPassword()
+
+        # Also log in on the fileserve.com web page
+ req.load("http://www.fileserve.com/login.php",
+ post={"loginUserName": user, "loginUserPassword": data['password'], "autoLogin": "checked",
+ "loginFormSubmit": "Login"})
diff --git a/pyload/plugins/account/FourSharedCom.py b/pyload/plugins/account/FourSharedCom.py
new file mode 100644
index 000000000..20293fa5d
--- /dev/null
+++ b/pyload/plugins/account/FourSharedCom.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class FourSharedCom(Account):
+ __name__ = "FourSharedCom"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """FourShared.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ def loadAccountInfo(self, user, req):
+ # Free mode only for now
+ return {"premium": False}
+
+
+ def login(self, user, data, req):
+ req.cj.setCookie("4shared.com", "4langcookie", "en")
+ res = req.load('http://www.4shared.com/web/login',
+ post={'login': user,
+ 'password': data['password'],
+ 'remember': "on",
+ '_remember': "on",
+ 'returnTo': "http://www.4shared.com/account/home.jsp"})
+
+ if 'Please log in to access your 4shared account' in res:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/FreakshareCom.py b/pyload/plugins/account/FreakshareCom.py
new file mode 100644
index 000000000..12c5277a6
--- /dev/null
+++ b/pyload/plugins/account/FreakshareCom.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import strptime, mktime
+
+from pyload.plugins.internal.Account import Account
+
+
+class FreakshareCom(Account):
+ __name__ = "FreakshareCom"
+ __type__ = "account"
+ __version__ = "0.11"
+
+ __description__ = """Freakshare.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ def loadAccountInfo(self, user, req):
+ page = req.load("http://freakshare.com/")
+
+ validuntil = r'ltig bis:</td>\s*<td><b>([\d.:-]+)</b></td>'
+ validuntil = re.search(validuntil, page, re.M)
+ validuntil = validuntil.group(1).strip()
+ validuntil = mktime(strptime(validuntil, "%d.%m.%Y - %H:%M"))
+
+ traffic = r'Traffic verbleibend:</td>\s*<td>([^<]+)'
+ traffic = re.search(traffic, page, re.M)
+ traffic = traffic.group(1).strip()
+ traffic = self.parseTraffic(traffic)
+
+ return {"validuntil": validuntil, "trafficleft": traffic}
+
+
+ def login(self, user, data, req):
+ req.load("http://freakshare.com/index.php?language=EN")
+
+ page = req.load("http://freakshare.com/login.html", None,
+ {"submit": "Login", "user": user, "pass": data['password']}, cookies=True)
+
+ if ">Wrong Username or Password" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/FreeWayMe.py b/pyload/plugins/account/FreeWayMe.py
new file mode 100644
index 000000000..db879c052
--- /dev/null
+++ b/pyload/plugins/account/FreeWayMe.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class FreeWayMe(Account):
+ __name__ = "FreeWayMe"
+ __type__ = "account"
+ __version__ = "0.11"
+
+ __description__ = """FreeWayMe account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Nicolas Giese", "james@free-way.me")]
+
+
+ def loadAccountInfo(self, user, req):
+ status = self.getAccountStatus(user, req)
+ if not status:
+ return False
+ self.logDebug(status)
+
+ account_info = {"validuntil": -1, "premium": False}
+ if status['premium'] == "Free":
+ account_info['trafficleft'] = int(status['guthaben']) * 1024
+ elif status['premium'] == "Spender":
+ account_info['trafficleft'] = -1
+ elif status['premium'] == "Flatrate":
+ account_info = {"validuntil": int(status['Flatrate']),
+ "trafficleft": -1,
+ "premium": True}
+
+ return account_info
+
+
+ def getpw(self, user):
+ return self.accounts[user]['password']
+
+
+ def login(self, user, data, req):
+ status = self.getAccountStatus(user, req)
+
+ # Check if user and password are valid
+ if not status:
+ self.wrongPassword()
+
+
+ def getAccountStatus(self, user, req):
+ answer = req.load("https://www.free-way.me/ajax/jd.php",
+ get={"id": 4, "user": user, "pass": self.accounts[user]['password']})
+ self.logDebug("Login: %s" % answer)
+ if answer == "Invalid login":
+ self.wrongPassword()
+ return False
+ return json_loads(answer)
diff --git a/pyload/plugins/account/FshareVn.py b/pyload/plugins/account/FshareVn.py
new file mode 100644
index 000000000..ee8aacaf9
--- /dev/null
+++ b/pyload/plugins/account/FshareVn.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+from pycurl import REFERER
+import re
+
+from pyload.plugins.internal.Account import Account
+
+
+class FshareVn(Account):
+ __name__ = "FshareVn"
+ __type__ = "account"
+ __version__ = "0.07"
+
+ __description__ = """Fshare.vn account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ VALID_UNTIL_PATTERN = ur'<dt>Thời hạn dùng:</dt>\s*<dd>([^<]+)</dd>'
+ LIFETIME_PATTERN = ur'<dt>Lần đăng nhập trước:</dt>\s*<dd>[^<]+</dd>'
+ TRAFFIC_LEFT_PATTERN = ur'<dt>Tổng Dung Lượng Tài Khoản</dt>\s*<dd[^>]*>([\d.]+) ([kKMG])B</dd>'
+ DIRECT_DOWNLOAD_PATTERN = ur'<input type="checkbox"\s*([^=>]*)[^>]*/>Kích hoạt download trực tiếp</dt>'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.fshare.vn/account_info.php", decode=True)
+
+ if re.search(self.LIFETIME_PATTERN, html):
+ self.logDebug("Lifetime membership detected")
+            trafficleft = self.getTrafficLeft(html)
+ return {"validuntil": -1, "trafficleft": trafficleft, "premium": True}
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ premium = True
+ validuntil = mktime(strptime(m.group(1), '%I:%M:%S %p %d-%m-%Y'))
+            trafficleft = self.getTrafficLeft(html)
+ else:
+ premium = False
+ validuntil = None
+ trafficleft = None
+
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+
+ def login(self, user, data, req):
+ req.http.c.setopt(REFERER, "https://www.fshare.vn/login.php")
+
+ html = req.load('https://www.fshare.vn/login.php', post={
+ "login_password": data['password'],
+ "login_useremail": user,
+ "url_refe": "http://www.fshare.vn/index.php"
+ }, referer=True, decode=True)
+
+ if not re.search(r'<img\s+alt="VIP"', html):
+ self.wrongPassword()
+
+
+    def getTrafficLeft(self, html):
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ return float(m.group(1)) * 1024 ** {'k': 0, 'K': 0, 'M': 1, 'G': 2}[m.group(2)] if m else 0
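getTrafficLeft() converts the captured number to KiB by looking up a power of 1024 per unit letter; the same mapping spelled out on a hypothetical value:

    multiplier = {'k': 0, 'K': 0, 'M': 1, 'G': 2}     # exponent of 1024 relative to KiB

    def to_kib(value, unit):
        return float(value) * 1024 ** multiplier[unit]

    print(to_kib("1.5", "G"))   # 1572864.0 KiB == 1.5 GiB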
diff --git a/pyload/plugins/account/Ftp.py b/pyload/plugins/account/Ftp.py
new file mode 100644
index 000000000..d439512ce
--- /dev/null
+++ b/pyload/plugins/account/Ftp.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+
+
+class Ftp(Account):
+ __name__ = "Ftp"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Ftp dummy account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ login_timeout = -1 #: Unlimited
+ info_threshold = -1 #: Unlimited
diff --git a/pyload/plugins/account/HellshareCz.py b/pyload/plugins/account/HellshareCz.py
new file mode 100644
index 000000000..1b93cf77d
--- /dev/null
+++ b/pyload/plugins/account/HellshareCz.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+import re
+import time
+
+from pyload.plugins.internal.Account import Account
+
+
+class HellshareCz(Account):
+ __name__ = "HellshareCz"
+ __type__ = "account"
+ __version__ = "0.14"
+
+ __description__ = """Hellshare.cz account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ CREDIT_LEFT_PATTERN = r'<div class="credit-link">\s*<table>\s*<tr>\s*<th>(\d+|\d\d\.\d\d\.)</th>'
+
+
+ def loadAccountInfo(self, user, req):
+ self.relogin(user)
+ html = req.load("http://www.hellshare.com/")
+
+ m = re.search(self.CREDIT_LEFT_PATTERN, html)
+ if m is None:
+ trafficleft = None
+ validuntil = None
+ premium = False
+ else:
+ credit = m.group(1)
+ premium = True
+ try:
+ if "." in credit:
+ #Time-based account
+ vt = [int(x) for x in credit.split('.')[:2]]
+ lt = time.localtime()
+ year = lt.tm_year + int(vt[1] < lt.tm_mon or (vt[1] == lt.tm_mon and vt[0] < lt.tm_mday))
+ validuntil = time.mktime(time.strptime("%s%d 23:59:59" % (credit, year), "%d.%m.%Y %H:%M:%S"))
+ trafficleft = -1
+ else:
+ #Traffic-based account
+ trafficleft = int(credit) * 1024
+ validuntil = -1
+ except Exception, e:
+ self.logError(_("Unable to parse credit info"), e)
+ validuntil = -1
+ trafficleft = -1
+
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+
+ def login(self, user, data, req):
+ html = req.load('http://www.hellshare.com/')
+ if req.lastEffectiveURL != 'http://www.hellshare.com/':
+ #Switch to English
+ self.logDebug("Switch lang - URL: %s" % req.lastEffectiveURL)
+ json = req.load("%s?do=locRouter-show" % req.lastEffectiveURL)
+ hash = re.search(r"(\-\-[0-9a-f]+\-)", json).group(1)
+ self.logDebug("Switch lang - HASH: %s" % hash)
+ html = req.load('http://www.hellshare.com/%s/' % hash)
+
+ if re.search(self.CREDIT_LEFT_PATTERN, html):
+ self.logDebug("Already logged in")
+ return
+
+ html = req.load('http://www.hellshare.com/login?do=loginForm-submit', post={
+ "login": "Log in",
+ "password": data['password'],
+ "username": user,
+ "perm_login": "on"
+ })
+
+ if "<p>You input a wrong user name or wrong password</p>" in html:
+ self.wrongPassword()
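For time-based accounts the credit field only carries day and month ("DD.MM."), so the code infers the year: if that day/month has already passed, the expiry must lie in the next calendar year. The same logic in isolation, with a hypothetical credit string:

    import time

    credit = "05.01."                                   # hypothetical "DD.MM." value
    vt = [int(x) for x in credit.split('.')[:2]]        # [day, month]
    lt = time.localtime()
    year = lt.tm_year + int(vt[1] < lt.tm_mon or (vt[1] == lt.tm_mon and vt[0] < lt.tm_mday))
    validuntil = time.mktime(time.strptime("%s%d 23:59:59" % (credit, year), "%d.%m.%Y %H:%M:%S"))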
diff --git a/pyload/plugins/account/Http.py b/pyload/plugins/account/Http.py
new file mode 100644
index 000000000..8c0aeb2e4
--- /dev/null
+++ b/pyload/plugins/account/Http.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+
+
+class Http(Account):
+ __name__ = "Http"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Http dummy account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ login_timeout = -1 #: Unlimited
+ info_threshold = -1 #: Unlimited
diff --git a/pyload/plugins/account/HugefilesNet.py b/pyload/plugins/account/HugefilesNet.py
new file mode 100644
index 000000000..242ae118c
--- /dev/null
+++ b/pyload/plugins/account/HugefilesNet.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class HugefilesNet(XFSAccount):
+ __name__ = "HugefilesNet"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Hugefiles.net account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "hugefiles.net"
diff --git a/pyload/plugins/account/HundredEightyUploadCom.py b/pyload/plugins/account/HundredEightyUploadCom.py
new file mode 100644
index 000000000..89d779e52
--- /dev/null
+++ b/pyload/plugins/account/HundredEightyUploadCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class HundredEightyUploadCom(XFSAccount):
+ __name__ = "HundredEightyUploadCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """180upload.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "180upload.com"
diff --git a/pyload/plugins/account/JunocloudMe.py b/pyload/plugins/account/JunocloudMe.py
new file mode 100644
index 000000000..14fe8ddd7
--- /dev/null
+++ b/pyload/plugins/account/JunocloudMe.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class JunocloudMe(XFSAccount):
+ __name__ = "JunocloudMe"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Junocloud.me account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "junocloud.me"
diff --git a/pyload/plugins/account/Keep2shareCc.py b/pyload/plugins/account/Keep2shareCc.py
new file mode 100644
index 000000000..172cedfc2
--- /dev/null
+++ b/pyload/plugins/account/Keep2shareCc.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import gmtime, mktime, strptime
+
+from pyload.plugins.internal.Account import Account
+
+
+class Keep2shareCc(Account):
+ __name__ = "Keep2shareCc"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Keep2share.cc account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("aeronaut", "aeronaut@pianoguy.de")]
+
+
+ VALID_UNTIL_PATTERN = r'Premium expires: <b>(.+?)</b>'
+ TRAFFIC_LEFT_PATTERN = r'Available traffic \(today\):<b><a href="/user/statistic.html">(.+?)</a>'
+
+ LOGIN_FAIL_PATTERN = r'Please fix the following input errors'
+
+
+ def loadAccountInfo(self, user, req):
+ validuntil = None
+ trafficleft = None
+ premium = None
+
+ html = req.load("http://keep2share.cc/site/profile.html", decode=True)
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ expiredate = m.group(1).strip()
+ self.logDebug("Expire date: " + expiredate)
+
+ try:
+ validuntil = mktime(strptime(expiredate, "%Y.%m.%d"))
+
+ except Exception, e:
+ self.logError(e)
+
+ else:
+ if validuntil > mktime(gmtime()):
+ premium = True
+ else:
+ premium = False
+ validuntil = None
+
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if m:
+ try:
+ trafficleft = self.parseTraffic(m.group(1))
+
+ except Exception, e:
+ self.logError(e)
+
+ return {'validuntil': validuntil, 'trafficleft': trafficleft, 'premium': premium}
+
+
+ def login(self, user, data, req):
+ req.cj.setCookie("keep2share.cc", "lang", "en")
+
+ html = req.load("http://keep2share.cc/login.html",
+ post={'LoginForm[username]': user, 'LoginForm[password]': data['password']})
+
+ if re.search(self.LOGIN_FAIL_PATTERN, html):
+ self.wrongPassword()
diff --git a/pyload/plugins/account/KingfilesNet.py b/pyload/plugins/account/KingfilesNet.py
new file mode 100644
index 000000000..ebd5baec3
--- /dev/null
+++ b/pyload/plugins/account/KingfilesNet.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import SolveMedia
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class KingfilesNet(SimpleHoster):
+ __name__ = "KingfilesNet"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?kingfiles\.net/(?P<ID>\w{12})'
+
+ __description__ = """Kingfiles.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'name="fname" value="(?P<N>.+?)">'
+ SIZE_PATTERN = r'>Size: .+?">(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ OFFLINE_PATTERN = r'>(File Not Found</b><br><br>|File Not Found</h2>)'
+
+ RAND_ID_PATTERN = r'type=\"hidden\" name=\"rand\" value=\"(.+)\">'
+
+ LINK_PATTERN = r'var download_url = \'(.+)\';'
+
+
+ def setup(self):
+ self.multiDL = True
+ self.resumeDownload = True
+
+
+ def handleFree(self):
+ # Click the free user button
+ post_data = {'op': "download1",
+ 'usr_login': "",
+ 'id': self.info['ID'],
+ 'fname': self.pyfile.name,
+ 'referer': "",
+ 'method_free': "+"}
+
+ self.html = self.load(self.pyfile.url, post=post_data, cookies=True, decode=True)
+
+ solvemedia = SolveMedia(self)
+ captcha_challenge, captcha_response = solvemedia.challenge()
+
+        # Make the download link appear and load the file
+ m = re.search(self.RAND_ID_PATTERN, self.html)
+ if m is None:
+ self.error(_("Random key not found"))
+
+ rand = m.group(1)
+ self.logDebug("rand = ", rand)
+
+ post_data = {'op': "download2",
+ 'id': self.info['ID'],
+ 'rand': rand,
+ 'referer': self.pyfile.url,
+ 'method_free': "+",
+ 'method_premium': "",
+ 'adcopy_response': captcha_response,
+ 'adcopy_challenge': captcha_challenge,
+ 'down_direct': "1"}
+
+ self.html = self.load(self.pyfile.url, post=post_data, cookies=True, decode=True)
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Download url not found"))
+
+ self.download(m.group(1), cookies=True, disposition=True)
+
+ check = self.checkDownload({'html': re.compile("<html>")})
+ if check == "html":
+ self.error(_("Downloaded file is an html page"))
+
+
+getInfo = create_getInfo(KingfilesNet)
diff --git a/pyload/plugins/account/LetitbitNet.py b/pyload/plugins/account/LetitbitNet.py
new file mode 100644
index 000000000..5bf6de00c
--- /dev/null
+++ b/pyload/plugins/account/LetitbitNet.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+# from pyload.utils import json_loads, json_dumps
+
+
+class LetitbitNet(Account):
+ __name__ = "LetitbitNet"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Letitbit.net account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def loadAccountInfo(self, user, req):
+        ## DISABLED BECAUSE IT GETS 'key exausted' EVEN IF VALID ##
+ # api_key = self.accounts[user]['password']
+ # json_data = [api_key, ['key/info']]
+ # api_rep = req.load('http://api.letitbit.net/json', post={'r': json_dumps(json_data)})
+ # self.logDebug("API Key Info: " + api_rep)
+ # api_rep = json_loads(api_rep)
+ #
+ # if api_rep['status'] == 'FAIL':
+ # self.logWarning(api_rep['data'])
+ # return {'valid': False, 'premium': False}
+
+ return {"premium": True}
+
+
+ def login(self, user, data, req):
+ # API_KEY is the username and the PREMIUM_KEY is the password
+ self.logInfo(_("You must use your API KEY as username and the PREMIUM KEY as password"))
diff --git a/pyload/plugins/account/LinestorageCom.py b/pyload/plugins/account/LinestorageCom.py
new file mode 100644
index 000000000..351d66d48
--- /dev/null
+++ b/pyload/plugins/account/LinestorageCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class LinestorageCom(XFSAccount):
+ __name__ = "LinestorageCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Linestorage.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "linestorage.com"
diff --git a/pyload/plugins/account/LinksnappyCom.py b/pyload/plugins/account/LinksnappyCom.py
new file mode 100644
index 000000000..028465522
--- /dev/null
+++ b/pyload/plugins/account/LinksnappyCom.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+from hashlib import md5
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class LinksnappyCom(Account):
+ __name__ = "LinksnappyCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Linksnappy.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ r = req.load('http://gen.linksnappy.com/lseAPI.php',
+ get={'act': 'USERDETAILS', 'username': user, 'password': md5(data['password']).hexdigest()})
+ self.logDebug("JSON data: " + r)
+ j = json_loads(r)
+
+ if j['error']:
+ return {"premium": False}
+
+ validuntil = j['return']['expire']
+ if validuntil == 'lifetime':
+ validuntil = -1
+ elif validuntil == 'expired':
+ return {"premium": False}
+ else:
+ validuntil = float(validuntil)
+
+ if 'trafficleft' not in j['return'] or isinstance(j['return']['trafficleft'], str):
+ trafficleft = -1
+ else:
+ trafficleft = int(j['return']['trafficleft']) * 1024
+
+ return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
+
+
+ def login(self, user, data, req):
+ r = req.load('http://gen.linksnappy.com/lseAPI.php',
+ get={'act': 'USERDETAILS', 'username': user, 'password': md5(data['password']).hexdigest()})
+
+ if 'Invalid Account Details' in r:
+ self.wrongPassword()
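Both calls above authenticate by sending the MD5 hex digest of the password as a GET parameter instead of the password itself; the digest is simply:

    from hashlib import md5

    print(md5("correct horse").hexdigest())   # hypothetical password; .encode() the argument on Python 3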
diff --git a/pyload/plugins/account/LomafileCom.py b/pyload/plugins/account/LomafileCom.py
new file mode 100644
index 000000000..937b24fae
--- /dev/null
+++ b/pyload/plugins/account/LomafileCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class LomafileCom(XFSAccount):
+ __name__ = "LomafileCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Lomafile.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "lomafile.com"
diff --git a/pyload/plugins/account/MegaDebridEu.py b/pyload/plugins/account/MegaDebridEu.py
new file mode 100644
index 000000000..3db0caa35
--- /dev/null
+++ b/pyload/plugins/account/MegaDebridEu.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class MegaDebridEu(Account):
+ __name__ = "MegaDebridEu"
+ __type__ = "account"
+ __version__ = "0.2"
+
+ __description__ = """mega-debrid.eu account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("D.Ducatel", "dducatel@je-geek.fr")]
+
+
+ # Define the base URL of MegaDebrid api
+ API_URL = "https://www.mega-debrid.eu/api.php"
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ jsonResponse = req.load(self.API_URL,
+ get={'action': 'connectUser', 'login': user, 'password': data['password']})
+ res = json_loads(jsonResponse)
+
+ if res['response_code'] == "ok":
+ return {"premium": True, "validuntil": float(res['vip_end']), "status": True}
+ else:
+ self.logError(res)
+ return {"status": False, "premium": False}
+
+
+ def login(self, user, data, req):
+ jsonResponse = req.load(self.API_URL,
+ get={'action': 'connectUser', 'login': user, 'password': data['password']})
+ res = json_loads(jsonResponse)
+ if res['response_code'] != "ok":
+ self.wrongPassword()
diff --git a/pyload/plugins/account/MegaRapidCz.py b/pyload/plugins/account/MegaRapidCz.py
new file mode 100644
index 000000000..a75e313ff
--- /dev/null
+++ b/pyload/plugins/account/MegaRapidCz.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import mktime, strptime
+from pyload.plugins.internal.Account import Account
+
+
+class MegaRapidCz(Account):
+ __name__ = "MegaRapidCz"
+ __type__ = "account"
+ __version__ = "0.34"
+
+ __description__ = """MegaRapid.cz account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("MikyWoW", "mikywow@seznam.cz"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ login_timeout = 60
+
+ LIMITDL_PATTERN = ur'<td>Max. počet paralelních stahování: </td><td>(\d+)'
+ VALID_UNTIL_PATTERN = ur'<td>Paušální stahování aktivní. Vyprší </td><td><strong>(.*?)</strong>'
+ TRAFFIC_LEFT_PATTERN = r'<tr><td>Kredit</td><td>(.*?) GiB'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://megarapid.cz/mujucet/", decode=True)
+
+ m = re.search(self.LIMITDL_PATTERN, html)
+ if m:
+ data = self.getAccountData(user)
+ data['options']['limitDL'] = [int(m.group(1))]
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ validuntil = mktime(strptime(m.group(1), "%d.%m.%Y - %H:%M"))
+ return {"premium": True, "trafficleft": -1, "validuntil": validuntil}
+
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if m:
+ trafficleft = float(m.group(1)) * (1 << 20)
+ return {"premium": True, "trafficleft": trafficleft, "validuntil": -1}
+
+ return {"premium": False, "trafficleft": None, "validuntil": None}
+
+
+ def login(self, user, data, req):
+ htm = req.load("http://megarapid.cz/prihlaseni/")
+ if "Heslo:" in htm:
+ start = htm.index('id="inp_hash" name="hash" value="')
+ htm = htm[start + 33:]
+ hashes = htm[0:32]
+ htm = req.load("http://megarapid.cz/prihlaseni/",
+ post={"hash": hashes,
+ "login": user,
+ "pass1": data['password'],
+ "remember": 0,
+ "sbmt": u"Přihlásit"})
diff --git a/pyload/plugins/account/MegasharesCom.py b/pyload/plugins/account/MegasharesCom.py
new file mode 100644
index 000000000..b2f439b45
--- /dev/null
+++ b/pyload/plugins/account/MegasharesCom.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime
+
+from pyload.plugins.internal.Account import Account
+
+
+class MegasharesCom(Account):
+ __name__ = "MegasharesCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Megashares.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ VALID_UNTIL_PATTERN = r'<p class="premium_info_box">Period Ends: (\w{3} \d{1,2}, \d{4})</p>'
+
+
+ def loadAccountInfo(self, user, req):
+ #self.relogin(user)
+ html = req.load("http://d01.megashares.com/myms.php", decode=True)
+
+ premium = False if '>Premium Upgrade<' in html else True
+
+ validuntil = trafficleft = -1
+ try:
+ timestr = re.search(self.VALID_UNTIL_PATTERN, html).group(1)
+ self.logDebug(timestr)
+ validuntil = mktime(strptime(timestr, "%b %d, %Y"))
+ except Exception, e:
+ self.logError(e)
+
+ return {"validuntil": validuntil, "trafficleft": -1, "premium": premium}
+
+
+ def login(self, user, data, req):
+ html = req.load('http://d01.megashares.com/myms_login.php', post={
+ "httpref": "",
+ "myms_login": "Login",
+ "mymslogin_name": user,
+ "mymspassword": data['password']
+ }, decode=True)
+
+ if not '<span class="b ml">%s</span>' % user in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/MovReelCom.py b/pyload/plugins/account/MovReelCom.py
new file mode 100644
index 000000000..7a00af5d3
--- /dev/null
+++ b/pyload/plugins/account/MovReelCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class MovReelCom(XFSAccount):
+ __name__ = "MovReelCom"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Movreel.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("t4skforce", "t4skforce1337[AT]gmail[DOT]com")]
+
+
+ login_timeout = 60
+ info_threshold = 30
+
+ HOSTER_DOMAIN = "movreel.com"
diff --git a/pyload/plugins/account/MultishareCz.py b/pyload/plugins/account/MultishareCz.py
new file mode 100644
index 000000000..9dc5b1ff0
--- /dev/null
+++ b/pyload/plugins/account/MultishareCz.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Account import Account
+
+
+class MultishareCz(Account):
+ __name__ = "MultishareCz"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Multishare.cz account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ TRAFFIC_LEFT_PATTERN = r'<span class="profil-zvyrazneni">Kredit:</span>\s*<strong>(?P<S>[\d.,]+)&nbsp;(?P<U>[\w^_]+)</strong>'
+ ACCOUNT_INFO_PATTERN = r'<input type="hidden" id="(u_ID|u_hash)" name="[^"]*" value="([^"]+)">'
+
+
+ def loadAccountInfo(self, user, req):
+ #self.relogin(user)
+ html = req.load("http://www.multishare.cz/profil/", decode=True)
+
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ trafficleft = self.parseTraffic(m.group('S'), m.group('U')) if m else 0
+ self.premium = True if trafficleft else False
+
+ html = req.load("http://www.multishare.cz/", decode=True)
+ mms_info = dict(re.findall(self.ACCOUNT_INFO_PATTERN, html))
+
+ return dict(mms_info, **{"validuntil": -1, "trafficleft": trafficleft})
+
+
+ def login(self, user, data, req):
+ html = req.load('http://www.multishare.cz/html/prihlaseni_process.php', post={
+ "akce": "Přihlásit",
+ "heslo": data['password'],
+ "jmeno": user
+ }, decode=True)
+
+ if '<div class="akce-chyba akce">' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/MyfastfileCom.py b/pyload/plugins/account/MyfastfileCom.py
new file mode 100644
index 000000000..acc09ad8b
--- /dev/null
+++ b/pyload/plugins/account/MyfastfileCom.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+from time import time
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class MyfastfileCom(Account):
+ __name__ = "MyfastfileCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Myfastfile.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def loadAccountInfo(self, user, req):
+ if 'days_left' in self.json_data:
+ validuntil = int(time() + self.json_data['days_left'] * 24 * 60 * 60)
+ return {"premium": True, "validuntil": validuntil, "trafficleft": -1}
+ else:
+ self.logError(_("Unable to get account information"))
+
+
+ def login(self, user, data, req):
+ # Password to use is the API-Password written in http://myfastfile.com/myaccount
+ html = req.load("http://myfastfile.com/api.php",
+ get={"user": user, "pass": data['password']})
+ self.logDebug("JSON data: " + html)
+ self.json_data = json_loads(html)
+ if self.json_data['status'] != 'ok':
+ self.logError(_('Invalid login. The password to use is the API-Password you find in your "My Account" page'))
+ self.wrongPassword()
diff --git a/pyload/plugins/account/NetloadIn.py b/pyload/plugins/account/NetloadIn.py
new file mode 100644
index 000000000..6e780225a
--- /dev/null
+++ b/pyload/plugins/account/NetloadIn.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import time
+
+from pyload.plugins.internal.Account import Account
+
+
+class NetloadIn(Account):
+ __name__ = "NetloadIn"
+ __type__ = "account"
+ __version__ = "0.22"
+
+ __description__ = """Netload.in account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("CryNickSystems", "webmaster@pcProfil.de")]
+
+
+ def loadAccountInfo(self, user, req):
+ page = req.load("http://netload.in/index.php?id=2&lang=de")
+ left = r'>(\d+) (Tag|Tage), (\d+) Stunden<'
+ left = re.search(left, page)
+ if left:
+ validuntil = time() + int(left.group(1)) * 24 * 60 * 60 + int(left.group(3)) * 60 * 60
+ trafficleft = -1
+ premium = True
+ else:
+ validuntil = None
+ premium = False
+ trafficleft = None
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+
+ def login(self, user, data, req):
+ page = req.load("http://netload.in/index.php", None,
+ {"txtuser": user, "txtpass": data['password'], "txtcheck": "login", "txtlogin": "Login"},
+ cookies=True)
+ if "password or it might be invalid!" in page:
+ self.wrongPassword()
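The premium check above turns the remaining "Tage/Stunden" captured from the German page into an absolute expiry time by adding the corresponding number of seconds to now; for example:

    from time import time

    days, hours = 3, 7                                   # hypothetical values from regex groups 1 and 3
    validuntil = time() + days * 24 * 60 * 60 + hours * 60 * 60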
diff --git a/pyload/plugins/account/NosuploadCom.py b/pyload/plugins/account/NosuploadCom.py
new file mode 100644
index 000000000..9c11699ec
--- /dev/null
+++ b/pyload/plugins/account/NosuploadCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class NosuploadCom(XFSAccount):
+ __name__ = "NosuploadCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Nosupload.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "nosupload.com"
diff --git a/pyload/plugins/account/NovafileCom.py b/pyload/plugins/account/NovafileCom.py
new file mode 100644
index 000000000..cb3a639f5
--- /dev/null
+++ b/pyload/plugins/account/NovafileCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class NovafileCom(XFSAccount):
+ __name__ = "NovafileCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Novafile.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "novafile.com"
diff --git a/pyload/plugins/account/NowVideoAt.py b/pyload/plugins/account/NowVideoAt.py
new file mode 100644
index 000000000..7af8bc4bc
--- /dev/null
+++ b/pyload/plugins/account/NowVideoAt.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import gmtime, mktime, strptime
+
+from pyload.plugins.internal.Account import Account
+
+
+class NowVideoAt(Account):
+ __name__ = "NowVideoAt"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """NowVideo.at account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ VALID_UNTIL_PATTERN = r'>Your premium membership expires on: (.+?)<'
+
+
+ def loadAccountInfo(self, user, req):
+ validuntil = None
+ trafficleft = -1
+ premium = None
+
+ html = req.load("http://www.nowvideo.at/premium.php")
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ expiredate = m.group(1).strip()
+ self.logDebug("Expire date: " + expiredate)
+
+ try:
+ validuntil = mktime(strptime(expiredate, "%Y-%b-%d"))
+
+ except Exception, e:
+ self.logError(e)
+
+ else:
+ if validuntil > mktime(gmtime()):
+ premium = True
+ else:
+ premium = False
+ validuntil = -1
+
+ return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+
+ def login(self, user, data, req):
+ html = req.load("http://www.nowvideo.at/login.php",
+ post={'user': user, 'pass': data['password']})
+
+ if ">Invalid login details" is html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/OboomCom.py b/pyload/plugins/account/OboomCom.py
new file mode 100644
index 000000000..4f7f476e2
--- /dev/null
+++ b/pyload/plugins/account/OboomCom.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+import time
+
+from beaker.crypto.pbkdf2 import PBKDF2
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Account import Account
+
+
+class OboomCom(Account):
+ __name__ = "OboomCom"
+ __type__ = "account"
+ __version__ = "0.2"
+
+ __description__ = """Oboom.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stanley", "stanley.foerster@gmail.com")]
+
+
+ def loadAccountData(self, user, req):
+ passwd = self.getAccountData(user)['password']
+ salt = passwd[::-1]
+ pbkdf2 = PBKDF2(passwd, salt, 1000).hexread(16)
+ result = json_loads(req.load("https://www.oboom.com/1/login", get={"auth": user, "pass": pbkdf2}))
+ if not result[0] == 200:
+ self.logWarning(_("Failed to log in: %s") % result[1])
+ self.wrongPassword()
+ return result[1]
+
+
+ def loadAccountInfo(self, name, req):
+ accountData = self.loadAccountData(name, req)
+
+ userData = accountData['user']
+
+ if userData['premium'] == "null":
+ premium = False
+ else:
+ premium = True
+
+ if userData['premium_unix'] == "null":
+ validUntil = -1
+ else:
+ validUntil = int(userData['premium_unix'])
+
+ traffic = userData['traffic']
+
+ trafficLeft = traffic['current']
+ maxTraffic = traffic['max']
+
+ session = accountData['session']
+
+ return {'premium': premium,
+ 'validuntil': validUntil,
+ 'trafficleft': trafficLeft,
+ 'maxtraffic': maxTraffic,
+ 'session': session}
+
+
+ def login(self, user, data, req):
+ self.loadAccountData(user, req)
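The login derives the API password with PBKDF2, using the reversed plaintext password as the salt and 1000 iterations, then sends the first 16 derived bytes as hex. A rough standard-library equivalent, assuming beaker's PBKDF2 defaults to HMAC-SHA1 (an assumption here):

    import binascii, hashlib

    passwd = "secret"                  # hypothetical account password
    salt = passwd[::-1]                # the plugin reverses the password to build the salt
    key = hashlib.pbkdf2_hmac("sha1", passwd.encode(), salt.encode(), 1000, dklen=16)
    print(binascii.hexlify(key))       # comparable to PBKDF2(passwd, salt, 1000).hexread(16)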
diff --git a/pyload/plugins/account/OneFichierCom.py b/pyload/plugins/account/OneFichierCom.py
new file mode 100644
index 000000000..7f1140e67
--- /dev/null
+++ b/pyload/plugins/account/OneFichierCom.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import strptime, mktime
+
+from pycurl import REFERER
+
+from pyload.plugins.internal.Account import Account
+
+
+class OneFichierCom(Account):
+ __name__ = "OneFichierCom"
+ __type__ = "account"
+ __version__ = "0.11"
+
+ __description__ = """1fichier.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Elrick69", "elrick69[AT]rocketmail[DOT]com"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ VALID_UNTIL_PATTERN = r'Your Premium Status will end the (\d+/\d+/\d+)'
+
+
+ def loadAccountInfo(self, user, req):
+ validuntil = None
+ trafficleft = -1
+ premium = None
+
+ html = req.load("https://1fichier.com/console/abo.pl")
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ expiredate = m.group(1)
+ self.logDebug("Expire date: " + expiredate)
+
+ try:
+ validuntil = mktime(strptime(expiredate, "%d/%m/%Y"))
+ except Exception, e:
+ self.logError(e)
+ else:
+ premium = True
+
+ return {'validuntil': validuntil, 'trafficleft': trafficleft, 'premium': premium or False}
+
+
+ def login(self, user, data, req):
+ req.http.c.setopt(REFERER, "https://1fichier.com/login.pl?lg=en")
+
+ html = req.load("https://1fichier.com/login.pl?lg=en",
+ post={'mail': user, 'pass': data['password'], 'It': "on", 'purge': "off", 'valider': "Send"})
+
+ if '>Invalid email address' in html or '>Invalid password' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/OverLoadMe.py b/pyload/plugins/account/OverLoadMe.py
new file mode 100644
index 000000000..cda4bc82f
--- /dev/null
+++ b/pyload/plugins/account/OverLoadMe.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class OverLoadMe(Account):
+ __name__ = "OverLoadMe"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Over-Load.me account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("marley", "marley@over-load.me")]
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ page = req.load("https://api.over-load.me/account.php", get={"user": user, "auth": data['password']}).strip()
+ data = json_loads(page)
+
+ # Check for premium
+ if data['membership'] == "Free":
+ return {"premium": False}
+
+ account_info = {"validuntil": data['expirationunix'], "trafficleft": -1}
+ return account_info
+
+
+ def login(self, user, data, req):
+ jsondata = req.load("https://api.over-load.me/account.php",
+ get={"user": user, "auth": data['password']}).strip()
+ data = json_loads(jsondata)
+
+ if data['err'] == 1:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/PremiumTo.py b/pyload/plugins/account/PremiumTo.py
new file mode 100644
index 000000000..f7a00e194
--- /dev/null
+++ b/pyload/plugins/account/PremiumTo.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+
+
+class PremiumTo(Account):
+ __name__ = "PremiumTo"
+ __type__ = "account"
+ __version__ = "0.04"
+
+ __description__ = """Premium.to account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+
+ def loadAccountInfo(self, user, req):
+ api_r = req.load("http://premium.to/api/straffic.php",
+ get={'username': self.username, 'password': self.password})
+ traffic = sum(map(int, api_r.split(';')))
+
+ return {"trafficleft": int(traffic), "validuntil": -1}
+
+
+ def login(self, user, data, req):
+ self.username = user
+ self.password = data['password']
+ authcode = req.load("http://premium.to/api/getauthcode.php?username=%s&password=%s" % (
+ user, self.password)).strip()
+
+ if "wrong username" in authcode:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/PremiumizeMe.py b/pyload/plugins/account/PremiumizeMe.py
new file mode 100644
index 000000000..5f972ca8b
--- /dev/null
+++ b/pyload/plugins/account/PremiumizeMe.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+
+from pyload.utils import json_loads
+
+
+class PremiumizeMe(Account):
+ __name__ = "PremiumizeMe"
+ __type__ = "account"
+ __version__ = "0.11"
+
+ __description__ = """Premiumize.me account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Florian Franzen", "FlorianFranzen@gmail.com")]
+
+
+ def loadAccountInfo(self, user, req):
+ # Get user data from premiumize.me
+ status = self.getAccountStatus(user, req)
+ self.logDebug(status)
+
+ # Parse account info
+ account_info = {"validuntil": float(status['result']['expires']),
+ "trafficleft": max(0, status['result']['trafficleft_bytes'])}
+
+ if status['result']['type'] == 'free':
+ account_info['premium'] = False
+
+ return account_info
+
+
+ def login(self, user, data, req):
+ # Get user data from premiumize.me
+ status = self.getAccountStatus(user, req)
+
+ # Check if user and password are valid
+ if status['status'] != 200:
+ self.wrongPassword()
+
+
+ def getAccountStatus(self, user, req):
+ # Use premiumize.me API v1 (see https://secure.premiumize.me/?show=api)
+ # to retrieve account info and return the parsed json answer
+ answer = req.load(
+ "https://api.premiumize.me/pm-api/v1.php?method=accountstatus&params[login]=%s&params[pass]=%s" % (
+ user, self.accounts[user]['password']))
+ return json_loads(answer)
diff --git a/pyload/plugins/account/QuickshareCz.py b/pyload/plugins/account/QuickshareCz.py
new file mode 100644
index 000000000..40bf9d06d
--- /dev/null
+++ b/pyload/plugins/account/QuickshareCz.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Account import Account
+
+
+class QuickshareCz(Account):
+ __name__ = "QuickshareCz"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Quickshare.cz account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ TRAFFIC_LEFT_PATTERN = r'Stav kreditu: <strong>(.+?)</strong>'
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.quickshare.cz/premium", decode=True)
+
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if m:
+ trafficleft = self.parseTraffic(m.group(1))
+ premium = True if trafficleft else False
+ else:
+ trafficleft = None
+ premium = False
+
+ return {"validuntil": -1, "trafficleft": trafficleft, "premium": premium}
+
+
+ def login(self, user, data, req):
+ html = req.load('http://www.quickshare.cz/html/prihlaseni_process.php', post={
+ "akce": u'Přihlásit',
+ "heslo": data['password'],
+ "jmeno": user
+ }, decode=True)
+
+ if u'>TakovÜ uşivatel neexistuje.<' in html or u'>Špatné heslo.<' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/RPNetBiz.py b/pyload/plugins/account/RPNetBiz.py
new file mode 100644
index 000000000..417ca14a4
--- /dev/null
+++ b/pyload/plugins/account/RPNetBiz.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class RPNetBiz(Account):
+ __name__ = "RPNetBiz"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """RPNet.biz account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Dman", "dmanugm@gmail.com")]
+
+
+ def loadAccountInfo(self, user, req):
+ # Get account information from rpnet.biz
+ res = self.getAccountStatus(user, req)
+ try:
+ if res['accountInfo']['isPremium']:
+ # Parse account info. Change the trafficleft later to support per host info.
+ account_info = {"validuntil": int(res['accountInfo']['premiumExpiry']),
+ "trafficleft": -1, "premium": True}
+ else:
+ account_info = {"validuntil": None, "trafficleft": None, "premium": False}
+
+ except KeyError:
+ #handle wrong password exception
+ account_info = {"validuntil": None, "trafficleft": None, "premium": False}
+
+ return account_info
+
+
+ def login(self, user, data, req):
+ # Get account information from rpnet.biz
+ res = self.getAccountStatus(user, req)
+
+ # If we have an error in the res, we have wrong login information
+ if 'error' in res:
+ self.wrongPassword()
+
+
+ def getAccountStatus(self, user, req):
+ # Using the rpnet API, check if valid premium account
+ res = req.load("https://premium.rpnet.biz/client_api.php",
+ get={"username": user, "password": self.accounts[user]['password'],
+ "action": "showAccountInformation"})
+ self.logDebug("JSON data: %s" % res)
+
+ return json_loads(res)
diff --git a/pyload/plugins/account/RapidfileshareNet.py b/pyload/plugins/account/RapidfileshareNet.py
new file mode 100644
index 000000000..503c731fe
--- /dev/null
+++ b/pyload/plugins/account/RapidfileshareNet.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class RapidfileshareNet(XFSAccount):
+ __name__ = "RapidfileshareNet"
+ __type__ = "account"
+ __version__ = "0.05"
+
+ __description__ = """Rapidfileshare.net account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "rapidfileshare.net"
+
+ TRAFFIC_LEFT_PATTERN = r'>Traffic available today:</TD><TD><label for="name">\s*(?P<S>[\d.,]+)\s*(?:(?P<U>[\w^_]+))?'
diff --git a/pyload/plugins/account/RapidgatorNet.py b/pyload/plugins/account/RapidgatorNet.py
new file mode 100644
index 000000000..72a46a928
--- /dev/null
+++ b/pyload/plugins/account/RapidgatorNet.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class RapidgatorNet(Account):
+ __name__ = "RapidgatorNet"
+ __type__ = "account"
+ __version__ = "0.04"
+
+ __description__ = """Rapidgator.net account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ API_URL = 'http://rapidgator.net/api/user'
+
+
+ def loadAccountInfo(self, user, req):
+ try:
+ sid = self.getAccountData(user).get('SID')
+ assert sid
+
+ json = req.load("%s/info?sid=%s" % (self.API_URL, sid))
+ self.logDebug("API:USERINFO", json)
+ json = json_loads(json)
+
+ if json['response_status'] == 200:
+ if "reset_in" in json['response']:
+ self.scheduleRefresh(user, json['response']['reset_in'])
+
+ return {"validuntil": json['response']['expire_date'],
+ "trafficleft": int(json['response']['traffic_left']),
+ "premium": True}
+ else:
+ self.logError(json['response_details'])
+ except Exception, e:
+ self.logError(e)
+
+ return {"validuntil": None, "trafficleft": None, "premium": False}
+
+
+ def login(self, user, data, req):
+ try:
+ json = req.load('%s/login' % self.API_URL, post={"username": user, "password": data['password']})
+ self.logDebug("API:LOGIN", json)
+ json = json_loads(json)
+
+ if json['response_status'] == 200:
+ data['SID'] = str(json['response']['session_id'])
+ return
+ else:
+ self.logError(json['response_details'])
+ except Exception, e:
+ self.logError(e)
+
+ self.wrongPassword()
diff --git a/pyload/plugins/account/RapidshareCom.py b/pyload/plugins/account/RapidshareCom.py
new file mode 100644
index 000000000..01adad15f
--- /dev/null
+++ b/pyload/plugins/account/RapidshareCom.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+
+
+class RapidshareCom(Account):
+ __name__ = "RapidshareCom"
+ __type__ = "account"
+ __version__ = "0.22"
+
+ __description__ = """Rapidshare.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ api_url_base = "http://api.rapidshare.com/cgi-bin/rsapi.cgi"
+ api_param_prem = {"sub": "getaccountdetails", "type": "prem", "login": user,
+ "password": data['password'], "withcookie": 1}
+ html = req.load(api_url_base, cookies=False, get=api_param_prem)
+ if html.startswith("ERROR"):
+ raise Exception(html)
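+        # The response is expected to hold newline-separated "key=value" pairs; parse them into a dict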
+ fields = html.split("\n")
+ info = {}
+ for t in fields:
+ if not t.strip():
+ continue
+ k, v = t.split("=")
+ info[k] = v
+
+ validuntil = int(info['billeduntil'])
+ premium = True if validuntil else False
+
+ tmp = {"premium": premium, "validuntil": validuntil, "trafficleft": -1, "maxtraffic": -1}
+
+ return tmp
+
+
+ def login(self, user, data, req):
+ api_url_base = "http://api.rapidshare.com/cgi-bin/rsapi.cgi"
+ api_param_prem = {"sub": "getaccountdetails", "type": "prem", "login": user,
+ "password": data['password'], "withcookie": 1}
+ html = req.load(api_url_base, cookies=False, get=api_param_prem)
+ if html.startswith("ERROR"):
+ raise Exception(html + "### Note you have to use your account number for login, instead of name")
+ fields = html.split("\n")
+ info = {}
+ for t in fields:
+ if not t.strip():
+ continue
+ k, v = t.split("=")
+ info[k] = v
+ cj = self.getAccountCookies(user)
+ cj.setCookie("rapidshare.com", "enc", info['cookie'])
diff --git a/pyload/plugins/account/RarefileNet.py b/pyload/plugins/account/RarefileNet.py
new file mode 100644
index 000000000..0d5547e57
--- /dev/null
+++ b/pyload/plugins/account/RarefileNet.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class RarefileNet(XFSAccount):
+ __name__ = "RarefileNet"
+ __type__ = "account"
+ __version__ = "0.04"
+
+ __description__ = """RareFile.net account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "rarefile.net"
diff --git a/pyload/plugins/account/RealdebridCom.py b/pyload/plugins/account/RealdebridCom.py
new file mode 100644
index 000000000..b7773d9a0
--- /dev/null
+++ b/pyload/plugins/account/RealdebridCom.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+import xml.dom.minidom as dom
+
+from pyload.plugins.internal.Account import Account
+
+
+class RealdebridCom(Account):
+ __name__ = "RealdebridCom"
+ __type__ = "account"
+ __version__ = "0.43"
+
+ __description__ = """Real-Debrid.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Devirex Hazzard", "naibaf_11@yahoo.de")]
+
+
+ def loadAccountInfo(self, user, req):
+ if self.pin_code:
+ return {"premium": False}
+ page = req.load("https://real-debrid.com/api/account.php")
+ xml = dom.parseString(page)
+ account_info = {"validuntil": int(xml.getElementsByTagName("expiration")[0].childNodes[0].nodeValue),
+ "trafficleft": -1}
+
+ return account_info
+
+
+ def login(self, user, data, req):
+ self.pin_code = False
+ page = req.load("https://real-debrid.com/ajax/login.php", get={"user": user, "pass": data['password']})
+ if "Your login informations are incorrect" in page:
+ self.wrongPassword()
+ elif "PIN Code required" in page:
+            self.logWarning(_("PIN code required. Please log in to https://real-debrid.com using the PIN, or disable two-factor authentication in your control panel on https://real-debrid.com"))
+ self.pin_code = True
diff --git a/pyload/plugins/account/RehostTo.py b/pyload/plugins/account/RehostTo.py
new file mode 100644
index 000000000..e8ee3ba15
--- /dev/null
+++ b/pyload/plugins/account/RehostTo.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+
+
+class RehostTo(Account):
+ __name__ = "RehostTo"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Rehost.to account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAccountData(user)
+ page = req.load("http://rehost.to/api.php?cmd=login&user=%s&pass=%s" % (user, data['password']))
+ data = [x.split("=") for x in page.split(",")]
+ ses = data[0][1]
+ long_ses = data[1][1]
+
+ page = req.load("http://rehost.to/api.php?cmd=get_premium_credits&long_ses=%s" % long_ses)
+ traffic, valid = page.split(",")
+
+ account_info = {"trafficleft": int(traffic) * 1024,
+ "validuntil": int(valid),
+ "long_ses": long_ses,
+ "ses": ses}
+
+ return account_info
+
+
+ def login(self, user, data, req):
+ page = req.load("http://rehost.to/api.php?cmd=login&user=%s&pass=%s" % (user, data['password']))
+
+ if "Login failed." in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/RyushareCom.py b/pyload/plugins/account/RyushareCom.py
new file mode 100644
index 000000000..47ec32c76
--- /dev/null
+++ b/pyload/plugins/account/RyushareCom.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class RyushareCom(XFSAccount):
+ __name__ = "RyushareCom"
+ __type__ = "account"
+ __version__ = "0.05"
+
+ __description__ = """Ryushare.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("trance4us", None)]
+
+
+ HOSTER_DOMAIN = "ryushare.com"
+
+
+ def login(self, user, data, req):
+ req.lastURL = "http://ryushare.com/login.python"
+ html = req.load("http://ryushare.com/login.python",
+ post={"login": user, "password": data['password'], "op": "login"})
+ if 'Incorrect Login or Password' in html or '>Error<' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/SecureUploadEu.py b/pyload/plugins/account/SecureUploadEu.py
new file mode 100644
index 000000000..c98ab2de9
--- /dev/null
+++ b/pyload/plugins/account/SecureUploadEu.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class SecureUploadEu(XFSAccount):
+ __name__ = "SecureUploadEu"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """SecureUpload.eu account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "secureupload.eu"
diff --git a/pyload/plugins/account/SendmywayCom.py b/pyload/plugins/account/SendmywayCom.py
new file mode 100644
index 000000000..6749e6215
--- /dev/null
+++ b/pyload/plugins/account/SendmywayCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class SendmywayCom(XFSAccount):
+ __name__ = "SendmywayCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Sendmyway.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "sendmyway.com"
diff --git a/pyload/plugins/account/ShareonlineBiz.py b/pyload/plugins/account/ShareonlineBiz.py
new file mode 100644
index 000000000..dbbe93869
--- /dev/null
+++ b/pyload/plugins/account/ShareonlineBiz.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+
+
+class ShareonlineBiz(Account):
+ __name__ = "ShareonlineBiz"
+ __type__ = "account"
+ __version__ = "0.24"
+
+ __description__ = """Share-online.biz account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def getUserAPI(self, user, req):
+ return req.load("http://api.share-online.biz/account.php",
+ {"username": user, "password": self.accounts[user]['password'], "act": "userDetails"})
+
+
+ def loadAccountInfo(self, user, req):
+ html = self.getUserAPI(user, req)
+
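+        # The account API is expected to answer with one "key=value" pair per line; collect them into a dict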
+ info = {}
+ for line in html.splitlines():
+ if "=" in line:
+ key, value = line.split("=")
+ info[key] = value
+ self.logDebug(info)
+
+ if "dl" in info and info['dl'].lower() != "not_available":
+ req.cj.setCookie("share-online.biz", "dl", info['dl'])
+ if "a" in info and info['a'].lower() != "not_available":
+ req.cj.setCookie("share-online.biz", "a", info['a'])
+
+ return {"validuntil": int(info['expire_date']) if "expire_date" in info else -1,
+ "trafficleft": -1,
+ "premium": True if ("dl" in info or "a" in info) and (info['group'] != "Sammler") else False}
+
+
+ def login(self, user, data, req):
+ html = self.getUserAPI(user, req)
+ if "EXCEPTION" in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/SimplyPremiumCom.py b/pyload/plugins/account/SimplyPremiumCom.py
new file mode 100644
index 000000000..b3dad84d3
--- /dev/null
+++ b/pyload/plugins/account/SimplyPremiumCom.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Account import Account
+
+
+class SimplyPremiumCom(Account):
+ __name__ = "SimplyPremiumCom"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """Simply-Premium.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("EvolutionClip", "evolutionclip@live.de")]
+
+
+ def loadAccountInfo(self, user, req):
+ json_data = req.load('http://www.simply-premium.com/api/user.php?format=json')
+ self.logDebug("JSON data: " + json_data)
+ json_data = json_loads(json_data)
+
+ if 'vip' in json_data['result'] and json_data['result']['vip'] == 0:
+ return {"premium": False}
+
+ #Time package
+ validuntil = float(json_data['result']['timeend'])
+ #Traffic package
+ # {"trafficleft": int(traffic), "validuntil": -1}
+ #trafficleft = int(json_data['result']['traffic'])
+
+ #return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
+ return {"premium": True, "validuntil": validuntil}
+
+
+ def login(self, user, data, req):
+ req.cj.setCookie("simply-premium.com", "lang", "EN")
+
+ if data['password'] == '' or data['password'] == '0':
+ post_data = {"key": user}
+ else:
+ post_data = {"login_name": user, "login_pass": data['password']}
+
+ html = req.load("http://www.simply-premium.com/login.php", post=post_data)
+
+ if 'logout' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/SimplydebridCom.py b/pyload/plugins/account/SimplydebridCom.py
new file mode 100644
index 000000000..636828146
--- /dev/null
+++ b/pyload/plugins/account/SimplydebridCom.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+
+from pyload.plugins.internal.Account import Account
+
+
+class SimplydebridCom(Account):
+ __name__ = "SimplydebridCom"
+ __type__ = "account"
+ __version__ = "0.1"
+
+ __description__ = """Simply-Debrid.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Kagenoshin", "kagenoshin@gmx.ch")]
+
+
+ def loadAccountInfo(self, user, req):
+ get_data = {'login': 2, 'u': self.loginname, 'p': self.password}
+ res = req.load("http://simply-debrid.com/api.php", get=get_data, decode=True)
+ data = [x.strip() for x in res.split(";")]
+ if str(data[0]) != "1":
+ return {"premium": False}
+ else:
+ return {"trafficleft": -1, "validuntil": mktime(strptime(str(data[2]), "%d/%m/%Y"))}
+
+
+ def login(self, user, data, req):
+ self.loginname = user
+ self.password = data['password']
+ get_data = {'login': 1, 'u': self.loginname, 'p': self.password}
+ res = req.load("http://simply-debrid.com/api.php", get=get_data, decode=True)
+ if res != "02: loggin success":
+ self.wrongPassword()
diff --git a/pyload/plugins/account/StahnuTo.py b/pyload/plugins/account/StahnuTo.py
new file mode 100644
index 000000000..70ef292e7
--- /dev/null
+++ b/pyload/plugins/account/StahnuTo.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Account import Account
+
+
+class StahnuTo(Account):
+ __name__ = "StahnuTo"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """StahnuTo account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.stahnu.to/")
+
+ m = re.search(r'>VIP: (\d+.*)<', html)
+ trafficleft = self.parseTraffic(m.group(1)) * 1024 if m else 0
+
+ return {"premium": trafficleft > (512 * 1024), "trafficleft": trafficleft, "validuntil": -1}
+
+
+ def login(self, user, data, req):
+ html = req.load("http://www.stahnu.to/login.php", post={
+ "username": user,
+ "password": data['password'],
+ "submit": "Login"})
+
+        if '<a href="logout.php">' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/StreamcloudEu.py b/pyload/plugins/account/StreamcloudEu.py
new file mode 100644
index 000000000..a2a333dd5
--- /dev/null
+++ b/pyload/plugins/account/StreamcloudEu.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class StreamcloudEu(XFSAccount):
+ __name__ = "StreamcloudEu"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Streamcloud.eu account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "streamcloud.eu"
diff --git a/pyload/plugins/account/TurbobitNet.py b/pyload/plugins/account/TurbobitNet.py
new file mode 100644
index 000000000..e2f56905c
--- /dev/null
+++ b/pyload/plugins/account/TurbobitNet.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime
+
+from pyload.plugins.internal.Account import Account
+
+
+class TurbobitNet(Account):
+ __name__ = "TurbobitNet"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """TurbobitNet account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://turbobit.net")
+
+ m = re.search(r'<u>Turbo Access</u> to ([\d.]+)', html)
+ if m:
+ premium = True
+ validuntil = mktime(strptime(m.group(1), "%d.%m.%Y"))
+ else:
+ premium = False
+ validuntil = -1
+
+ return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}
+
+
+ def login(self, user, data, req):
+ req.cj.setCookie("turbobit.net", "user_lang", "en")
+
+ html = req.load("http://turbobit.net/user/login", post={
+ "user[login]": user,
+ "user[pass]": data['password'],
+ "user[submit]": "Login"})
+
+        if '<div class="menu-item user-name">' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/TusfilesNet.py b/pyload/plugins/account/TusfilesNet.py
new file mode 100644
index 000000000..9aa8ed543
--- /dev/null
+++ b/pyload/plugins/account/TusfilesNet.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import mktime, strptime, gmtime
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class TusfilesNet(XFSAccount):
+ __name__ = "TusfilesNet"
+ __type__ = "account"
+ __version__ = "0.06"
+
+    __description__ = """Tusfiles.net account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "tusfiles.net"
+
+ VALID_UNTIL_PATTERN = r'<span class="label label-default">([^<]+)</span>'
+ TRAFFIC_LEFT_PATTERN = r'<td><img src="//www\.tusfiles\.net/i/icon/meter\.png" alt=""/></td>\n<td>&nbsp;(?P<S>[\d.,]+)'
diff --git a/pyload/plugins/account/UlozTo.py b/pyload/plugins/account/UlozTo.py
new file mode 100644
index 000000000..2cdab0167
--- /dev/null
+++ b/pyload/plugins/account/UlozTo.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.Account import Account
+
+
+class UlozTo(Account):
+ __name__ = "UlozTo"
+ __type__ = "account"
+ __version__ = "0.07"
+
+ __description__ = """Uloz.to account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("pulpe", None)]
+
+
+ TRAFFIC_LEFT_PATTERN = r'<li class="menu-kredit"><a href="/kredit" title="[^"]*?GB = ([\d.]+) MB"'
+
+
+ def loadAccountInfo(self, user, req):
+ self.phpsessid = req.cj.getCookie("ULOSESSID") #@NOTE: this cookie gets lost somehow after each request
+
+ html = req.load("http://www.ulozto.net/", decode=True)
+
+ req.cj.setCookie("ulozto.net", "ULOSESSID", self.phpsessid)
+
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ trafficleft = int(float(m.group(1).replace(' ', '').replace(',', '.')) * 1000 * 1.048) if m else 0
+ self.premium = True if trafficleft else False
+
+ return {"validuntil": -1, "trafficleft": trafficleft}
+
+
+ def login(self, user, data, req):
+ login_page = req.load('http://www.ulozto.net/?do=web-login', decode=True)
+ action = re.findall('<form action="(.+?)"', login_page)[1].replace('&amp;', '&')
+ token = re.search('_token_" value="(.+?)"', login_page).group(1)
+
+ html = req.load(urljoin("http://www.ulozto.net/", action),
+ post={'_token_' : token,
+ 'do' : "loginForm-submit",
+ 'login' : u"Přihlásit",
+ 'password': data['password'],
+ 'username': user},
+ decode=True)
+
+ if '<div class="flash error">' in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/UnrestrictLi.py b/pyload/plugins/account/UnrestrictLi.py
new file mode 100644
index 000000000..30144ca65
--- /dev/null
+++ b/pyload/plugins/account/UnrestrictLi.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Account import Account
+from pyload.utils import json_loads
+
+
+class UnrestrictLi(Account):
+ __name__ = "UnrestrictLi"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Unrestrict.li account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def loadAccountInfo(self, user, req):
+ json_data = req.load('http://unrestrict.li/api/jdownloader/user.php?format=json')
+ self.logDebug("JSON data: " + json_data)
+ json_data = json_loads(json_data)
+
+ if 'vip' in json_data['result'] and json_data['result']['vip'] == 0:
+ return {"premium": False}
+
+ validuntil = json_data['result']['expires']
+ trafficleft = int(json_data['result']['traffic'])
+
+ return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
+
+
+ def login(self, user, data, req):
+ req.cj.setCookie("unrestrict.li", "lang", "EN")
+ html = req.load("https://unrestrict.li/sign_in")
+
+ if 'solvemedia' in html:
+ self.logError(_("A Captcha is required. Go to http://unrestrict.li/sign_in and login, then retry"))
+ return
+
+ post_data = {"username": user, "password": data['password'],
+ "remember_me": "remember", "signin": "Sign in"}
+ html = req.load("https://unrestrict.li/sign_in", post=post_data)
+
+ if 'sign_out' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/UploadcCom.py b/pyload/plugins/account/UploadcCom.py
new file mode 100644
index 000000000..8f437eb01
--- /dev/null
+++ b/pyload/plugins/account/UploadcCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class UploadcCom(XFSAccount):
+ __name__ = "UploadcCom"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """Uploadc.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "uploadc.com"
diff --git a/pyload/plugins/account/UploadedTo.py b/pyload/plugins/account/UploadedTo.py
new file mode 100644
index 000000000..f5c5d22b2
--- /dev/null
+++ b/pyload/plugins/account/UploadedTo.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import time
+
+from pyload.plugins.internal.Account import Account
+
+
+class UploadedTo(Account):
+ __name__ = "UploadedTo"
+ __type__ = "account"
+ __version__ = "0.27"
+
+ __description__ = """Uploaded.to account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+ PREMIUM_PATTERN = r'<em>Premium</em>'
+ VALID_UNTIL_PATTERN = r'<td>Duration:</td>\s*<th>([^<]+)'
+ TRAFFIC_LEFT_PATTERN = r'<th colspan="2"><b class="cB">([^<]+)'
+
+
+ def loadAccountInfo(self, user, req):
+ validuntil = None
+ trafficleft = None
+ premium = None
+
+ html = req.load("http://uploaded.net/me")
+
+ premium = True if re.search(self.PREMIUM_PATTERN, html) else False
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html, re.M)
+ if m:
+ expiredate = m.group(1).strip()
+
+ if expiredate == "unlimited":
+ validuntil = -1
+ else:
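+                # Convert the remaining premium duration (weeks/days/hours) into an absolute timestamp by adding it to the current time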
+ m = re.findall(r'(\d+) (Week|weeks|day|hour)', expiredate)
+ if m:
+ validuntil = time()
+ for n, u in m:
+ validuntil += int(n) * 60 * 60 * {'Week': 168, 'weeks': 168, 'day': 24, 'hour': 1}[u]
+
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if m:
+ trafficleft = self.parseTraffic(m.group(1).replace('.', ''))
+
+ return {'validuntil': validuntil, 'trafficleft': trafficleft, 'premium': premium}
+
+
+ def login(self, user, data, req):
+ req.cj.setCookie("uploaded.net", "lang", "en")
+
+ page = req.load("http://uploaded.net/io/login",
+ post={'id': user, 'pw': data['password'], '_': ""})
+
+ if "User and password do not match" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/UploadheroCom.py b/pyload/plugins/account/UploadheroCom.py
new file mode 100644
index 000000000..be1d444be
--- /dev/null
+++ b/pyload/plugins/account/UploadheroCom.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+import re
+import datetime
+import time
+
+from pyload.plugins.internal.Account import Account
+
+
+class UploadheroCom(Account):
+ __name__ = "UploadheroCom"
+ __type__ = "account"
+ __version__ = "0.2"
+
+ __description__ = """Uploadhero.co account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mcmyst", "mcmyst@hotmail.fr")]
+
+
+ def loadAccountInfo(self, user, req):
+        premium_pattern = re.compile(r'Il vous reste <span class="bleu">(\d+)</span> jours premium')
+
+ data = self.getAccountData(user)
+ page = req.load("http://uploadhero.co/my-account")
+
+ if premium_pattern.search(page):
+ end_date = datetime.date.today() + datetime.timedelta(days=int(premium_pattern.search(page).group(1)))
+            end_date = time.mktime(end_date.timetuple())
+ account_info = {"validuntil": end_date, "trafficleft": -1, "premium": True}
+ else:
+ account_info = {"validuntil": -1, "trafficleft": -1, "premium": False}
+
+ return account_info
+
+
+ def login(self, user, data, req):
+ page = req.load("http://uploadhero.co/lib/connexion.php",
+ post={"pseudo_login": user, "password_login": data['password']})
+
+ if "mot de passe invalide" in page:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/UploadingCom.py b/pyload/plugins/account/UploadingCom.py
new file mode 100644
index 000000000..8f308b0b6
--- /dev/null
+++ b/pyload/plugins/account/UploadingCom.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import gmtime, mktime, strptime, time
+
+from pyload.plugins.internal.Account import Account
+from pyload.plugins.internal.SimpleHoster import set_cookies
+
+
+class UploadingCom(Account):
+ __name__ = "UploadingCom"
+ __type__ = "account"
+ __version__ = "0.11"
+
+ __description__ = """Uploading.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+ PREMIUM_PATTERN = r'UPGRADE TO PREMIUM'
+ VALID_UNTIL_PATTERN = r'Valid Until:(.+?)<'
+
+
+ def loadAccountInfo(self, user, req):
+ validuntil = None
+ trafficleft = None
+ premium = None
+
+ html = req.load("http://uploading.com/")
+
+ premium = False if re.search(self.PREMIUM_PATTERN, html) else True
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ expiredate = m.group(1).strip()
+ self.logDebug("Expire date: " + expiredate)
+
+ try:
+ validuntil = mktime(strptime(expiredate, "%b %d, %Y"))
+
+ except Exception, e:
+ self.logError(e)
+
+ else:
+ if validuntil > mktime(gmtime()):
+ premium = True
+ else:
+ premium = False
+ validuntil = None
+
+ return {'validuntil': validuntil, 'trafficleft': trafficleft, 'premium': premium}
+
+
+ def login(self, user, data, req):
+        set_cookies(req.cj, [("uploading.com", "lang", "1"),
+                             ("uploading.com", "language", "1"),
+                             ("uploading.com", "setlang", "en"),
+                             ("uploading.com", "_lang", "en")])
+
+ req.load("http://uploading.com/")
+ req.load("http://uploading.com/general/login_form/?JsHttpRequest=%s-xml" % long(time() * 1000),
+ post={'email': user, 'password': data['password'], 'remember': "on"})
diff --git a/pyload/plugins/account/UptoboxCom.py b/pyload/plugins/account/UptoboxCom.py
new file mode 100644
index 000000000..9438888e3
--- /dev/null
+++ b/pyload/plugins/account/UptoboxCom.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class UptoboxCom(XFSAccount):
+ __name__ = "UptoboxCom"
+ __type__ = "account"
+ __version__ = "0.07"
+
+    __description__ = """Uptobox.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "uptobox.com"
+ HOSTER_URL = "https://uptobox.com/"
diff --git a/pyload/plugins/account/VidPlayNet.py b/pyload/plugins/account/VidPlayNet.py
new file mode 100644
index 000000000..4393b3a82
--- /dev/null
+++ b/pyload/plugins/account/VidPlayNet.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class VidPlayNet(XFSAccount):
+ __name__ = "VidPlayNet"
+ __type__ = "account"
+ __version__ = "0.02"
+
+ __description__ = """VidPlay.net account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "vidplay.net"
diff --git a/pyload/plugins/account/XFileSharingPro.py b/pyload/plugins/account/XFileSharingPro.py
new file mode 100644
index 000000000..aa16d5045
--- /dev/null
+++ b/pyload/plugins/account/XFileSharingPro.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSAccount import XFSAccount
+
+
+class XFileSharingPro(XFSAccount):
+ __name__ = "XFileSharingPro"
+ __type__ = "account"
+ __version__ = "0.05"
+
+ __description__ = """XFileSharingPro multi-purpose account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = None
+
+
+ def init(self):
+ if self.HOSTER_DOMAIN:
+ return super(XFileSharingPro, self).init()
+
+
+ def loadAccountInfo(self, user, req):
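+        # With HOSTER_DOMAIN set, use the XFSAccount implementation; otherwise skip it and fall back to the plain Account base class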
+ return super(XFileSharingPro if self.HOSTER_DOMAIN else XFSAccount, self).loadAccountInfo(user, req)
+
+
+ def login(self, user, data, req):
+ if self.HOSTER_DOMAIN:
+ return super(XFileSharingPro, self).login(user, data, req)
diff --git a/pyload/plugins/account/YibaishiwuCom.py b/pyload/plugins/account/YibaishiwuCom.py
new file mode 100644
index 000000000..06a606bc4
--- /dev/null
+++ b/pyload/plugins/account/YibaishiwuCom.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Account import Account
+
+
+class YibaishiwuCom(Account):
+ __name__ = "YibaishiwuCom"
+ __type__ = "account"
+ __version__ = "0.01"
+
+ __description__ = """115.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ ACCOUNT_INFO_PATTERN = r'var USER_PERMISSION = {(.*?)}'
+
+
+ def loadAccountInfo(self, user, req):
+ #self.relogin(user)
+ html = req.load("http://115.com/", decode=True)
+
+ m = re.search(self.ACCOUNT_INFO_PATTERN, html, re.S)
+ premium = True if (m and 'is_vip: 1' in m.group(1)) else False
+ validuntil = trafficleft = (-1 if m else 0)
+        return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
+
+
+ def login(self, user, data, req):
+ html = req.load('http://passport.115.com/?ac=login', post={
+ "back": "http://www.115.com/",
+ "goto": "http://115.com/",
+ "login[account]": user,
+ "login[passwd]": data['password']
+ }, decode=True)
+
+        if 'var USER_PERMISSION = {' not in html:
+ self.wrongPassword()
diff --git a/pyload/plugins/account/ZeveraCom.py b/pyload/plugins/account/ZeveraCom.py
new file mode 100644
index 000000000..dfd4f29f5
--- /dev/null
+++ b/pyload/plugins/account/ZeveraCom.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+from time import mktime, strptime
+
+from pyload.plugins.internal.Account import Account
+
+
+class ZeveraCom(Account):
+ __name__ = "ZeveraCom"
+ __type__ = "account"
+ __version__ = "0.21"
+
+ __description__ = """Zevera.com account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def loadAccountInfo(self, user, req):
+ data = self.getAPIData(req)
+ if data == "No traffic":
+ account_info = {"trafficleft": 0, "validuntil": 0, "premium": False}
+ else:
+ account_info = {
+ "trafficleft": int(data['availabletodaytraffic']) * 1024,
+ "validuntil": mktime(strptime(data['endsubscriptiondate'], "%Y/%m/%d %H:%M:%S")),
+ "premium": True
+ }
+ return account_info
+
+
+ def login(self, user, data, req):
+ self.loginname = user
+ self.password = data['password']
+ if self.getAPIData(req) == "No traffic":
+ self.wrongPassword()
+
+
+ def getAPIData(self, req, just_header=False, **kwargs):
+ get_data = {
+ 'cmd': 'accountinfo',
+ 'login': self.loginname,
+ 'pass': self.password
+ }
+ get_data.update(kwargs)
+
+ res = req.load("http://www.zevera.com/jDownloader.ashx", get=get_data,
+ decode=True, just_header=just_header)
+ self.logDebug(res)
+
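+        # A valid reply is a comma-separated list of "key: value" pairs, parsed here into a dict; anything else (e.g. "No traffic") is returned as-is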
+ if ':' in res:
+ if not just_header:
+ res = res.replace(',', '\n')
+ return dict((y.strip().lower(), z.strip()) for (y, z) in
+ [x.split(':', 1) for x in res.splitlines() if ':' in x])
+ else:
+ return res
diff --git a/pyload/plugins/account/__init__.py b/pyload/plugins/account/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/account/__init__.py
diff --git a/pyload/plugins/addon/Checksum.py b/pyload/plugins/addon/Checksum.py
new file mode 100644
index 000000000..3e1b90941
--- /dev/null
+++ b/pyload/plugins/addon/Checksum.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import hashlib
+import re
+import zlib
+
+from os import remove
+from os.path import getsize, isfile, splitext
+
+from pyload.plugins.internal.Addon import Addon
+from pyload.utils import safe_join, fs_encode
+
+
+def computeChecksum(local_file, algorithm):
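+    # hashlib algorithms are fed the file in block-sized chunks; adler32/crc32 keep a running zlib checksum instead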
+ if algorithm in getattr(hashlib, "algorithms", ("md5", "sha1", "sha224", "sha256", "sha384", "sha512")):
+ h = getattr(hashlib, algorithm)()
+
+ with open(local_file, 'rb') as f:
+ for chunk in iter(lambda: f.read(128 * h.block_size), ''):
+ h.update(chunk)
+
+ return h.hexdigest()
+
+ elif algorithm in ("adler32", "crc32"):
+ hf = getattr(zlib, algorithm)
+ last = 0
+
+ with open(local_file, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), ''):
+ last = hf(chunk, last)
+
+ return "%x" % last
+
+ else:
+ return None
+
+
+class Checksum(Addon):
+ __name__ = "Checksum"
+ __type__ = "addon"
+ __version__ = "0.14"
+
+ __config__ = [("activated" , "bool" , "Activated" , True ),
+ ("check_checksum", "bool" , "Check checksum? (If False only size will be verified)", True ),
+ ("check_action" , "fail;retry;nothing", "What to do if check fails?" , "retry"),
+ ("max_tries" , "int" , "Number of retries" , 2 ),
+ ("retry_action" , "fail;nothing" , "What to do if all retries fail?" , "fail" ),
+ ("wait_time" , "int" , "Time to wait before each retry (seconds)" , 1 )]
+
+ __description__ = """Verify downloaded file size and checksum"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("Walter Purcaro", "vuolter@gmail.com"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ methods = {'sfv': 'crc32', 'crc': 'crc32', 'hash': 'md5'}
+ regexps = {'sfv': r'^(?P<name>[^;].+)\s+(?P<hash>[0-9A-Fa-f]{8})$',
+               'md5': r'^(?P<hash>[0-9A-Fa-f]{32}) (?P<name>.+)$',
+ 'crc': r'filename=(?P<name>.+)\nsize=(?P<size>\d+)\ncrc32=(?P<hash>[0-9A-Fa-f]{8})$',
+ 'default': r'^(?P<hash>[0-9A-Fa-f]+)\s+\*?(?P<name>.+)$'}
+
+
+ def coreReady(self):
+ if not self.getConfig("check_checksum"):
+ self.logInfo(_("Checksum validation is disabled in plugin configuration"))
+
+
+ def setup(self):
+ self.algorithms = sorted(
+ getattr(hashlib, "algorithms", ("md5", "sha1", "sha224", "sha256", "sha384", "sha512")), reverse=True)
+ self.algorithms.extend(["crc32", "adler32"])
+ self.formats = self.algorithms + ["sfv", "crc", "hash"]
+
+
+ def downloadFinished(self, pyfile):
+ """
+ Compute checksum for the downloaded file and compare it with the hash provided by the hoster.
+ pyfile.plugin.check_data should be a dictionary which can contain:
+ a) if known, the exact filesize in bytes (e.g. "size": 123456789)
+ b) hexadecimal hash string with algorithm name as key (e.g. "md5": "d76505d0869f9f928a17d42d66326307")
+ """
+ if hasattr(pyfile.plugin, "check_data") and (isinstance(pyfile.plugin.check_data, dict)):
+ data = pyfile.plugin.check_data.copy()
+ elif hasattr(pyfile.plugin, "api_data") and (isinstance(pyfile.plugin.api_data, dict)):
+ data = pyfile.plugin.api_data.copy()
+ else:
+ return
+
+ self.logDebug(data)
+
+ if not pyfile.plugin.lastDownload:
+ self.checkFailed(pyfile, None, "No file downloaded")
+
+ local_file = fs_encode(pyfile.plugin.lastDownload)
+ #download_folder = self.config['general']['download_folder']
+ #local_file = fs_encode(safe_join(download_folder, pyfile.package().folder, pyfile.name))
+
+ if not isfile(local_file):
+ self.checkFailed(pyfile, None, "File does not exist")
+
+ # validate file size
+ if "size" in data:
+ api_size = int(data['size'])
+ file_size = getsize(local_file)
+ if api_size != file_size:
+ self.logWarning(_("File %s has incorrect size: %d B (%d expected)") % (pyfile.name, file_size, api_size))
+ self.checkFailed(pyfile, local_file, "Incorrect file size")
+ del data['size']
+
+ # validate checksum
+ if data and self.getConfig("check_checksum"):
+ if "checksum" in data:
+ data['md5'] = data['checksum']
+
+ for key in self.algorithms:
+ if key in data:
+ checksum = computeChecksum(local_file, key.replace("-", "").lower())
+ if checksum:
+ if checksum == data[key].lower():
+ self.logInfo(_('File integrity of "%s" verified by %s checksum (%s)') %
+ (pyfile.name, key.upper(), checksum))
+ break
+ else:
+ self.logWarning(_("%s checksum for file %s does not match (%s != %s)") %
+ (key.upper(), pyfile.name, checksum, data[key]))
+ self.checkFailed(pyfile, local_file, "Checksums do not match")
+ else:
+ self.logWarning(_("Unsupported hashing algorithm"), key.upper())
+ else:
+ self.logWarning(_("Unable to validate checksum for file: ") + pyfile.name)
+
+
+ def checkFailed(self, pyfile, local_file, msg):
+ check_action = self.getConfig("check_action")
+ if check_action == "retry":
+ max_tries = self.getConfig("max_tries")
+ retry_action = self.getConfig("retry_action")
+ if pyfile.plugin.retries < max_tries:
+ if local_file:
+ remove(local_file)
+ pyfile.plugin.retry(max_tries, self.getConfig("wait_time"), msg)
+ elif retry_action == "nothing":
+ return
+ elif check_action == "nothing":
+ return
+ pyfile.plugin.fail(reason=msg)
+
+
+ def packageFinished(self, pypack):
+ download_folder = safe_join(self.config['general']['download_folder'], pypack.folder, "")
+
+ for link in pypack.getChildren().itervalues():
+ file_type = splitext(link['name'])[1][1:].lower()
+
+ if file_type not in self.formats:
+ continue
+
+ hash_file = fs_encode(safe_join(download_folder, link['name']))
+ if not isfile(hash_file):
+ self.logWarning(_("File not found"), link['name'])
+ continue
+
+ with open(hash_file) as f:
+ text = f.read()
+
+ for m in re.finditer(self.regexps.get(file_type, self.regexps['default']), text):
+ data = m.groupdict()
+ self.logDebug(link['name'], data)
+
+ local_file = fs_encode(safe_join(download_folder, data['name']))
+ algorithm = self.methods.get(file_type, file_type)
+ checksum = computeChecksum(local_file, algorithm)
+ if checksum == data['hash']:
+ self.logInfo(_('File integrity of "%s" verified by %s checksum (%s)') %
+ (data['name'], algorithm, checksum))
+ else:
+ self.logWarning(_("%s checksum for file %s does not match (%s != %s)") %
+ (algorithm, data['name'], checksum, data['hash']))
diff --git a/pyload/plugins/addon/ClickAndLoad.py b/pyload/plugins/addon/ClickAndLoad.py
new file mode 100644
index 000000000..cad6e5c13
--- /dev/null
+++ b/pyload/plugins/addon/ClickAndLoad.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+from socket import SHUT_WR, error, socket
+from threading import Thread
+
+from pyload.plugins.internal.Addon import Addon
+
+
+def forward(source, destination):
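+    # Pipe data from source to destination until the source stops sending, then close the write side of the destination socket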
+ string = ' '
+ while string:
+ string = source.recv(1024)
+ if string:
+ destination.sendall(string)
+ else:
+ #source.shutdown(socket.SHUT_RD)
+            destination.shutdown(SHUT_WR)
+
+
+class ClickAndLoad(Addon):
+ __name__ = "ClickAndLoad"
+ __type__ = "addon"
+ __version__ = "0.23"
+
+ __config__ = [("activated", "bool", "Activated" , True ),
+ ("port" , "int" , "Port" , 9666 ),
+ ("extern" , "bool", "Allow external link adding", False)]
+
+ __description__ = """Click'N'Load hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.de"),
+ ("mkaay", "mkaay@mkaay.de"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ def coreReady(self):
+ self.interval = 300
+
+
+ def periodical(self):
+ webip = "0.0.0.0" if self.getConfig("extern") else "127.0.0.1"
+ webport = self.config['webinterface']['port']
+        cnlport = self.getConfig("port")
+
+ try:
+ s = socket()
+ s.bind((webip, cnlport))
+ s.listen(5)
+
+ client = s.accept()[0]
+ server = socket()
+
+ server.connect(("127.0.0.1", webport))
+
+ except error, e:
+ if hasattr(e, "errno"):
+ errno = e.errno
+ else:
+ errno = e.args[0]
+
+ if errno == 98:
+ self.logWarning(_("Port %d already in use") % cnlport)
+ else:
+ self.logDebug(e)
+
+ else:
+ t = Thread(target=forward, args=[client, server])
+ t.setDaemon(True)
+ t.start()
+ self.interval = -1
+
diff --git a/pyload/plugins/addon/DeleteFinished.py b/pyload/plugins/addon/DeleteFinished.py
new file mode 100644
index 000000000..6c2ba38d8
--- /dev/null
+++ b/pyload/plugins/addon/DeleteFinished.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+from pyload.database import style
+from pyload.plugins.internal.Addon import Addon
+
+
+class DeleteFinished(Addon):
+ __name__ = "DeleteFinished"
+ __type__ = "addon"
+ __version__ = "1.11"
+
+ __config__ = [("activated" , "bool", "Activated" , False ),
+ ('interval' , 'int' , 'Delete every (hours)' , '72' ),
+ ('deloffline', 'bool', 'Delete packages with offline links', 'False')]
+
+ __description__ = """Automatically delete all finished packages from queue"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ # event_list = ["pluginConfigChanged"]
+
+
+ ## overwritten methods ##
+ def periodical(self):
+ if not self.info['sleep']:
+ deloffline = self.getConfig('deloffline')
+ mode = '0,1,4' if deloffline else '0,4'
+ msg = _('delete all finished packages in queue list (%s packages with offline links)')
+ self.logInfo(msg % (_('including') if deloffline else _('excluding')))
+ self.deleteFinished(mode)
+ self.info['sleep'] = True
+ self.addEvent('packageFinished', self.wakeup)
+
+
+ def pluginConfigChanged(self, plugin, name, value):
+ if name == "interval" and value != self.interval:
+ self.interval = value * 3600
+ self.initPeriodical()
+
+
+ def unload(self):
+ self.removeEvent('packageFinished', self.wakeup)
+
+
+ def coreReady(self):
+ self.info = {'sleep': True}
+ interval = self.getConfig('interval')
+ self.pluginConfigChanged(self.__name__, 'interval', interval)
+ self.addEvent('packageFinished', self.wakeup)
+
+
+ ## own methods ##
+ @style.queue
+ def deleteFinished(self, mode):
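+        # Remove packages whose links all have a status listed in 'mode', then remove links left without a package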
+ self.c.execute('DELETE FROM packages WHERE NOT EXISTS(SELECT 1 FROM links WHERE package=packages.id AND status NOT IN (%s))' % mode)
+ self.c.execute('DELETE FROM links WHERE NOT EXISTS(SELECT 1 FROM packages WHERE id=links.package)')
+
+
+ def wakeup(self, pypack):
+ self.removeEvent('packageFinished', self.wakeup)
+ self.info['sleep'] = False
+
+
+ ## event managing ##
+ def addEvent(self, event, func):
+ """Adds an event listener for event name"""
+ if event in self.m.events:
+ if func in self.m.events[event]:
+ self.logDebug("Function already registered", func)
+ else:
+ self.m.events[event].append(func)
+ else:
+ self.m.events[event] = [func]
+
+
+ def setup(self):
+ self.m = self.manager
+ self.removeEvent = self.m.removeEvent
diff --git a/pyload/plugins/addon/DownloadScheduler.py b/pyload/plugins/addon/DownloadScheduler.py
new file mode 100644
index 000000000..a35bba04e
--- /dev/null
+++ b/pyload/plugins/addon/DownloadScheduler.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import localtime
+
+from pyload.plugins.internal.Addon import Addon
+
+
+class DownloadScheduler(Addon):
+ __name__ = "DownloadScheduler"
+ __type__ = "addon"
+ __version__ = "0.21"
+
+ __config__ = [("timetable", "str" , "List time periods as hh:mm full or number(kB/s)" , "0:00 full, 7:00 250, 10:00 0, 17:00 150"),
+ ("abort" , "bool", "Abort active downloads when start period with speed 0", False )]
+
+ __description__ = """Download Scheduler"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ def setup(self):
+        self.cb = None #: callback to scheduler job; will be removed by the AddonManager when the addon is unloaded
+
+
+ def coreReady(self):
+ self.updateSchedule()
+
+
+ def updateSchedule(self, schedule=None):
+ if schedule is None:
+ schedule = self.getConfig("timetable")
+
+ schedule = re.findall("(\d{1,2}):(\d{2})[\s]*(-?\d+)",
+ schedule.lower().replace("full", "-1").replace("none", "0"))
+ if not schedule:
+ self.logError(_("Invalid schedule"))
+ return
+
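+        # Insert the current time as an "X" marker into the sorted timetable: the entry before it holds the active speed, the entry after it the next switch time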
+ t0 = localtime()
+ now = (t0.tm_hour, t0.tm_min, t0.tm_sec, "X")
+ schedule = sorted([(int(x[0]), int(x[1]), 0, int(x[2])) for x in schedule] + [now])
+
+ self.logDebug("Schedule", schedule)
+
+ for i, v in enumerate(schedule):
+ if v[3] == "X":
+ last, next = schedule[i - 1], schedule[(i + 1) % len(schedule)]
+ self.logDebug("Now/Last/Next", now, last, next)
+
+ self.setDownloadSpeed(last[3])
+
+ next_time = (((24 + next[0] - now[0]) * 60 + next[1] - now[1]) * 60 + next[2] - now[2]) % 86400
+ self.core.scheduler.removeJob(self.cb)
+ self.cb = self.core.scheduler.addJob(next_time, self.updateSchedule, threaded=False)
+
+
+ def setDownloadSpeed(self, speed):
+ if speed == 0:
+ abort = self.getConfig("abort")
+            self.logInfo(_("Stopping download server. (Running downloads will %sbe aborted.)") % ('' if abort else _('not ')))
+ self.core.api.pauseServer()
+ if abort:
+ self.core.api.stopAllDownloads()
+ else:
+ self.core.api.unpauseServer()
+
+ if speed > 0:
+ self.logInfo(_("Setting download speed to %d kB/s") % speed)
+ self.core.api.setConfigValue("download", "limit_speed", 1)
+ self.core.api.setConfigValue("download", "max_speed", speed)
+ else:
+ self.logInfo(_("Setting download speed to FULL"))
+ self.core.api.setConfigValue("download", "limit_speed", 0)
+ self.core.api.setConfigValue("download", "max_speed", -1)
diff --git a/pyload/plugins/addon/ExternalScripts.py b/pyload/plugins/addon/ExternalScripts.py
new file mode 100644
index 000000000..5aff0f11f
--- /dev/null
+++ b/pyload/plugins/addon/ExternalScripts.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+
+import subprocess
+
+from itertools import chain
+from os import listdir, access, X_OK, makedirs
+from os.path import join, exists, basename, abspath
+
+from pyload.plugins.internal.Addon import Addon
+from pyload.utils import safe_join
+
+
+class ExternalScripts(Addon):
+ __name__ = "ExternalScripts"
+ __type__ = "addon"
+ __version__ = "0.24"
+
+ __config__ = [("activated", "bool", "Activated", True)]
+
+ __description__ = """Run external scripts"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de"),
+ ("RaNaN", "ranan@pyload.org"),
+ ("spoob", "spoob@pyload.org"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ event_list = ["archive_extracted", "package_extracted", "all_archives_extracted", "all_archives_processed",
+ "allDownloadsFinished", "allDownloadsProcessed"]
+
+
+ def setup(self):
+ self.scripts = {}
+
+ folders = ["download_preparing", "download_finished", "all_downloads_finished", "all_downloads_processed",
+ "before_reconnect", "after_reconnect",
+ "package_finished", "package_extracted",
+ "archive_extracted", "all_archives_extracted", "all_archives_processed",
+ # deprecated folders
+ "unrar_finished", "all_dls_finished", "all_dls_processed"]
+
+ for folder in folders:
+ self.scripts[folder] = []
+
+ self.initPluginType(folder, join(pypath, 'scripts', folder))
+ self.initPluginType(folder, join('scripts', folder))
+
+ for script_type, names in self.scripts.iteritems():
+ if names:
+ self.logInfo(_("Installed scripts for"), script_type, ", ".join([basename(x) for x in names]))
+
+
+ def initPluginType(self, folder, path):
+ if not exists(path):
+ try:
+ makedirs(path)
+ except:
+ self.logDebug("Script folder %s not created" % folder)
+ return
+
+ for f in listdir(path):
+ if f.startswith("#") or f.startswith(".") or f.startswith("_") or f.endswith("~") or f.endswith(".swp"):
+ continue
+
+ if not access(join(path, f), X_OK):
+ self.logWarning(_("Script not executable:") + " %s/%s" % (folder, f))
+
+ self.scripts[folder].append(join(path, f))
+
+
+ def callScript(self, script, *args):
+ try:
+ cmd = [script] + [str(x) if not isinstance(x, basestring) else x for x in args]
+ self.logDebug("Executing", abspath(script), " ".join(cmd))
+ #output goes to pyload
+ subprocess.Popen(cmd, bufsize=-1)
+ except Exception, e:
+ self.logError(_("Error in %(script)s: %(error)s") % {"script": basename(script), "error": e})
+
+
+ def downloadPreparing(self, pyfile):
+ for script in self.scripts['download_preparing']:
+ self.callScript(script, pyfile.pluginname, pyfile.url, pyfile.id)
+
+
+ def downloadFinished(self, pyfile):
+ download_folder = self.config['general']['download_folder']
+ for script in self.scripts['download_finished']:
+ filename = safe_join(download_folder, pyfile.package().folder, pyfile.name)
+ self.callScript(script, pyfile.pluginname, pyfile.url, pyfile.name, filename, pyfile.id)
+
+
+ def packageFinished(self, pypack):
+ download_folder = self.config['general']['download_folder']
+ for script in self.scripts['package_finished']:
+ folder = safe_join(download_folder, pypack.folder)
+ self.callScript(script, pypack.name, folder, pypack.password, pypack.id)
+
+
+ def beforeReconnecting(self, ip):
+ for script in self.scripts['before_reconnect']:
+ self.callScript(script, ip)
+
+
+ def afterReconnecting(self, ip):
+ for script in self.scripts['after_reconnect']:
+ self.callScript(script, ip)
+
+
+ def archive_extracted(self, pyfile, folder, filename, files):
+ for script in self.scripts['archive_extracted']:
+ self.callScript(script, folder, filename, files)
+ for script in self.scripts['unrar_finished']: #: deprecated
+ self.callScript(script, folder, filename)
+
+
+ def package_extracted(self, pypack):
+ download_folder = self.config['general']['download_folder']
+ for script in self.scripts['package_extracted']:
+ folder = safe_join(download_folder, pypack.folder)
+ self.callScript(script, pypack.name, folder, pypack.password, pypack.id)
+
+
+ def all_archives_extracted(self):
+ for script in self.scripts['all_archives_extracted']:
+ self.callScript(script)
+
+
+ def all_archives_processed(self):
+ for script in self.scripts['all_archives_processed']:
+ self.callScript(script)
+
+
+ def allDownloadsFinished(self):
+ for script in chain(self.scripts['all_downloads_finished'], self.scripts['all_dls_finished']):
+ self.callScript(script)
+
+
+ def allDownloadsProcessed(self):
+ for script in chain(self.scripts['all_downloads_processed'], self.scripts['all_dls_processed']):
+ self.callScript(script)
diff --git a/pyload/plugins/addon/ExtractArchive.py b/pyload/plugins/addon/ExtractArchive.py
new file mode 100644
index 000000000..91f477cf8
--- /dev/null
+++ b/pyload/plugins/addon/ExtractArchive.py
@@ -0,0 +1,361 @@
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+
+from copy import copy
+from os import remove, chmod, makedirs
+from os.path import exists, basename, isfile, isdir
+from traceback import print_exc
+
+# monkey patch bug in python 2.6 and lower
+# http://bugs.python.org/issue6122 , http://bugs.python.org/issue1236 , http://bugs.python.org/issue1731717
+if sys.version_info < (2, 7) and os.name != "nt":
+ import errno
+ from subprocess import Popen
+
+
+ def _eintr_retry_call(func, *args):
+ while True:
+ try:
+ return func(*args)
+ except OSError, e:
+ if e.errno == errno.EINTR:
+ continue
+ raise
+
+
+    # unused timeout option for older python versions
+ def wait(self, timeout=0):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if self.returncode is None:
+ try:
+ pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
+ except OSError, e:
+ if e.errno != errno.ECHILD:
+ raise
+ # This happens if SIGCLD is set to be ignored or waiting
+ # for child processes has otherwise been disabled for our
+ # process. This child is dead, we can't get the status.
+ sts = 0
+ self._handle_exitstatus(sts)
+ return self.returncode
+
+ Popen.wait = wait
+
+if os.name != "nt":
+ from grp import getgrnam
+ from os import chown
+ from pwd import getpwnam
+
+from pyload.plugins.internal.Addon import Addon, threaded, Expose
+from pyload.plugins.internal.AbstractExtractor import ArchiveError, CRCError, WrongPassword
+from pyload.utils import safe_join, fs_encode
+
+
+class ExtractArchive(Addon):
+ __name__ = "ExtractArchive"
+ __type__ = "addon"
+ __version__ = "0.17"
+
+ __config__ = [("activated" , "bool" , "Activated" , True ),
+ ("fullpath" , "bool" , "Extract full path" , True ),
+ ("overwrite" , "bool" , "Overwrite files" , True ),
+ ("passwordfile" , "file" , "password file" , "archive_password.txt"),
+ ("deletearchive", "bool" , "Delete archives when done" , False ),
+ ("subfolder" , "bool" , "Create subfolder for each package" , False ),
+ ("destination" , "folder", "Extract files to" , "" ),
+                  ("excludefiles" , "str"   , "Exclude files from unpacking (separated by ;)", ""                    ),
+                  ("recursive"    , "bool"  , "Extract archives in archives"                 , True                  ),
+ ("queue" , "bool" , "Wait for all downloads to be finished" , True ),
+ ("renice" , "int" , "CPU Priority" , 0 )]
+
+ __description__ = """Extract different kind of archives"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "ranan@pyload.org"),
+ ("AndroKev", None),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ event_list = ["allDownloadsProcessed"]
+
+
+ def setup(self):
+ self.plugins = []
+ self.passwords = []
+ names = []
+
+ for p in ("UnRar", "UnZip"):
+ try:
+ module = self.core.pluginManager.loadModule("internal", p)
+ klass = getattr(module, p)
+ if klass.checkDeps():
+ names.append(p)
+ self.plugins.append(klass)
+
+ except OSError, e:
+ if e.errno == 2:
+ self.logInfo(_("No %s installed") % p)
+ else:
+ self.logWarning(_("Could not activate %s") % p, e)
+ if self.core.debug:
+ print_exc()
+
+ except Exception, e:
+ self.logWarning(_("Could not activate %s") % p, e)
+ if self.core.debug:
+ print_exc()
+
+ if names:
+ self.logInfo(_("Activated") + " " + " ".join(names))
+ else:
+ self.logInfo(_("No Extract plugins activated"))
+
+ # queue with package ids
+ self.queue = []
+
+
+ @Expose
+ def extractPackage(self, id):
+ """ Extract package with given id"""
+ self.manager.startThread(self.extract, [id])
+
+
+ def packageFinished(self, pypack):
+ pid = pypack.id
+ if self.getConfig("queue"):
+ self.logInfo(_("Package %s queued for later extracting") % pypack.name)
+ self.queue.append(pid)
+ else:
+ self.manager.startThread(self.extract, [pid])
+
+
+ @threaded
+ def allDownloadsProcessed(self, thread):
+ local = copy(self.queue)
+ del self.queue[:]
+ if self.extract(local, thread): #: dispatch only if everything went fine; no failure reporting for now
+ self.manager.dispatchEvent("all_archives_extracted")
+ self.manager.dispatchEvent("all_archives_processed")
+
+
+ def extract(self, ids, thread=None):
+ processed = []
+ extracted = []
+ failed = []
+
+ destination = self.getConfig("destination")
+ subfolder = self.getConfig("subfolder")
+ fullpath = self.getConfig("fullpath")
+ overwrite = self.getConfig("overwrite")
+ excludefiles = self.getConfig("excludefiles")
+ renice = self.getConfig("renice")
+ recursive = self.getConfig("recursive")
+
+ # reload from txt file
+ self.reloadPasswords()
+
+ # dl folder
+ dl = self.config['general']['download_folder']
+
+ #iterate packages -> plugins -> targets
+ for pid in ids:
+ p = self.core.files.getPackage(pid)
+ self.logInfo(_("Check package %s") % p.name)
+ if not p:
+ continue
+
+ # determine output folder
+ out = safe_join(dl, p.folder, "")
+
+ out = safe_join(dl, p.folder, self.getConfig("destination"), "")
+ if subfolder:
+ out = safe_join(out, fs_encode(p.folder))
+
+ if not exists(out):
+ makedirs(out)
+
+ files_ids = [(safe_join(dl, p.folder, x['name']), x['id']) for x in p.getChildren().itervalues()]
+ matched = False
+ success = True
+
+ # check as long there are unseen files
+ while files_ids:
+ new_files_ids = []
+
+ for plugin in self.plugins:
+ targets = plugin.getTargets(files_ids)
+ if targets:
+ self.logDebug("Targets for %s: %s" % (plugin.__name__, targets))
+ matched = True
+ for target, fid in targets:
+ if target in processed:
+ self.logDebug(basename(target), "skipped")
+ continue
+
+ processed.append(target) # prevent extracting same file twice
+
+ self.logInfo(basename(target), _("Extract to %s") % out)
+ try:
+ klass = plugin(self, target, out, fullpath, overwrite, excludefiles, renice)
+ klass.init()
+ password = p.password.strip().splitlines()
+ new_files = self._extract(klass, fid, password, thread)
+ except Exception, e:
+ self.logError(basename(target), e)
+ success = False
+ continue
+
+ self.logDebug("Extracted", new_files)
+ self.setPermissions(new_files)
+
+ for file in new_files:
+ if not exists(file):
+ self.logDebug("New file %s does not exists" % file)
+ continue
+ if recursive and isfile(file):
+ new_files_ids.append((file, fid)) # append as new target
+
+ files_ids = new_files_ids # also check extracted files
+
+ if matched:
+ if success:
+ extracted.append(pid)
+ self.manager.dispatchEvent("package_extracted", p)
+ else:
+ failed.append(pid)
+ self.manager.dispatchEvent("package_extract_failed", p)
+ else:
+ self.logInfo(_("No files found to extract"))
+
+ return not failed
+
+
+ def _extract(self, plugin, fid, passwords, thread):
+ pyfile = self.core.files.getFile(fid)
+ deletearchive = self.getConfig("deletearchive")
+
+ pyfile.setCustomStatus(_("extracting"))
+ thread.addActive(pyfile) # keep this file until everything is done
+
+ try:
+ progress = lambda x: pyfile.setProgress(x)
+ success = False
+
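+ # checkArchive() reports whether the archive is password protected; unprotected archives are extracted right away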
+ if not plugin.checkArchive():
+ plugin.extract(progress)
+ success = True
+ else:
+ self.logInfo(basename(plugin.file), _("Password protected"))
+ self.logDebug("Passwords", passwords)
+
+ pwlist = copy(self.getPasswords())
+ # remove already supplied pws from list (only local)
+ for pw in passwords:
+ if pw in pwlist:
+ pwlist.remove(pw)
+
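+ # try the passwords supplied with the package first, then the stored password list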
+ for pw in passwords + pwlist:
+ try:
+ self.logDebug("Try password", pw)
+ if plugin.checkPassword(pw):
+ plugin.extract(progress, pw)
+ self.addPassword(pw)
+ success = True
+ break
+ except WrongPassword:
+ self.logDebug("Password was wrong")
+
+ if not success:
+ raise Exception(_("Wrong password"))
+
+ if self.core.debug:
+ self.logDebug("Would delete", ", ".join(plugin.getDeleteFiles()))
+
+ if deletearchive:
+ files = plugin.getDeleteFiles()
+ self.logInfo(_("Deleting %s files") % len(files))
+ for f in files:
+ if exists(f):
+ remove(f)
+ else:
+ self.logDebug("%s does not exists" % f)
+
+ self.logInfo(basename(plugin.file), _("Extracting finished"))
+
+ extracted_files = plugin.getExtractedFiles()
+ self.manager.dispatchEvent("archive_extracted", pyfile, plugin.out, plugin.file, extracted_files)
+
+ return extracted_files
+
+ except ArchiveError, e:
+ self.logError(basename(plugin.file), _("Archive Error"), e)
+ except CRCError:
+ self.logError(basename(plugin.file), _("CRC Mismatch"))
+ except Exception, e:
+ if self.core.debug:
+ print_exc()
+ self.logError(basename(plugin.file), _("Unknown Error"), e)
+
+ self.manager.dispatchEvent("archive_extract_failed", pyfile)
+ raise Exception(_("Extract failed"))
+
+
+ @Expose
+ def getPasswords(self):
+ """ List of saved passwords """
+ return self.passwords
+
+
+ def reloadPasswords(self):
+ passwordfile = self.getConfig("passwordfile")
+
+ try:
+ passwords = []
+ with open(passwordfile, "a+") as f:
+ for pw in f.read().splitlines():
+ passwords.append(pw)
+
+ except IOError, e:
+ self.logError(e)
+
+ else:
+ self.passwords = passwords
+
+
+ @Expose
+ def addPassword(self, pw):
+ """ Adds a password to saved list"""
+ passwordfile = self.getConfig("passwordfile")
+
+ if pw in self.passwords:
+ self.passwords.remove(pw)
+
+ self.passwords.insert(0, pw)
+
+ try:
+ with open(passwordfile, "wb") as f:
+ for pw in self.passwords:
+ f.write(pw + "\n")
+ except IOError, e:
+ self.logError(e)
+
+
+ def setPermissions(self, files):
+ for f in files:
+ if not exists(f):
+ continue
+ try:
+ if self.config['permission']['change_file']:
+ if isfile(f):
+ chmod(f, int(self.config['permission']['file'], 8))
+ elif isdir(f):
+ chmod(f, int(self.config['permission']['folder'], 8))
+
+ if self.config['permission']['change_dl'] and os.name != "nt":
+ uid = getpwnam(self.config['permission']['user'])[2]
+ gid = getgrnam(self.config['permission']['group'])[2]
+ chown(f, uid, gid)
+ except Exception, e:
+ self.logWarning(_("Setting User and Group failed"), e)
diff --git a/pyload/plugins/addon/HotFolder.py b/pyload/plugins/addon/HotFolder.py
new file mode 100644
index 000000000..e91b9e04f
--- /dev/null
+++ b/pyload/plugins/addon/HotFolder.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+
+import time
+
+from os import listdir, makedirs
+from os.path import exists, isfile, join
+from shutil import move
+
+from pyload.plugins.internal.Addon import Addon
+from pyload.utils import fs_encode, safe_join
+
+
+class HotFolder(Addon):
+ __name__ = "HotFolder"
+ __type__ = "addon"
+ __version__ = "0.11"
+
+ __config__ = [("activated" , "bool", "Activated" , False ),
+ ("folder" , "str" , "Folder to observe" , "container"),
+ ("watch_file", "bool", "Observe link file" , False ),
+ ("keep" , "bool", "Keep added containers", True ),
+ ("file" , "str" , "Link file" , "links.txt")]
+
+ __description__ = """Observe folder and file for changes and add container and links"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.de")]
+
+
+ def setup(self):
+ self.interval = 10
+
+
+ def periodical(self):
+ folder = fs_encode(self.getConfig("folder"))
+
+ try:
+ if not exists(join(folder, "finished")):
+ makedirs(join(folder, "finished"))
+
+ if self.getConfig("watch_file"):
+ with open(fs_encode(self.getConfig("file")), "a+") as f:
+ content = f.read().strip()
+
+ if content:
+ name = "%s_%s.txt" % (self.getConfig("file"), time.strftime("%H-%M-%S_%d%b%Y"))
+
+ with open(safe_join(folder, "finished", name), "wb") as f:
+ f.write(content)
+
+ self.core.api.addPackage(f.name, [f.name], 1)
+
+ for f in listdir(folder):
+ path = join(folder, f)
+
+ if not isfile(path) or f.endswith("~") or f.startswith("#") or f.startswith("."):
+ continue
+
+ newpath = join(folder, "finished", f if self.getConfig("keep") else "tmp_" + f)
+ move(path, newpath)
+
+ self.logInfo(_("Added %s from HotFolder") % f)
+ self.core.api.addPackage(f, [newpath], 1)
+
+ except IOError, e:
+ self.logError(e)
diff --git a/pyload/plugins/addon/IRCInterface.py b/pyload/plugins/addon/IRCInterface.py
new file mode 100644
index 000000000..a89efcd0c
--- /dev/null
+++ b/pyload/plugins/addon/IRCInterface.py
@@ -0,0 +1,432 @@
+# -*- coding: utf-8 -*-
+
+import re
+import socket
+import ssl
+import time
+
+from pycurl import FORM_FILE
+from select import select
+from threading import Thread
+from time import sleep
+from traceback import print_exc
+
+from pyload.api import PackageDoesNotExists, FileDoesNotExists
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Addon import Addon
+from pyload.utils import formatSize
+
+
+class IRCInterface(Thread, Addon):
+ __name__ = "IRCInterface"
+ __type__ = "addon"
+ __version__ = "0.12"
+
+ __config__ = [("activated", "bool", "Activated", False),
+ ("host" , "str" , "IRC-Server Address" , "Enter your server here!"),
+ ("port" , "int" , "IRC-Server Port" , 6667 ),
+ ("ident" , "str" , "Clients ident" , "pyload-irc" ),
+ ("realname" , "str" , "Realname" , "pyload-irc" ),
+ ("ssl" , "bool", "Use SSL" , False ),
+ ("nick" , "str" , "Nickname the Client will take" , "pyLoad-IRC" ),
+ ("owner" , "str" , "Nickname the Client will accept commands from", "Enter your nick here!" ),
+ ("info_file", "bool", "Inform about every file finished" , False ),
+ ("info_pack", "bool", "Inform about every package finished" , True ),
+ ("captcha" , "bool", "Send captcha requests" , True )]
+
+ __description__ = """Connect to irc and let owner perform different tasks"""
+ __license__ = "GPLv3"
+ __authors__ = [("Jeix", "Jeix@hasnomail.com")]
+
+
+ def __init__(self, core, manager):
+ Thread.__init__(self)
+ Addon.__init__(self, core, manager)
+ self.setDaemon(True)
+
+
+ def coreReady(self):
+ self.abort = False
+ self.more = []
+ self.new_package = {}
+
+ self.start()
+
+
+ def packageFinished(self, pypack):
+ try:
+ if self.getConfig("info_pack"):
+ self.response(_("Package finished: %s") % pypack.name)
+ except:
+ pass
+
+
+ def downloadFinished(self, pyfile):
+ try:
+ if self.getConfig("info_file"):
+ self.response(
+ _("Download finished: %(name)s @ %(plugin)s ") % {"name": pyfile.name, "plugin": pyfile.pluginname})
+ except:
+ pass
+
+
+ def newCaptchaTask(self, task):
+ if self.getConfig("captcha") and task.isTextual():
+ task.handler.append(self)
+ task.setWaiting(60)
+
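+ # upload the captcha image to a public image host so the owner can solve it from IRC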
+ page = getURL("http://www.freeimagehosting.net/upload.php",
+ post={"attached": (FORM_FILE, task.captchaFile)}, multipart=True)
+
+ url = re.search(r"\[img\]([^\[]+)\[/img\]\[/url\]", page).group(1)
+ self.response(_("New Captcha Request: %s") % url)
+ self.response(_("Answer with 'c %s text on the captcha'") % task.id)
+
+
+ def run(self):
+ # connect to IRC etc.
+ self.sock = socket.socket()
+ host = self.getConfig("host")
+ self.sock.connect((host, self.getConfig("port")))
+
+ if self.getConfig("ssl"):
+ self.sock = ssl.wrap_socket(self.sock, cert_reqs=ssl.CERT_NONE) #@TODO: support custom certificate
+
+ nick = self.getConfig("nick")
+ self.sock.send("NICK %s\r\n" % nick)
+ self.sock.send("USER %s %s bla :%s\r\n" % (nick, host, nick))
+ for t in self.getConfig("owner").split():
+ if t.strip().startswith("#"):
+ self.sock.send("JOIN %s\r\n" % t.strip())
+ self.logInfo(_("Connected to"), host)
+ self.logInfo(_("Switching to listening mode!"))
+ try:
+ self.main_loop()
+
+ except IRCError, ex:
+ self.sock.send("QUIT :byebye\r\n")
+ print_exc()
+ self.sock.close()
+
+
+ def main_loop(self):
+ readbuffer = ""
+ while True:
+ sleep(1)
+ fdset = select([self.sock], [], [], 0)
+ if self.sock not in fdset[0]:
+ continue
+
+ if self.abort:
+ raise IRCError("quit")
+
+ readbuffer += self.sock.recv(1024)
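+ # keep the last (possibly incomplete) line in the buffer until the next read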
+ temp = readbuffer.split("\n")
+ readbuffer = temp.pop()
+
+ for line in temp:
+ line = line.rstrip()
+ first = line.split()
+
+ if first[0] == "PING":
+ self.sock.send("PONG %s\r\n" % first[1])
+
+ if first[0] == "ERROR":
+ raise IRCError(line)
+
+ msg = line.split(None, 3)
+ if len(msg) < 4:
+ continue
+
+ msg = {
+ "origin": msg[0][1:],
+ "action": msg[1],
+ "target": msg[2],
+ "text": msg[3][1:]
+ }
+
+ self.handle_events(msg)
+
+
+ def handle_events(self, msg):
+ if not msg['origin'].split("!", 1)[0] in self.getConfig("owner").split():
+ return
+
+ if msg['target'].split("!", 1)[0] != self.getConfig("nick"):
+ return
+
+ if msg['action'] != "PRIVMSG":
+ return
+
+ # HANDLE CTCP ANTI FLOOD/BOT PROTECTION
+ if msg['text'] == "\x01VERSION\x01":
+ self.logDebug("Sending CTCP VERSION")
+ self.sock.send("NOTICE %s :%s\r\n" % (msg['origin'], "pyLoad! IRC Interface"))
+ return
+ elif msg['text'] == "\x01TIME\x01":
+ self.logDebug("Sending CTCP TIME")
+ self.sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
+ return
+ elif msg['text'] == "\x01LAG\x01":
+ self.logDebug("Received CTCP LAG") #: don't know how to answer
+ return
+
+ trigger = "pass"
+ args = None
+
+ try:
+ temp = msg['text'].split()
+ trigger = temp[0]
+ if len(temp) > 1:
+ args = temp[1:]
+ except:
+ pass
+
+ handler = getattr(self, "event_%s" % trigger, self.event_pass)
+ try:
+ res = handler(args)
+ for line in res:
+ self.response(line, msg['origin'])
+ except Exception, e:
+ self.logError(e)
+
+
+ def response(self, msg, origin=""):
+ if origin == "":
+ for t in self.getConfig("owner").split():
+ self.sock.send("PRIVMSG %s :%s\r\n" % (t.strip(), msg))
+ else:
+ self.sock.send("PRIVMSG %s :%s\r\n" % (origin.split("!", 1)[0], msg))
+
+
+ #### Events
+
+ def event_pass(self, args):
+ return []
+
+
+ def event_status(self, args):
+ downloads = self.core.api.statusDownloads()
+ if not downloads:
+ return ["INFO: There are no active downloads currently."]
+
+ temp_progress = ""
+ lines = ["ID - Name - Status - Speed - ETA - Progress"]
+ for data in downloads:
+
+ if data.status == 5:
+ temp_progress = data.format_wait
+ else:
+ temp_progress = "%d%% (%s)" % (data.percent, data.format_size)
+
+ lines.append("#%d - %s - %s - %s - %s - %s" %
+ (
+ data.fid,
+ data.name,
+ data.statusmsg,
+ "%s/s" % formatSize(data.speed),
+ "%s" % data.format_eta,
+ temp_progress
+ ))
+ return lines
+
+
+ def event_queue(self, args):
+ ps = self.core.api.getQueueData()
+
+ if not ps:
+ return ["INFO: There are no packages in queue."]
+
+ lines = []
+ for pack in ps:
+ lines.append('PACKAGE #%s: "%s" with %d links.' % (pack.pid, pack.name, len(pack.links)))
+
+ return lines
+
+
+ def event_collector(self, args):
+ ps = self.core.api.getCollectorData()
+ if not ps:
+ return ["INFO: No packages in collector!"]
+
+ lines = []
+ for pack in ps:
+ lines.append('PACKAGE #%s: "%s" with %d links.' % (pack.pid, pack.name, len(pack.links)))
+
+ return lines
+
+
+ def event_info(self, args):
+ if not args:
+ return ["ERROR: Use info like this: info <id>"]
+
+ info = None
+ try:
+ info = self.core.api.getFileData(int(args[0]))
+
+ except FileDoesNotExists:
+ return ["ERROR: Link doesn't exists."]
+
+ return ['LINK #%s: %s (%s) [%s][%s]' % (info.fid, info.name, info.format_size, info.statusmsg, info.plugin)]
+
+
+ def event_packinfo(self, args):
+ if not args:
+ return ["ERROR: Use packinfo like this: packinfo <id>"]
+
+ lines = []
+ pack = None
+ try:
+ pack = self.core.api.getPackageData(int(args[0]))
+
+ except PackageDoesNotExists:
+ return ["ERROR: Package doesn't exists."]
+
+ id = args[0]
+
+ self.more = []
+
+ lines.append('PACKAGE #%s: "%s" with %d links' % (id, pack.name, len(pack.links)))
+ for pyfile in pack.links:
+ self.more.append('LINK #%s: %s (%s) [%s][%s]' % (pyfile.fid, pyfile.name, pyfile.format_size,
+ pyfile.statusmsg, pyfile.plugin))
+
+ if len(self.more) < 6:
+ lines.extend(self.more)
+ self.more = []
+ else:
+ lines.extend(self.more[:6])
+ self.more = self.more[6:]
+ lines.append("%d more links do display." % len(self.more))
+
+ return lines
+
+
+ def event_more(self, args):
+ if not self.more:
+ return ["No more information to display."]
+
+ lines = self.more[:6]
+ self.more = self.more[6:]
+ lines.append("%d more links do display." % len(self.more))
+
+ return lines
+
+
+ def event_start(self, args):
+ self.core.api.unpauseServer()
+ return ["INFO: Starting downloads."]
+
+
+ def event_stop(self, args):
+ self.core.api.pauseServer()
+ return ["INFO: No new downloads will be started."]
+
+
+ def event_add(self, args):
+ if len(args) < 2:
+ return ['ERROR: Add links like this: "add <packagename|id> links". ',
+ "This will add the link <link> to to the package <package> / the package with id <id>!"]
+
+ pack = args[0].strip()
+ links = [x.strip() for x in args[1:]]
+
+ count_added = 0
+ count_failed = 0
+ try:
+ id = int(pack)
+ pack = self.core.api.getPackageData(id)
+ if not pack:
+ return ["ERROR: Package doesn't exists."]
+
+ #TODO add links
+
+ return ["INFO: Added %d links to Package %s [#%d]" % (len(links), pack['name'], id)]
+
+ except:
+ # create new package
+ id = self.core.api.addPackage(pack, links, 1)
+ return ["INFO: Created new Package %s [#%d] with %d links." % (pack, id, len(links))]
+
+
+ def event_del(self, args):
+ if len(args) < 2:
+ return ["ERROR: Use del command like this: del -p|-l <id> [...] (-p indicates that the ids are from packages, -l indicates that the ids are from links)"]
+
+ if args[0] == "-p":
+ ret = self.core.api.deletePackages(map(int, args[1:]))
+ return ["INFO: Deleted %d packages!" % len(args[1:])]
+
+ elif args[0] == "-l":
+ ret = self.core.api.delLinks(map(int, args[1:]))
+ return ["INFO: Deleted %d links!" % len(args[1:])]
+
+ else:
+ return ["ERROR: Use del command like this: del <-p|-l> <id> [...] (-p indicates that the ids are from packages, -l indicates that the ids are from links)"]
+
+
+ def event_push(self, args):
+ if not args:
+ return ["ERROR: Push package to queue like this: push <package id>"]
+
+ id = int(args[0])
+ try:
+ info = self.core.api.getPackageInfo(id)
+ except PackageDoesNotExists:
+ return ["ERROR: Package #%d does not exist." % id]
+
+ self.core.api.pushToQueue(id)
+ return ["INFO: Pushed package #%d to queue." % id]
+
+
+ def event_pull(self, args):
+ if not args:
+ return ["ERROR: Pull package from queue like this: pull <package id>."]
+
+ id = int(args[0])
+ if not self.core.api.getPackageData(id):
+ return ["ERROR: Package #%d does not exist." % id]
+
+ self.core.api.pullFromQueue(id)
+ return ["INFO: Pulled package #%d from queue to collector." % id]
+
+
+ def event_c(self, args):
+ """ captcha answer """
+ if not args:
+ return ["ERROR: Captcha ID missing."]
+
+ task = self.core.captchaManager.getTaskByID(args[0])
+ if not task:
+ return ["ERROR: Captcha Task with ID %s does not exists." % args[0]]
+
+ task.setResult(" ".join(args[1:]))
+ return ["INFO: Result %s saved." % " ".join(args[1:])]
+
+
+ def event_help(self, args):
+ lines = ["The following commands are available:",
+ "add <package|packid> <links> [...] Adds link to package. (creates new package if it does not exist)",
+ "queue Shows all packages in the queue",
+ "collector Shows all packages in collector",
+ "del -p|-l <id> [...] Deletes all packages|links with the ids specified",
+ "info <id> Shows info of the link with id <id>",
+ "packinfo <id> Shows info of the package with id <id>",
+ "more Shows more info when the result was truncated",
+ "start Starts all downloads",
+ "stop Stops the download (but not abort active downloads)",
+ "push <id> Push package to queue",
+ "pull <id> Pull package from queue",
+ "status Show general download status",
+ "help Shows this help message"]
+ return lines
+
+
+class IRCError(Exception):
+
+ def __init__(self, value):
+ self.value = value
+
+
+ def __str__(self):
+ return repr(self.value)
diff --git a/pyload/plugins/addon/MergeFiles.py b/pyload/plugins/addon/MergeFiles.py
new file mode 100644
index 000000000..2483135f4
--- /dev/null
+++ b/pyload/plugins/addon/MergeFiles.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+import os
+import re
+
+from traceback import print_exc
+
+from pyload.plugins.internal.Addon import Addon, threaded
+from pyload.utils import safe_join, fs_encode
+
+
+class MergeFiles(Addon):
+ __name__ = "MergeFiles"
+ __type__ = "addon"
+ __version__ = "0.12"
+
+ __config__ = [("activated", "bool", "Activated", True)]
+
+ __description__ = """Merges parts splitted with hjsplit"""
+ __license__ = "GPLv3"
+ __authors__ = [("and9000", "me@has-no-mail.com")]
+
+
+ BUFFER_SIZE = 4096
+
+
+ def setup(self):
+ # nothing to do
+ pass
+
+
+ @threaded
+ def packageFinished(self, pack):
+ files = {}
+ fid_dict = {}
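+ # group split parts by base name, e.g. "movie.avi.001", "movie.avi.002" -> files["movie.avi"]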
+ for fid, data in pack.getChildren().iteritems():
+ if re.search("\.\d{3}$", data['name']):
+ if data['name'][:-4] not in files:
+ files[data['name'][:-4]] = []
+ files[data['name'][:-4]].append(data['name'])
+ files[data['name'][:-4]].sort()
+ fid_dict[data['name']] = fid
+
+ download_folder = self.config['general']['download_folder']
+
+ if self.config['general']['folder_per_package']:
+ download_folder = safe_join(download_folder, pack.folder)
+
+ for name, file_list in files.iteritems():
+ self.logInfo(_("Starting merging of"), name)
+ final_file = open(safe_join(download_folder, name), "wb")
+
+ for splitted_file in file_list:
+ self.logDebug("Merging part", splitted_file)
+ pyfile = self.core.files.getFile(fid_dict[splitted_file])
+ pyfile.setStatus("processing")
+ try:
+ s_file = open(os.path.join(download_folder, splitted_file), "rb")
+ size_written = 0
+ s_file_size = int(os.path.getsize(os.path.join(download_folder, splitted_file)))
+
+ while True:
+ f_buffer = s_file.read(self.BUFFER_SIZE)
+ if f_buffer:
+ final_file.write(f_buffer)
+ size_written += self.BUFFER_SIZE
+ pyfile.setProgress((size_written * 100) / s_file_size)
+ else:
+ break
+
+ s_file.close()
+ self.logDebug("Finished merging part", splitted_file)
+
+ except Exception, e:
+ print_exc()
+
+ finally:
+ pyfile.setProgress(100)
+ pyfile.setStatus("finished")
+ pyfile.release()
+
+ final_file.close()
+ self.logInfo(_("Finished merging of"), name)
diff --git a/pyload/plugins/addon/MultiHome.py b/pyload/plugins/addon/MultiHome.py
new file mode 100644
index 000000000..4371125dc
--- /dev/null
+++ b/pyload/plugins/addon/MultiHome.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+from time import time
+
+from pyload.plugins.internal.Addon import Addon
+
+
+class MultiHome(Addon):
+ __name__ = "MultiHome"
+ __type__ = "addon"
+ __version__ = "0.11"
+
+ __config__ = [("activated" , "bool", "Activated" , False ),
+ ("interfaces", "str" , "Interfaces", "None")]
+
+ __description__ = """Ip address changer"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+ def setup(self):
+ self.register = {}
+ self.interfaces = []
+ self.parseInterfaces(self.getConfig("interfaces").split(";"))
+ if not self.interfaces:
+ self.parseInterfaces([self.config['download']['interface']])
+ self.setConfig("interfaces", self.toConfig())
+
+
+ def toConfig(self):
+ return ";".join([i.adress for i in self.interfaces])
+
+
+ def parseInterfaces(self, interfaces):
+ for interface in interfaces:
+ if not interface or str(interface).lower() == "none":
+ continue
+ self.interfaces.append(Interface(interface))
+
+
+ def coreReady(self):
+ requestFactory = self.core.requestFactory
+ oldGetRequest = requestFactory.getRequest
+
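+ # wrap the request factory so every new request goes out via the least recently used interface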
+ def getRequest(pluginName, account=None):
+ iface = self.bestInterface(pluginName, account)
+ if iface:
+ iface.useFor(pluginName, account)
+ requestFactory.iface = lambda: iface.adress
+ self.logDebug("Using address", iface.adress)
+ return oldGetRequest(pluginName, account)
+
+ requestFactory.getRequest = getRequest
+
+
+ def bestInterface(self, pluginName, account):
+ best = None
+ for interface in self.interfaces:
+ if not best or interface.lastPluginAccess(pluginName, account) < best.lastPluginAccess(pluginName, account):
+ best = interface
+ return best
+
+
+class Interface(object):
+
+ def __init__(self, adress):
+ self.adress = adress
+ self.history = {}
+
+
+ def lastPluginAccess(self, pluginName, account):
+ if (pluginName, account) in self.history:
+ return self.history[(pluginName, account)]
+ return 0
+
+
+ def useFor(self, pluginName, account):
+ self.history[(pluginName, account)] = time()
+
+
+ def __repr__(self):
+ return "<Interface - %s>" % self.adress
diff --git a/pyload/plugins/addon/RestartFailed.py b/pyload/plugins/addon/RestartFailed.py
new file mode 100644
index 000000000..861223f3d
--- /dev/null
+++ b/pyload/plugins/addon/RestartFailed.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Addon import Addon
+
+
+class RestartFailed(Addon):
+ __name__ = "RestartFailed"
+ __type__ = "addon"
+ __version__ = "1.57"
+
+ __config__ = [("activated", "bool", "Activated" , True),
+ ("interval" , "int" , "Check interval in minutes", 90 )]
+
+ __description__ = """Periodically restart all failed downloads in queue"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ # event_list = ["pluginConfigChanged"]
+
+ MIN_INTERVAL = 15 * 60 #: 15m minimum check interval (value is in seconds)
+
+
+ def pluginConfigChanged(self, plugin, name, value):
+ if name == "interval":
+ interval = value * 60
+ if self.MIN_INTERVAL <= interval != self.interval:
+ self.core.scheduler.removeJob(self.cb)
+ self.interval = interval
+ self.initPeriodical()
+ else:
+ self.logDebug("Invalid interval value, kept current")
+
+
+ def periodical(self):
+ self.logDebug(_("Restart failed downloads"))
+ self.core.api.restartFailed()
+
+
+ def setup(self):
+ self.interval = self.MIN_INTERVAL
+
+
+ def coreReady(self):
+ self.pluginConfigChanged(self.__name__, "interval", self.getConfig("interval"))
diff --git a/pyload/plugins/addon/UnSkipOnFail.py b/pyload/plugins/addon/UnSkipOnFail.py
new file mode 100644
index 000000000..0bccca75f
--- /dev/null
+++ b/pyload/plugins/addon/UnSkipOnFail.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+
+from os.path import basename
+
+from pyload.datatype.PyFile import PyFile
+from pyload.plugins.internal.Addon import Addon
+from pyload.utils import fs_encode
+
+
+class UnSkipOnFail(Addon):
+ __name__ = "UnSkipOnFail"
+ __type__ = "addon"
+ __version__ = "0.01"
+
+ __config__ = [("activated", "bool", "Activated", True)]
+
+ __description__ = """When a download fails, restart skipped duplicates"""
+ __license__ = "GPLv3"
+ __authors__ = [("hagg", None)]
+
+
+ def downloadFailed(self, pyfile):
+ pyfile_name = basename(pyfile.name)
+ pid = pyfile.package().id
+ msg = _('Looking for skipped duplicates of %s (pid:%s)')
+ self.logInfo(msg % (pyfile_name, pid))
+ dups = self.findDuplicates(pyfile)
+ for link in dups:
+ # check if link is "skipped"(=4)
+ if link.status == 4:
+ lpid = link.packageID
+ self.logInfo(_('restart "%s" (pid:%s)') % (pyfile_name, lpid))
+ self.setLinkStatus(link, "queued")
+
+
+ def findDuplicates(self, pyfile):
+ """ Search all packages for duplicate links to "pyfile".
+ Duplicates are links that would overwrite "pyfile".
+ To test for duplicity, the package folder and link name
+ of two links are compared (basename(link.name)).
+ So this method returns a list of all links with the same
+ package folder and filename as "pyfile", except
+ the data for "pyfile" itself.
+ It does NOT check the link's status.
+ """
+ dups = []
+ pyfile_name = fs_encode(basename(pyfile.name))
+ # get packages (w/o files, as most file data is useless here)
+ queue = self.core.api.getQueue()
+ for package in queue:
+ # check if package-folder equals pyfile's package folder
+ if fs_encode(package.folder) == fs_encode(pyfile.package().folder):
+ # now get packaged data w/ files/links
+ pdata = self.core.api.getPackageData(package.pid)
+ if pdata.links:
+ for link in pdata.links:
+ link_name = fs_encode(basename(link.name))
+ # check if link name collides with pdata's name
+ if link_name == pyfile_name:
+ # at last check if it is not pyfile itself
+ if link.fid != pyfile.id:
+ dups.append(link)
+ return dups
+
+
+ def setLinkStatus(self, link, new_status):
+ """ Change status of "link" to "new_status".
+ "link" has to be a valid FileData object,
+ "new_status" has to be a valid status name
+ (i.e. "queued" for this Plugin)
+ It creates a temporary PyFile object using
+ "link" data, changes its status, and tells
+ the core.files-manager to save its data.
+ """
+ pyfile = PyFile(self.core.files,
+ link.fid,
+ link.url,
+ link.name,
+ link.size,
+ link.status,
+ link.error,
+ link.plugin,
+ link.packageID,
+ link.order)
+ pyfile.setStatus(new_status)
+ self.core.files.save()
+ pyfile.release()
diff --git a/pyload/plugins/addon/UpdateManager.py b/pyload/plugins/addon/UpdateManager.py
new file mode 100644
index 000000000..082721e2f
--- /dev/null
+++ b/pyload/plugins/addon/UpdateManager.py
@@ -0,0 +1,300 @@
+# -*- coding: utf-8 -*-
+
+import re
+import sys
+
+from operator import itemgetter
+from os import path, remove, stat
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Addon import Expose, Addon, threaded
+from pyload.utils import safe_join
+
+
+class UpdateManager(Addon):
+ __name__ = "UpdateManager"
+ __type__ = "addon"
+ __version__ = "0.40"
+
+ __config__ = [("activated" , "bool" , "Activated" , True ),
+ ("mode" , "pyLoad + plugins;plugins only", "Check updates for" , "pyLoad + plugins"),
+ ("interval" , "int" , "Check interval in hours" , 8 ),
+ ("reloadplugins", "bool" , "Monitor plugins for code changes (debug mode only)", True ),
+ ("nodebugupdate", "bool" , "Don't check for updates in debug mode" , True )]
+
+ __description__ = """Check for updates"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ # event_list = ["pluginConfigChanged"]
+
+ SERVER_URL = "http://updatemanager.pyload.org"
+ MIN_INTERVAL = 6 * 60 * 60 #: 6h minimum check interval (value is in seconds)
+
+
+ def pluginConfigChanged(self, plugin, name, value):
+ if name == "interval":
+ interval = value * 60 * 60
+ if self.MIN_INTERVAL <= interval != self.interval:
+ self.core.scheduler.removeJob(self.cb)
+ self.interval = interval
+ self.initPeriodical()
+ else:
+ self.logDebug("Invalid interval value, kept current")
+
+ elif name == "reloadplugins":
+ if self.cb2:
+ self.core.scheduler.removeJob(self.cb2)
+ if value is True and self.core.debug:
+ self.periodical2()
+
+
+ def coreReady(self):
+ self.pluginConfigChanged(self.__name__, "interval", self.getConfig("interval"))
+ x = lambda: self.pluginConfigChanged(self.__name__, "reloadplugins", self.getConfig("reloadplugins"))
+ self.core.scheduler.addJob(10, x, threaded=False)
+
+
+ def unload(self):
+ self.pluginConfigChanged(self.__name__, "reloadplugins", False)
+
+
+ def setup(self):
+ self.cb2 = None
+ self.interval = self.MIN_INTERVAL
+ self.updating = False
+ self.info = {'pyload': False, 'version': None, 'plugins': False}
+ self.mtimes = {} #: store modification time for each plugin
+
+
+ def periodical2(self):
+ if not self.updating:
+ self.autoreloadPlugins()
+
+ self.cb2 = self.core.scheduler.addJob(4, self.periodical2, threaded=False)
+
+
+ @Expose
+ def autoreloadPlugins(self):
+ """ reload and reindex all modified plugins """
+ modules = filter(
+ lambda m: m and (m.__name__.startswith("pyload.plugins.") or
+ m.__name__.startswith("userplugins.")) and
+ m.__name__.count(".") >= 2, sys.modules.itervalues()
+ )
+
+ reloads = []
+
+ for m in modules:
+ root, type, name = m.__name__.rsplit(".", 2)
+ id = (type, name)
+ if type in self.core.pluginManager.plugins:
+ f = m.__file__.replace(".pyc", ".py")
+ if not path.isfile(f):
+ continue
+
+ mtime = stat(f).st_mtime
+
+ if id not in self.mtimes:
+ self.mtimes[id] = mtime
+ elif self.mtimes[id] < mtime:
+ reloads.append(id)
+ self.mtimes[id] = mtime
+
+ return bool(self.core.pluginManager.reloadPlugins(reloads))
+
+
+ def periodical(self):
+ if self.info['pyload'] or self.getConfig("nodebugupdate") and self.core.debug:
+ return
+
+ self.updateThread()
+
+
+ def server_request(self):
+ try:
+ return getURL(self.SERVER_URL, get={'v': self.core.api.getServerVersion()}).splitlines()
+ except:
+ self.logWarning(_("Unable to contact server to get updates"))
+
+
+ @threaded
+ def updateThread(self):
+ self.updating = True
+
+ status = self.update(onlyplugin=self.getConfig("mode") == "plugins only")
+
+ if status == 2:
+ self.core.api.restart()
+ else:
+ self.updating = False
+
+
+ @Expose
+ def updatePlugins(self):
+ """ simple wrapper for calling plugin update quickly """
+ return self.update(onlyplugin=True)
+
+
+ @Expose
+ def update(self, onlyplugin=False):
+ """ check for updates """
+ data = self.server_request()
+
+ if not data:
+ exitcode = 0
+
+ elif data[0] == "None":
+ self.logInfo(_("No new pyLoad version available"))
+ updates = data[1:]
+ exitcode = self._updatePlugins(updates)
+
+ elif onlyplugin:
+ exitcode = 0
+
+ else:
+ newversion = data[0]
+ self.logInfo(_("*** New pyLoad Version %s available ***") % newversion)
+ self.logInfo(_("*** Get it here: https://github.com/pyload/pyload/releases ***"))
+ exitcode = 3
+ self.info['pyload'] = True
+ self.info['version'] = newversion
+
+ return exitcode #: 0 = No plugins updated; 1 = Plugins updated; 2 = Plugins updated, but restart required; 3 = No plugins updated, new pyLoad version available
+
+
+ def _updatePlugins(self, updates):
+ """ check for plugin updates """
+
+ if self.info['plugins']:
+ return False #: plugins were already updated
+
+ exitcode = 0
+ updated = []
+
+ vre = re.compile(r'__version__.*=.*("|\')([\d.]+)')
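+ # server response layout: line 0 = download url template, line 1 = field schema, following lines = one plugin per line (optionally ending with a BLACKLIST section)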
+ url = updates[0]
+ schema = updates[1].split('|')
+
+ if "BLACKLIST" in updates:
+ blacklist = updates[updates.index('BLACKLIST') + 1:]
+ updates = updates[2:updates.index('BLACKLIST')]
+ else:
+ blacklist = None
+ updates = updates[2:]
+
+ upgradable = sorted(map(lambda x: dict(zip(schema, x.split('|'))), updates),
+ key=itemgetter("type", "name"))
+
+ for plugin in upgradable:
+ filename = plugin['name']
+ type = plugin['type']
+ version = plugin['version']
+
+ if filename.endswith(".pyc"):
+ name = filename[:filename.find("_")]
+ else:
+ name = filename.replace(".py", "")
+
+ plugins = getattr(self.core.pluginManager, "%sPlugins" % type)
+
+ oldver = float(plugins[name]['version']) if name in plugins else None
+ newver = float(version)
+
+ if not oldver:
+ msg = "New plugin: [%(type)s] %(name)s (v%(newver).2f)"
+ elif newver > oldver:
+ msg = "New version of plugin: [%(type)s] %(name)s (v%(oldver).2f -> v%(newver).2f)"
+ else:
+ continue
+
+ self.logInfo(_(msg) % {'type' : type,
+ 'name' : name,
+ 'oldver': oldver,
+ 'newver': newver})
+ try:
+ content = getURL(url % plugin)
+ m = vre.search(content)
+
+ if m and m.group(2) == version:
+ f = open(safe_join("userplugins", prefix, filename), "wb")
+ f.write(content)
+ f.close()
+ updated.append((prefix, name))
+ else:
+ raise Exception, _("Version mismatch")
+
+ except Exception, e:
+ self.logError(_("Error updating plugin %s") % filename, e)
+
+ if blacklist:
+ blacklisted = map(lambda x: (x.split('|')[0], x.split('|')[1].rsplit('.', 1)[0]), blacklist)
+
+ # Always protect internal plugins from removal
+ blacklisted = [(t, n) for t, n in blacklisted if t != "internal"]
+
+ blacklisted = sorted(blacklisted)
+ removed = self.removePlugins(blacklisted)
+ for t, n in removed:
+ self.logInfo(_("Removed blacklisted plugin [%(type)s] %(name)s") % {
+ 'type': t,
+ 'name': n,
+ })
+
+ if updated:
+ reloaded = self.core.pluginManager.reloadPlugins(updated)
+ if reloaded:
+ self.logInfo(_("Plugins updated and reloaded"))
+ exitcode = 1
+ else:
+ self.logInfo(_("*** Plugins have been updated, but need a pyLoad restart to be reloaded ***"))
+ self.info['plugins'] = True
+ exitcode = 2
+ else:
+ self.logInfo(_("No plugin updates available"))
+
+ return exitcode #: 0 = No plugins updated; 1 = Plugins updated; 2 = Plugins updated, but restart required
+
+
+ @Expose
+ def removePlugins(self, type_plugins):
+ """ delete plugins from disk """
+
+ if not type_plugins:
+ return
+
+ self.logDebug("Requested deletion of plugins: %s" % type_plugins)
+
+ removed = []
+
+ for type, name in type_plugins:
+ err = False
+ file = name + ".py"
+
+ for root in ("userplugins", path.join(pypath, "pyload", "plugins")):
+
+ filename = safe_join(root, type, file)
+ try:
+ remove(filename)
+ except Exception, e:
+ self.logDebug("Error deleting: %s" % path.basename(filename), e)
+ err = True
+
+ filename += "c"
+ if path.isfile(filename):
+ try:
+ if type == "addon":
+ self.manager.deactivateAddon(name)
+ remove(filename)
+ except Exception, e:
+ self.logDebug("Error deleting: %s" % path.basename(filename), e)
+ err = True
+
+ if not err:
+ id = (type, name)
+ removed.append(id)
+
+ return removed #: return a list of the plugins successfully removed
diff --git a/pyload/plugins/addon/WindowsPhoneToastNotify.py b/pyload/plugins/addon/WindowsPhoneToastNotify.py
new file mode 100644
index 000000000..5b62c49a8
--- /dev/null
+++ b/pyload/plugins/addon/WindowsPhoneToastNotify.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+import httplib
+import time
+
+from pyload.plugins.internal.Addon import Addon
+
+
+class WindowsPhoneToastNotify(Addon):
+ __name__ = "WindowsPhoneToastNotify"
+ __type__ = "addon"
+ __version__ = "0.02"
+
+ __config__ = [("activated" , "bool", "Activated" , False),
+ ("force" , "bool", "Force even if client is connected" , False),
+ ("pushId" , "str" , "pushId" , "" ),
+ ("pushUrl" , "str" , "pushUrl" , "" ),
+ ("pushTimeout", "int" , "Timeout between notifications in seconds", 0 )]
+
+ __description__ = """Send push notifications to Windows Phone"""
+ __license__ = "GPLv3"
+ __authors__ = [("Andy Voigt", "phone-support@hotmail.de")]
+
+
+ def getXmlData(self):
+ myxml = ("<?xml version='1.0' encoding='utf-8'?> <wp:Notification xmlns:wp='WPNotification'> "
+ "<wp:Toast> <wp:Text1>Pyload Mobile</wp:Text1> <wp:Text2>Captcha waiting!</wp:Text2> "
+ "</wp:Toast> </wp:Notification>")
+ return myxml
+
+
+ def doRequest(self):
+ URL = self.getConfig("pushUrl")
+ request = self.getXmlData()
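+ # post the toast XML to the push notification endpoint configured via pushUrl (host) and pushId (path)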
+ webservice = httplib.HTTP(URL)
+ webservice.putrequest("POST", self.getConfig("pushId"))
+ webservice.putheader("Host", URL)
+ webservice.putheader("Content-type", "text/xml")
+ webservice.putheader("X-NotificationClass", "2")
+ webservice.putheader("X-WindowsPhone-Target", "toast")
+ webservice.putheader("Content-length", "%d" % len(request))
+ webservice.endheaders()
+ webservice.send(request)
+ webservice.close()
+ self.setStorage("LAST_NOTIFY", time.time())
+
+
+ def newCaptchaTask(self, task):
+ if not self.getConfig("pushId") or not self.getConfig("pushUrl"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if (time.time() - float(self.getStorage("LAST_NOTIFY", 0))) < self.getConf("pushTimeout"):
+ return False
+
+ self.doRequest()
diff --git a/pyload/plugins/addon/XMPPInterface.py b/pyload/plugins/addon/XMPPInterface.py
new file mode 100644
index 000000000..7b11bd4dd
--- /dev/null
+++ b/pyload/plugins/addon/XMPPInterface.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+
+from pyxmpp import streamtls
+from pyxmpp.all import JID, Message
+from pyxmpp.interface import implements
+from pyxmpp.interfaces import *
+from pyxmpp.jabber.client import JabberClient
+
+from pyload.plugins.addon.IRCInterface import IRCInterface
+
+
+class XMPPInterface(IRCInterface, JabberClient):
+ __name__ = "XMPPInterface"
+ __type__ = "addon"
+ __version__ = "0.11"
+
+ __config__ = [("activated", "bool", "Activated" , False ),
+ ("jid" , "str" , "Jabber ID" , "user@exmaple-jabber-server.org" ),
+ ("pw" , "str" , "Password" , "" ),
+ ("tls" , "bool", "Use TLS" , False ),
+ ("owners" , "str" , "List of JIDs accepting commands from", "me@icq-gateway.org;some@msn-gateway.org"),
+ ("info_file", "bool", "Inform about every file finished" , False ),
+ ("info_pack", "bool", "Inform about every package finished" , True ),
+ ("captcha" , "bool", "Send captcha requests" , True )]
+
+ __description__ = """Connect to jabber and let owner perform different tasks"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ implements(IMessageHandlersProvider)
+
+
+ def __init__(self, core, manager):
+ IRCInterface.__init__(self, core, manager)
+
+ self.jid = JID(self.getConfig("jid"))
+ password = self.getConfig("pw")
+
+ # if bare JID is provided add a resource -- it is required
+ if not self.jid.resource:
+ self.jid = JID(self.jid.node, self.jid.domain, "pyLoad")
+
+ if self.getConfig("tls"):
+ tls_settings = streamtls.TLSSettings(require=True, verify_peer=False)
+ auth = ("sasl:PLAIN", "sasl:DIGEST-MD5")
+ else:
+ tls_settings = None
+ auth = ("sasl:DIGEST-MD5", "digest")
+
+ # setup client with provided connection information
+ # and identity data
+ JabberClient.__init__(self, self.jid, password,
+ disco_name="pyLoad XMPP Client", disco_type="bot",
+ tls_settings=tls_settings, auth_methods=auth)
+
+ self.interface_providers = [
+ VersionHandler(self),
+ self,
+ ]
+
+
+ def coreReady(self):
+ self.new_package = {}
+
+ self.start()
+
+
+ def packageFinished(self, pypack):
+ try:
+ if self.getConfig("info_pack"):
+ self.announce(_("Package finished: %s") % pypack.name)
+ except:
+ pass
+
+
+ def downloadFinished(self, pyfile):
+ try:
+ if self.getConfig("info_file"):
+ self.announce(
+ _("Download finished: %(name)s @ %(plugin)s") % {"name": pyfile.name, "plugin": pyfile.pluginname})
+ except:
+ pass
+
+
+ def run(self):
+ # connect to IRC etc.
+ self.connect()
+ try:
+ self.loop()
+ except Exception, ex:
+ self.logError(ex)
+
+
+ def stream_state_changed(self, state, arg):
+ """This one is called when the state of stream connecting the component
+ to a server changes. This will usually be used to let the user
+ know what is going on."""
+ self.logDebug("*** State changed: %s %r ***" % (state, arg))
+
+
+ def disconnected(self):
+ self.logDebug("Client was disconnected")
+
+
+ def stream_closed(self, stream):
+ self.logDebug("Stream was closed", stream)
+
+
+ def stream_error(self, err):
+ self.logDebug("Stream Error", err)
+
+
+ def get_message_handlers(self):
+ """Return list of (message_type, message_handler) tuples.
+
+ The handlers returned will be called when matching message is received
+ in a client session."""
+ return [("normal", self.message)]
+
+
+ def message(self, stanza):
+ """Message handler for the component."""
+ subject = stanza.get_subject()
+ body = stanza.get_body()
+ t = stanza.get_type()
+ self.logDebug("Message from %s received." % unicode(stanza.get_from()))
+ self.logDebug("Body: %s Subject: %s Type: %s" % (body, subject, t))
+
+ if t == "headline":
+ # 'headline' messages should never be replied to
+ return True
+ if subject:
+ subject = u"Re: " + subject
+
+ to_jid = stanza.get_from()
+ from_jid = stanza.get_to()
+
+ #j = JID()
+ to_name = to_jid.as_utf8()
+ from_name = from_jid.as_utf8()
+
+ names = self.getConfig("owners").split(";")
+
+ if to_name in names or to_jid.node + "@" + to_jid.domain in names:
+ messages = []
+
+ trigger = "pass"
+ args = None
+
+ try:
+ temp = body.split()
+ trigger = temp[0]
+ if len(temp) > 1:
+ args = temp[1:]
+ except:
+ pass
+
+ handler = getattr(self, "event_%s" % trigger, self.event_pass)
+ try:
+ res = handler(args)
+ for line in res:
+ m = Message(
+ to_jid=to_jid,
+ from_jid=from_jid,
+ stanza_type=stanza.get_type(),
+ subject=subject,
+ body=line)
+
+ messages.append(m)
+ except Exception, e:
+ self.logError(e)
+
+ return messages
+
+ else:
+ return True
+
+
+ def response(self, msg, origin=""):
+ return self.announce(msg)
+
+
+ def announce(self, message):
+ """ send message to all owners"""
+ for user in self.getConfig("owners").split(";"):
+ self.logDebug("Send message to", user)
+
+ to_jid = JID(user)
+
+ m = Message(from_jid=self.jid,
+ to_jid=to_jid,
+ stanza_type="chat",
+ body=message)
+
+ stream = self.get_stream()
+ if not stream:
+ self.connect()
+ stream = self.get_stream()
+
+ stream.send(m)
+
+
+ def beforeReconnecting(self, ip):
+ self.disconnect()
+
+
+ def afterReconnecting(self, ip):
+ self.connect()
+
+
+class VersionHandler(object):
+ """Provides handler for a version query.
+
+ This class will answer version query and announce 'jabber:iq:version' namespace
+ in the client's disco#info results."""
+
+ implements(IIqHandlersProvider, IFeaturesProvider)
+
+
+ def __init__(self, client):
+ """Just remember who created this."""
+ self.client = client
+
+
+ def get_features(self):
+ """Return namespace which should the client include in its reply to a
+ disco#info query."""
+ return ["jabber:iq:version"]
+
+
+ def get_iq_get_handlers(self):
+ """Return list of tuples (element_name, namespace, handler) describing
+ handlers of <iq type='get'/> stanzas"""
+ return [("query", "jabber:iq:version", self.get_version)]
+
+
+ def get_iq_set_handlers(self):
+ """Return empty list, as this class provides no <iq type='set'/> stanza handler."""
+ return []
+
+
+ def get_version(self, iq):
+ """Handler for jabber:iq:version queries.
+
+ jabber:iq:version queries are not supported directly by PyXMPP, so the
+ XML node is accessed directly through the libxml2 API. This should be
+ used very carefully!"""
+ iq = iq.make_result_response()
+ q = iq.new_query("jabber:iq:version")
+ q.newTextChild(q.ns(), "name", "Echo component")
+ q.newTextChild(q.ns(), "version", "1.0")
+ return iq
diff --git a/pyload/plugins/addon/__init__.py b/pyload/plugins/addon/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/addon/__init__.py
diff --git a/pyload/plugins/captcha/AdsCaptcha.py b/pyload/plugins/captcha/AdsCaptcha.py
new file mode 100644
index 000000000..845205e4c
--- /dev/null
+++ b/pyload/plugins/captcha/AdsCaptcha.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import random
+
+from pyload.plugins.internal.Captcha import Captcha
+
+
+class AdsCaptcha(Captcha):
+ __name__ = "AdsCaptcha"
+ __type__ = "captcha"
+ __version__ = "0.04"
+
+ __description__ = """AdsCaptcha captcha service plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ ID_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?[^"\']*CaptchaId=(?P<ID>\d+)'
+ KEY_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?[^"\']*PublicKey=(?P<KEY>[\w-]+)'
+
+
+ def detect_key(self, html=None):
+ if not html:
+ if hasattr(self.plugin, "html") and self.plugin.html:
+ html = self.plugin.html
+ else:
+ errmsg = _("AdsCaptcha html not found")
+ self.plugin.error(errmsg)
+ raise TypeError(errmsg)
+
+ m = re.search(self.ID_PATTERN, html)
+ n = re.search(self.KEY_PATTERN, html)
+ if m and n:
+ self.key = (m.group("ID"), m.group("KEY"))
+ self.plugin.logDebug("AdsCaptcha id|key: %s | %s" % self.key)
+ return self.key
+ else:
+ self.plugin.logDebug("AdsCaptcha id or key not found")
+ return None
+
+
+ def challenge(self, key=None): #: key is a tuple(CaptchaId, PublicKey)
+ if not key:
+ if self.detect_key():
+ key = self.key
+ else:
+ errmsg = _("AdsCaptcha key not found")
+ self.plugin.error(errmsg)
+ raise TypeError(errmsg)
+
+ CaptchaId, PublicKey = key
+
+ js = self.plugin.req.load("http://api.adscaptcha.com/Get.aspx", get={'CaptchaId': CaptchaId, 'PublicKey': PublicKey})
+
+ try:
+ challenge = re.search("challenge: '(.+?)',", js).group(1)
+ server = re.search("server: '(.+?)',", js).group(1)
+ except:
+ self.plugin.error("AdsCaptcha challenge pattern not found")
+
+ result = self.result(server, challenge)
+
+ return challenge, result
+
+
+ def result(self, server, challenge):
+ return self.plugin.decryptCaptcha("%sChallenge.aspx" % server, get={'cid': challenge, 'dummy': random()},
+ cookies=True, imgtype="jpg")
diff --git a/pyload/plugins/captcha/ReCaptcha.py b/pyload/plugins/captcha/ReCaptcha.py
new file mode 100644
index 000000000..4516b76de
--- /dev/null
+++ b/pyload/plugins/captcha/ReCaptcha.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Captcha import Captcha
+
+
+class ReCaptcha(Captcha):
+ __name__ = "ReCaptcha"
+ __type__ = "captcha"
+ __version__ = "0.07"
+
+ __description__ = """ReCaptcha captcha service plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ KEY_PATTERN = r'recaptcha(/api|\.net)/(challenge|noscript)\?k=(?P<KEY>[\w-]+)'
+ KEY_AJAX_PATTERN = r'Recaptcha\.create\s*\(\s*["\'](?P<KEY>[\w-]+)'
+
+
+ def detect_key(self, html=None):
+ if not html:
+ if hasattr(self.plugin, "html") and self.plugin.html:
+ html = self.plugin.html
+ else:
+ errmsg = _("ReCaptcha html not found")
+ self.plugin.error(errmsg)
+ raise TypeError(errmsg)
+
+ m = re.search(self.KEY_PATTERN, html) or re.search(self.KEY_AJAX_PATTERN, html)
+ if m:
+ self.key = m.group("KEY")
+ self.plugin.logDebug("ReCaptcha key: %s" % self.key)
+ return self.key
+ else:
+ self.plugin.logDebug("ReCaptcha key not found")
+ return None
+
+
+ def challenge(self, key=None):
+ if not key:
+ if self.detect_key():
+ key = self.key
+ else:
+ errmsg = _("ReCaptcha key not found")
+ self.plugin.error(errmsg)
+ raise TypeError(errmsg)
+
+ js = self.plugin.req.load("http://www.google.com/recaptcha/api/challenge", get={'k': key})
+
+ try:
+ challenge = re.search("challenge : '(.+?)',", js).group(1)
+ server = re.search("server : '(.+?)',", js).group(1)
+ except:
+ self.plugin.error("ReCaptcha challenge pattern not found")
+
+ result = self.result(server, challenge)
+
+ return challenge, result
+
+
+ def result(self, server, challenge):
+ return self.plugin.decryptCaptcha("%simage" % server, get={'c': challenge},
+ cookies=True, forceUser=True, imgtype="jpg")
diff --git a/pyload/plugins/captcha/SolveMedia.py b/pyload/plugins/captcha/SolveMedia.py
new file mode 100644
index 000000000..82f1c4722
--- /dev/null
+++ b/pyload/plugins/captcha/SolveMedia.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Captcha import Captcha
+
+
+class SolveMedia(Captcha):
+ __name__ = "SolveMedia"
+ __type__ = "captcha"
+ __version__ = "0.05"
+
+ __description__ = """SolveMedia captcha service plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ KEY_PATTERN = r'api\.solvemedia\.com/papi/challenge\.(no)?script\?k=(?P<KEY>.+?)["\']'
+
+
+ def challenge(self, key=None):
+ if not key:
+ if self.detect_key():
+ key = self.key
+ else:
+ errmsg = _("SolveMedia key not found")
+ self.plugin.error(errmsg)
+ raise TypeError(errmsg)
+
+ html = self.plugin.req.load("http://api.solvemedia.com/papi/challenge.noscript", get={'k': key})
+ try:
+ challenge = re.search(r'<input type=hidden name="adcopy_challenge" id="adcopy_challenge" value="([^"]+)">',
+ html).group(1)
+ server = "http://api.solvemedia.com/papi/media"
+ except:
+ self.plugin.error("SolveMedia challenge pattern not found")
+
+ result = self.result(server, challenge)
+
+ return challenge, result
+
+
+ def result(self, server, challenge):
+ return self.plugin.decryptCaptcha(server, get={'c': challenge}, imgtype="gif")
diff --git a/pyload/plugins/captcha/__init__.py b/pyload/plugins/captcha/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/captcha/__init__.py
diff --git a/pyload/plugins/container/CCF.py b/pyload/plugins/container/CCF.py
new file mode 100644
index 000000000..98533cbd8
--- /dev/null
+++ b/pyload/plugins/container/CCF.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from os import makedirs
+from os.path import exists
+from urllib2 import build_opener
+
+from MultipartPostHandler import MultipartPostHandler
+
+from pyload.plugins.internal.Container import Container
+from pyload.utils import safe_join
+
+
+class CCF(Container):
+ __name__ = "CCF"
+ __version__ = "0.2"
+
+ __pattern__ = r'.+\.ccf'
+
+ __description__ = """CCF container decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Willnix", "Willnix@pyload.org")]
+
+
+ def decrypt(self, pyfile):
+ infile = pyfile.url.replace("\n", "")
+
+ opener = build_opener(MultipartPostHandler)
+ params = {"src": "ccf",
+ "filename": "test.ccf",
+ "upload": open(infile, "rb")}
+ tempdlc_content = opener.open('http://service.jdownloader.net/dlcrypt/getDLC.php', params).read()
+
+ download_folder = self.config['general']['download_folder']
+
+ tempdlc_name = safe_join(download_folder, "tmp_%s.dlc" % pyfile.name)
+ tempdlc = open(tempdlc_name, "w")
+ tempdlc.write(re.search(r'<dlc>(.*)</dlc>', tempdlc_content, re.S).group(1))
+ tempdlc.close()
+
+ self.urls = [tempdlc_name]
diff --git a/pyload/plugins/container/LinkList.py b/pyload/plugins/container/LinkList.py
new file mode 100644
index 000000000..b66e44558
--- /dev/null
+++ b/pyload/plugins/container/LinkList.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+import codecs
+
+from pyload.plugins.internal.Container import Container
+from pyload.utils import fs_encode
+
+
+class LinkList(Container):
+ __name__ = "LinkList"
+ __version__ = "0.12"
+
+ __pattern__ = r'.+\.txt'
+ __config__ = [("clear", "bool", "Clear Linklist after adding", False),
+ ("encoding", "string", "File encoding (default utf-8)", "")]
+
+ __description__ = """Read link lists in txt format"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org"),
+ ("jeix", "jeix@hasnomail.com")]
+
+
+ def decrypt(self, pyfile):
+ try:
+ file_enc = codecs.lookup(self.getConfig("encoding")).name
+ except:
+ file_enc = "utf-8"
+
+ file_name = fs_encode(pyfile.url)
+
+ txt = codecs.open(file_name, 'r', file_enc)
+ links = txt.readlines()
+ curPack = "Parsed links from %s" % pyfile.name
+
+ packages = {curPack:[],}
+
+ for link in links:
+ link = link.strip()
+ if not link:
+ continue
+
+ if link.startswith(";"):
+ continue
+ if link.startswith("[") and link.endswith("]"):
+ # new package
+ curPack = link[1:-1]
+ packages[curPack] = []
+ continue
+ packages[curPack].append(link)
+ txt.close()
+
+ # empty packages fix
+
+ delete = []
+
+ for key,value in packages.iteritems():
+ if not value:
+ delete.append(key)
+
+ for key in delete:
+ del packages[key]
+
+ if self.getConfig("clear"):
+ try:
+ txt = open(file_name, 'wb')
+ txt.close()
+ except:
+ self.logWarning(_("LinkList could not be cleared"))
+
+ for name, links in packages.iteritems():
+ self.packages.append((name, links, name))
diff --git a/pyload/plugins/container/RSDF.py b/pyload/plugins/container/RSDF.py
new file mode 100644
index 000000000..67325f20d
--- /dev/null
+++ b/pyload/plugins/container/RSDF.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+from pyload.plugins.internal.Container import Container
+from pyload.utils import fs_encode
+
+
+class RSDF(Container):
+ __name__ = "RSDF"
+ __version__ = "0.23"
+
+ __pattern__ = r'.+\.rsdf'
+
+ __description__ = """RSDF container decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("spoob", "spoob@pyload.org")]
+
+
+ def decrypt(self, pyfile):
+
+ from Crypto.Cipher import AES
+
+ infile = fs_encode(pyfile.url.replace("\n", ""))
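+ # RSDF containers use a fixed AES key; the file body is hex-encoded and every line is base64-encoded and AES-CFB encrypted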
+ Key = binascii.unhexlify('8C35192D964DC3182C6F84F3252239EB4A320D2500000000')
+
+ IV = binascii.unhexlify('FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF')
+ IV_Cipher = AES.new(Key, AES.MODE_ECB)
+ IV = IV_Cipher.encrypt(IV)
+
+ obj = AES.new(Key, AES.MODE_CFB, IV)
+
+ try:
+ with open(infile, 'r') as rsdf:
+ data = rsdf.read()
+ except IOError, e:
+ self.fail(str(e))
+
+ if re.search(r"<title>404 - Not Found</title>", data) is None:
+ data = binascii.unhexlify(''.join(data.split()))
+ data = data.splitlines()
+
+ for link in data:
+ if not link:
+ continue
+ link = base64.b64decode(link)
+ link = obj.decrypt(link)
+ decryptedUrl = link.replace('CCF: ', '')
+ self.urls.append(decryptedUrl)
+
+ self.logDebug("Adding package %s with %d links" % (pyfile.package().name, len(self.urls)))
diff --git a/pyload/plugins/container/__init__.py b/pyload/plugins/container/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/container/__init__.py
diff --git a/pyload/plugins/crypter/BitshareCom.py b/pyload/plugins/crypter/BitshareCom.py
new file mode 100644
index 000000000..723faf594
--- /dev/null
+++ b/pyload/plugins/crypter/BitshareCom.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class BitshareCom(SimpleCrypter):
+ __name__ = "BitshareCom"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?bitshare\.com/\?d=\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Bitshare.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ LINK_PATTERN = r'<a href="(http://bitshare\.com/files/.+)">.+</a></td>'
+ NAME_PATTERN = r'View public folder "(?P<N>.+)"</h1>'
diff --git a/pyload/plugins/crypter/C1neonCom.py b/pyload/plugins/crypter/C1neonCom.py
new file mode 100644
index 000000000..91a22ea39
--- /dev/null
+++ b/pyload/plugins/crypter/C1neonCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class C1neonCom(DeadCrypter):
+ __name__ = "C1neonCom"
+ __type__ = "crypter"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?c1neon\.com/.*?'
+ __config__ = []
+
+ __description__ = """C1neon.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("godofdream", "soilfiction@gmail.com")]
diff --git a/pyload/plugins/crypter/ChipDe.py b/pyload/plugins/crypter/ChipDe.py
new file mode 100644
index 000000000..96892bace
--- /dev/null
+++ b/pyload/plugins/crypter/ChipDe.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class ChipDe(Crypter):
+ __name__ = "ChipDe"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?chip\.de/video/.*\.html'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Chip.de decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("4Christopher", "4Christopher@gmx.de")]
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url)
+ try:
+ f = re.search(r'"(http://video\.chip\.de/.+)"', self.html)
+ except:
+ self.fail(_("Failed to find the URL"))
+ else:
+ self.urls = [f.group(1)]
+ self.logDebug("The file URL is %s" % self.urls[0])
diff --git a/pyload/plugins/crypter/CrockoCom.py b/pyload/plugins/crypter/CrockoCom.py
new file mode 100644
index 000000000..c959fa70a
--- /dev/null
+++ b/pyload/plugins/crypter/CrockoCom.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class CrockoCom(SimpleCrypter):
+ __name__ = "CrockoCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?crocko\.com/f/.*'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Crocko.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ LINK_PATTERN = r'<td class="last"><a href="([^"]+)">download</a>'
diff --git a/pyload/plugins/crypter/CryptItCom.py b/pyload/plugins/crypter/CryptItCom.py
new file mode 100644
index 000000000..9ca6f2bc3
--- /dev/null
+++ b/pyload/plugins/crypter/CryptItCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class CryptItCom(DeadCrypter):
+ __name__ = "CryptItCom"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?crypt-it\.com/(s|e|d|c)/\w+'
+ __config__ = []
+
+ __description__ = """Crypt-it.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de")]
diff --git a/pyload/plugins/crypter/CzshareCom.py b/pyload/plugins/crypter/CzshareCom.py
new file mode 100644
index 000000000..b79f540ff
--- /dev/null
+++ b/pyload/plugins/crypter/CzshareCom.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class CzshareCom(Crypter):
+ __name__ = "CzshareCom"
+ __type__ = "crypter"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?(czshare|sdilej)\.(com|cz)/folders/.*'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Czshare.com folder decrypter plugin, now Sdilej.cz"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ FOLDER_PATTERN = r'<tr class="subdirectory">\s*<td>\s*<table>(.*?)</table>'
+ LINK_PATTERN = r'<td class="col2"><a href="([^"]+)">info</a></td>'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ m = re.search(self.FOLDER_PATTERN, html, re.S)
+ if m is None:
+ self.error(_("FOLDER_PATTERN not found"))
+
+ self.urls.extend(re.findall(self.LINK_PATTERN, m.group(1)))
diff --git a/pyload/plugins/crypter/DDLMusicOrg.py b/pyload/plugins/crypter/DDLMusicOrg.py
new file mode 100644
index 000000000..348c2db51
--- /dev/null
+++ b/pyload/plugins/crypter/DDLMusicOrg.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import sleep
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class DDLMusicOrg(Crypter):
+ __name__ = "DDLMusicOrg"
+ __type__ = "crypter"
+ __version__ = "0.3"
+
+ __pattern__ = r'http://(?:www\.)?ddl-music\.org/captcha/ddlm_cr\d\.php\?\d+\?\d+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Ddl-music.org decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+ def setup(self):
+ self.multiDL = False
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url, cookies=True)
+
+ if re.search(r"Wer dies nicht rechnen kann", html) is not None:
+ self.offline()
+
+ math = re.search(r"(\d+) ([+-]) (\d+) =\s+<inp", html)
+ id = re.search(r"name=\"id\" value=\"(\d+)\"", html).group(1)
+ linknr = re.search(r"name=\"linknr\" value=\"(\d+)\"", html).group(1)
+
+ solve = ""
+ if math.group(2) == "+":
+ solve = int(math.group(1)) + int(math.group(3))
+ else:
+ solve = int(math.group(1)) - int(math.group(3))
+ sleep(3)
+ htmlwithlink = self.load(pyfile.url, cookies=True,
+ post={"calc%s" % linknr: solve, "send%s" % linknr: "Send", "id": id,
+ "linknr": linknr})
+ m = re.search(r"<form id=\"ff\" action=\"(.*?)\" method=\"post\">", htmlwithlink)
+ if m:
+ self.urls = [m.group(1)]
+ else:
+ self.retry()
diff --git a/pyload/plugins/crypter/DailymotionBatch.py b/pyload/plugins/crypter/DailymotionBatch.py
new file mode 100644
index 000000000..4ea26a830
--- /dev/null
+++ b/pyload/plugins/crypter/DailymotionBatch.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.utils import safe_join
+
+
+class DailymotionBatch(Crypter):
+ __name__ = "DailymotionBatch"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?dailymotion\.com/((playlists/)?(?P<TYPE>playlist|user)/)?(?P<ID>[\w^_]+)(?(TYPE)|#)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Dailymotion.com channel & playlist decrypter"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ def api_response(self, ref, req=None):
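+ # Query the public Dailymotion REST API (https://api.dailymotion.com/) and return the parsed JSON response.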
+ url = urljoin("https://api.dailymotion.com/", ref)
+ page = self.load(url, get=req)
+ return json_loads(page)
+
+
+ def getPlaylistInfo(self, id):
+ ref = "playlist/" + id
+ req = {"fields": "name,owner.screenname"}
+ playlist = self.api_response(ref, req)
+
+ if "error" in playlist:
+ return
+
+ name = playlist['name']
+ owner = playlist['owner.screenname']
+ return name, owner
+
+
+ def _getPlaylists(self, user_id, page=1):
+ ref = "user/%s/playlists" % user_id
+ req = {"fields": "id", "page": page, "limit": 100}
+ user = self.api_response(ref, req)
+
+ if "error" in user:
+ return
+
+ for playlist in user['list']:
+ yield playlist['id']
+
+ if user['has_more']:
+ for item in self._getPlaylists(user_id, page + 1):
+ yield item
+
+
+ def getPlaylists(self, user_id):
+ return [(id,) + self.getPlaylistInfo(id) for id in self._getPlaylists(user_id)]
+
+
+ def _getVideos(self, id, page=1):
+ ref = "playlist/%s/videos" % id
+ req = {"fields": "url", "page": page, "limit": 100}
+ playlist = self.api_response(ref, req)
+
+ if "error" in playlist:
+ return
+
+ for video in playlist['list']:
+ yield video['url']
+
+ if playlist['has_more']:
+ for item in self._getVideos(id, page + 1):
+ yield item
+
+
+ def getVideos(self, playlist_id):
+ return list(self._getVideos(playlist_id))[::-1]
+
+
+ def decrypt(self, pyfile):
+ m = re.match(self.__pattern__, pyfile.url)
+ m_id = m.group("ID")
+ m_type = m.group("TYPE")
+
+ if m_type == "playlist":
+ self.logDebug("Url recognized as Playlist")
+ p_info = self.getPlaylistInfo(m_id)
+ playlists = [(m_id,) + p_info] if p_info else None
+ else:
+ self.logDebug("Url recognized as Channel")
+ playlists = self.getPlaylists(m_id)
+ self.logDebug("%s playlist\s found on channel \"%s\"" % (len(playlists), m_id))
+
+ if not playlists:
+ self.fail(_("No playlist available"))
+
+ for p_id, p_name, p_owner in playlists:
+ p_videos = self.getVideos(p_id)
+ p_folder = safe_join(self.config['general']['download_folder'], p_owner, p_name)
+ self.logDebug("%s video\s found on playlist \"%s\"" % (len(p_videos), p_name))
+ self.packages.append((p_name, p_videos, p_folder)) #: folder is NOT recognized by pyload 0.4.9!
diff --git a/pyload/plugins/crypter/DataHu.py b/pyload/plugins/crypter/DataHu.py
new file mode 100644
index 000000000..2a02e03c8
--- /dev/null
+++ b/pyload/plugins/crypter/DataHu.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class DataHu(SimpleCrypter):
+ __name__ = "DataHu"
+ __type__ = "crypter"
+ __version__ = "0.06"
+
+ __pattern__ = r'http://(?:www\.)?data\.hu/dir/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Data.hu folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("crash", None),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ LINK_PATTERN = r'<a href=\'(http://data\.hu/get/.+)\' target=\'_blank\'>\1</a>'
+ NAME_PATTERN = ur'<title>(?P<N>.+) Let\xf6lt\xe9se</title>'
+
+
+ def prepare(self):
+ super(DataHu, self).prepare()
+
+ if u'K\xe9rlek add meg a jelsz\xf3t' in self.html: # Password protected
+ password = self.getPassword()
+ if not password:
+ self.fail(_("Password required"))
+
+ self.logDebug("The folder is password protected', 'Using password: " + password)
+
+ self.html = self.load(self.pyfile.url, post={'mappa_pass': password}, decode=True)
+
+ if u'Hib\xe1s jelsz\xf3' in self.html: # Wrong password
+ self.fail(_("Wrong password"))
diff --git a/pyload/plugins/crypter/DdlstorageCom.py b/pyload/plugins/crypter/DdlstorageCom.py
new file mode 100644
index 000000000..b4a8d07b4
--- /dev/null
+++ b/pyload/plugins/crypter/DdlstorageCom.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter, create_getInfo
+
+
+class DdlstorageCom(DeadCrypter):
+ __name__ = "DdlstorageCom"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?ddlstorage\.com/folder/\w+'
+ __config__ = []
+
+ __description__ = """DDLStorage.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("godofdream", "soilfiction@gmail.com"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(DdlstorageCom)
diff --git a/pyload/plugins/crypter/DepositfilesCom.py b/pyload/plugins/crypter/DepositfilesCom.py
new file mode 100644
index 000000000..9ba211607
--- /dev/null
+++ b/pyload/plugins/crypter/DepositfilesCom.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class DepositfilesCom(SimpleCrypter):
+ __name__ = "DepositfilesCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?depositfiles\.com/folders/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Depositfiles.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ LINK_PATTERN = r'<div class="progressName"[^>]*>\s*<a href="([^"]+)" title="[^"]*" target="_blank">'
diff --git a/pyload/plugins/crypter/Dereferer.py b/pyload/plugins/crypter/Dereferer.py
new file mode 100644
index 000000000..4a647a952
--- /dev/null
+++ b/pyload/plugins/crypter/Dereferer.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class Dereferer(Crypter):
+ __name__ = "Dereferer"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'https?://([^/]+)/.*?(?P<url>(ht|f)tps?(://|%3A%2F%2F).*)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Crypter for dereferers"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def decrypt(self, pyfile):
+ link = re.match(self.__pattern__, pyfile.url).group('url')
+ self.urls = [unquote(link).rstrip('+')]
diff --git a/pyload/plugins/crypter/DlProtectCom.py b/pyload/plugins/crypter/DlProtectCom.py
new file mode 100644
index 000000000..4f96a1d13
--- /dev/null
+++ b/pyload/plugins/crypter/DlProtectCom.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from base64 import urlsafe_b64encode
+from time import time
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class DlProtectCom(SimpleCrypter):
+ __name__ = "DlProtectCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?dl-protect\.com/((en|fr)/)?(?P<ID>\w+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Dl-protect.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ OFFLINE_PATTERN = r'>Unfortunately, the link you are looking for is not found'
+
+
+ def getLinks(self):
+ # Direct link with redirect
+ if not re.match(r"http://(?:www\.)?dl-protect\.com", self.req.http.lastEffectiveURL):
+ return [self.req.http.lastEffectiveURL]
+
+ #id = re.match(self.__pattern__, self.pyfile.url).group("ID")
+ key = re.search(r'name="id_key" value="(.+?)"', self.html).group(1)
+
+ post_req = {"id_key": key, "submitform": ""}
+
+ if self.OFFLINE_PATTERN in self.html:
+ self.offline()
+ elif ">Please click on continue to see the content" in self.html:
+ post_req.update({"submitform": "Continue"})
+ else:
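+ # Build the "i" form field: "_" plus the current time in milliseconds, URL-safe base64
+ # encoded with "=" padding escaped as %3D (presumably mirroring what the site's own JS submits).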
+ mstime = int(round(time() * 1000))
+ b64time = "_" + urlsafe_b64encode(str(mstime)).replace("=", "%3D")
+
+ post_req.update({"i": b64time, "submitform": "Decrypt+link"})
+
+ if ">Password :" in self.html:
+ post_req['pwd'] = self.getPassword()
+
+ if ">Security Code" in self.html:
+ captcha_id = re.search(r'/captcha\.php\?uid=(.+?)"', self.html).group(1)
+ captcha_url = "http://www.dl-protect.com/captcha.php?uid=" + captcha_id
+ captcha_code = self.decryptCaptcha(captcha_url, imgtype="gif")
+
+ post_req['secure'] = captcha_code
+
+ self.html = self.load(self.pyfile.url, post=post_req)
+
+ for errmsg in (">The password is incorrect", ">The security code is incorrect"):
+ if errmsg in self.html:
+ self.fail(_(errmsg[1:]))
+
+ pattern = r'<a href="([^/].+?)" target="_blank">'
+ return re.findall(pattern, self.html)
diff --git a/pyload/plugins/crypter/DontKnowMe.py b/pyload/plugins/crypter/DontKnowMe.py
new file mode 100644
index 000000000..0e63233e8
--- /dev/null
+++ b/pyload/plugins/crypter/DontKnowMe.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class DontKnowMe(Crypter):
+ __name__ = "DontKnowMe"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?dontknow\.me/at/\?.+$'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """DontKnow.me decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("selaux", None)]
+
+
+ LINK_PATTERN = r'http://dontknow\.me/at/\?(.+)$'
+
+
+ def decrypt(self, pyfile):
+ link = re.findall(self.LINK_PATTERN, pyfile.url)[0]
+ self.urls = [unquote(link)]
diff --git a/pyload/plugins/crypter/DuckCryptInfo.py b/pyload/plugins/crypter/DuckCryptInfo.py
new file mode 100644
index 000000000..054783f3c
--- /dev/null
+++ b/pyload/plugins/crypter/DuckCryptInfo.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from BeautifulSoup import BeautifulSoup
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class DuckCryptInfo(Crypter):
+ __name__ = "DuckCryptInfo"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?duckcrypt\.info/(folder|wait|link)/(\w+)/?(\w*)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """DuckCrypt.info decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("godofdream", "soilfiction@gmail.com")]
+
+
+ TIMER_PATTERN = r'<span id="timer">(.*)</span>'
+
+
+ def decrypt(self, pyfile):
+ url = pyfile.url
+
+ m = re.match(self.__pattern__, url)
+ if m is None:
+ self.fail(_("Weird error in link"))
+ if str(m.group(1)) == "link":
+ self.handleLink(url)
+ else:
+ self.handleFolder(m)
+
+
+ def handleFolder(self, m):
+ html = self.load("http://duckcrypt.info/ajax/auth.php?hash=" + str(m.group(2)))
+ m = re.match(self.__pattern__, html)
+ self.logDebug("Redirectet to " + str(m.group(0)))
+ html = self.load(str(m.group(0)))
+ soup = BeautifulSoup(html)
+ cryptlinks = soup.findAll("div", attrs={"class": "folderbox"})
+ self.logDebug("Redirectet to " + str(cryptlinks))
+ if not cryptlinks:
+ self.error(_("No link found"))
+ for clink in cryptlinks:
+ if clink.find("a"):
+ self.handleLink(clink.find("a")['href'])
+
+
+ def handleLink(self, url):
+ html = self.load(url)
+ soup = BeautifulSoup(html)
+ self.urls = [soup.find("iframe")['src']]
+ if not self.urls:
+ self.logInfo(_("No link found"))
diff --git a/pyload/plugins/crypter/DuploadOrg.py b/pyload/plugins/crypter/DuploadOrg.py
new file mode 100644
index 000000000..fdb51021b
--- /dev/null
+++ b/pyload/plugins/crypter/DuploadOrg.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class DuploadOrg(DeadCrypter):
+ __name__ = "DuploadOrg"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?dupload\.org/folder/\d+'
+ __config__ = []
+
+ __description__ = """Dupload.org folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
diff --git a/pyload/plugins/crypter/EasybytezCom.py b/pyload/plugins/crypter/EasybytezCom.py
new file mode 100644
index 000000000..4cd86e9b6
--- /dev/null
+++ b/pyload/plugins/crypter/EasybytezCom.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSCrypter import XFSCrypter
+
+
+class EasybytezCom(XFSCrypter):
+ __name__ = "EasybytezCom"
+ __type__ = "crypter"
+ __version__ = "0.10"
+
+ __pattern__ = r'http://(?:www\.)?easybytez\.com/users/\d+/\d+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Easybytez.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ HOSTER_DOMAIN = "easybytez.com"
+
+ LOGIN_ACCOUNT = True
diff --git a/pyload/plugins/crypter/EmbeduploadCom.py b/pyload/plugins/crypter/EmbeduploadCom.py
new file mode 100644
index 000000000..c6649890f
--- /dev/null
+++ b/pyload/plugins/crypter/EmbeduploadCom.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.network.HTTPRequest import BadHeader
+
+
+class EmbeduploadCom(Crypter):
+ __name__ = "EmbeduploadCom"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?embedupload\.com/\?d=.*'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True),
+ ("preferedHoster", "str", "Prefered hoster list (bar-separated)", "embedupload"),
+ ("ignoredHoster", "str", "Ignored hoster list (bar-separated)", "")]
+
+ __description__ = """EmbedUpload.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ LINK_PATTERN = r'<div id="([^"]+)"[^>]*>\s*<a href="([^"]+)" target="_blank" (?:class="DownloadNow"|style="color:red")>'
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+ tmp_links = []
+
+ m = re.findall(self.LINK_PATTERN, self.html)
+ if m:
+ prefered_set = set(self.getConfig("preferedHoster").split('|'))
+ prefered_set = map(lambda s: s.lower().split('.')[0], prefered_set)
+
+ self.logDebug("PF: %s" % prefered_set)
+
+ tmp_links.extend([x[1] for x in m if x[0] in prefered_set])
+ self.urls = self.getLocation(tmp_links)
+
+ if not self.urls:
+ ignored_set = set(self.getConfig("ignoredHoster").split('|'))
+ ignored_set = map(lambda s: s.lower().split('.')[0], ignored_set)
+
+ self.logDebug("IG: %s" % ignored_set)
+
+ tmp_links.extend([x[1] for x in m if x[0] not in ignored_set])
+ self.urls = self.getLocation(tmp_links)
+
+
+ def getLocation(self, tmp_links):
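+ # Resolve each intermediate embedupload link with a header-only request and keep its
+ # "location" redirect target as the direct hoster URL.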
+ new_links = []
+ for link in tmp_links:
+ try:
+ header = self.load(link, just_header=True)
+ if 'location' in header:
+ new_links.append(header['location'])
+ except BadHeader:
+ pass
+ return new_links
diff --git a/pyload/plugins/crypter/FilebeerInfo.py b/pyload/plugins/crypter/FilebeerInfo.py
new file mode 100644
index 000000000..75714c81a
--- /dev/null
+++ b/pyload/plugins/crypter/FilebeerInfo.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class FilebeerInfo(DeadCrypter):
+ __name__ = "FilebeerInfo"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?filebeer\.info/(\d+~f).*'
+ __config__ = []
+
+ __description__ = """Filebeer.info folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
diff --git a/pyload/plugins/crypter/FilecloudIo.py b/pyload/plugins/crypter/FilecloudIo.py
new file mode 100644
index 000000000..6c8369925
--- /dev/null
+++ b/pyload/plugins/crypter/FilecloudIo.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FilecloudIo(SimpleCrypter):
+ __name__ = "FilecloudIo"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?(filecloud\.io|ifile\.it)/_\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Filecloud.io folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ LINK_PATTERN = r'href="(http://filecloud\.io/\w+)" title'
+ NAME_PATTERN = r'>(?P<N>.+?) - filecloud\.io<'
diff --git a/pyload/plugins/crypter/FilecryptCc.py b/pyload/plugins/crypter/FilecryptCc.py
new file mode 100644
index 000000000..db4a8c4ab
--- /dev/null
+++ b/pyload/plugins/crypter/FilecryptCc.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+from Crypto.Cipher import AES
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class FilecryptCc(Crypter):
+ __name__ = "FilecryptCc"
+ __type__ = "crypter"
+ __version__ = "0.04"
+
+ __pattern__ = r'https?://(?:www\.)?filecrypt\.cc/Container/\w+'
+
+ __description__ = """Filecrypt.cc decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "")]
+
+
+ # URL_REPLACEMENTS = [(r'.html$', ""), (r'$', ".html")] #@TODO: Extend SimpleCrypter
+
+ DLC_LINK_PATTERN = r'<button class="dlcdownload" type="button" title="Download \*.dlc" onclick="DownloadDLC\(\'(.+)\'\);"><i></i><span>dlc<'
+ WEBLINK_PATTERN = r"openLink.?'([\w_-]*)',"
+
+ CAPTCHA_PATTERN = r'<img id="nc" src="(.+?)"'
+
+ MIRROR_PAGE_PATTERN = r'"[\w]*" href="(http://filecrypt.cc/Container/\w+\.html\?mirror=\d+)">'
+
+
+ def setup(self):
+ self.links = []
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url, cookies=True)
+
+ if "content not found" in self.html:
+ self.offline()
+
+ self.handlePasswordProtection()
+ self.handleCaptcha()
+ self.handleMirrorPages()
+
+ for handle in (self.handleCNL, self.handleWeblinks, self.handleDlcContainer):
+ handle()
+ if self.links:
+ self.packages = [(pyfile.package().name, self.links, pyfile.package().name)]
+ return
+
+
+ def handleMirrorPages(self):
+ if "mirror=" not in self.siteWithLinks:
+ return
+
+ mirror = re.findall(self.MIRROR_PAGE_PATTERN, self.siteWithLinks)
+
+ self.logInfo(_("Found %d mirrors") % len(m))
+
+ for i in mirror[1:]:
+ self.siteWithLinks = self.siteWithLinks + self.load(i, cookies=True).decode("utf-8", "replace")
+
+
+ def handlePasswordProtection(self):
+ if '<input type="text" name="password"' not in self.html:
+ return
+
+ self.logInfo(_("Folder is password protected"))
+
+ if not self.pyfile.package().password:
+ self.fail(_("Please enter the password in package section and try again"))
+
+ self.html = self.load(self.pyfile.url, post={"password": self.password}, cookies=True)
+
+
+ def handleCaptcha(self):
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+
+ if m:
+ self.logDebug("Captcha-URL: %s" % m.group(1))
+ captcha_code = self.decryptCaptcha("http://filecrypt.cc" + m.group(1), forceUser=True, imgtype="gif")
+ self.siteWithLinks = self.load(self.pyfile.url, post={"recaptcha_response_field":captcha_code}, decode=True, cookies=True)
+ else:
+ self.logDebug("No captcha found")
+ self.siteWithLinks = self.html
+
+ if "recaptcha_response_field" in self.siteWithLinks:
+ self.invalidCaptcha()
+ self.retry()
+
+
+ def handleDlcContainer(self):
+ dlc = re.findall(self.DLC_LINK_PATTERN, self.siteWithLinks)
+
+ if not dlc:
+ return
+
+ for i in dlc:
+ self.links.append("http://filecrypt.cc/DLC/%s.dlc" % i)
+
+
+ def handleWeblinks(self):
+ try:
+ weblinks = re.findall(self.WEBLINK_PATTERN, self.siteWithLinks)
+
+ for link in weblinks:
+ response = self.load("http://filecrypt.cc/Link/%s.html" % link, cookies=True)
+ link2 = re.search('<iframe noresize src="(.*)"></iframe>', response)
+ response2 = self.load(link2.group(1), just_header=True, cookies=True)
+ self.links.append(response2['location'])
+
+ except Exception, e:
+ self.logDebug("Error decrypting weblinks: %s" % e)
+
+
+ def handleCNL(self):
+ try:
+ vjk = re.findall('<input type="hidden" name="jk" value="function f\(\){ return \'(.*)\';}">', self.siteWithLinks)
+ vcrypted = re.findall('<input type="hidden" name="crypted" value="(.*)">', self.siteWithLinks)
+
+ for i in range(0, len(vcrypted)):
+ self.links.extend(self._getLinks(vcrypted[i], vjk[i]))
+
+ except Exception, e:
+ self.logDebug("Error decrypting CNL: %s" % e)
+
+
+ def _getLinks(self, crypted, jk):
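+ # Click'n'Load-style payload: "jk" is a hex string used as both AES key and IV,
+ # "crypted" is a base64-encoded AES-CBC blob holding newline-separated links.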
+ # Get key
+ key = binascii.unhexlify(str(jk))
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ links = filter(lambda x: x != "",
+ text.replace("\x00", "").replace("\r", "").split("\n"))
+
+ return links
diff --git a/pyload/plugins/crypter/FilefactoryCom.py b/pyload/plugins/crypter/FilefactoryCom.py
new file mode 100644
index 000000000..cc9563ed9
--- /dev/null
+++ b/pyload/plugins/crypter/FilefactoryCom.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FilefactoryCom(SimpleCrypter):
+ __name__ = "FilefactoryCom"
+ __type__ = "crypter"
+ __version__ = "0.31"
+
+ __pattern__ = r'https?://(?:www\.)?filefactory\.com/(?:f|folder)/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Filefactory.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ LINK_PATTERN = r'<td><a href="([^"]+)">'
+ NAME_PATTERN = r'<h1>Files in <span>(?P<N>.+)</span></h1>'
+ PAGES_PATTERN = r'data-paginator-totalPages="(\d+)"'
+
+ COOKIES = [("filefactory.com", "locale", "en_US.utf8")]
+
+
+ def loadPage(self, page_n):
+ return self.load(self.pyfile.url, get={'page': page_n})
diff --git a/pyload/plugins/crypter/FilerNet.py b/pyload/plugins/crypter/FilerNet.py
new file mode 100644
index 000000000..4d28dc0da
--- /dev/null
+++ b/pyload/plugins/crypter/FilerNet.py
@@ -0,0 +1,26 @@
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FilerNet(SimpleCrypter):
+ __name__ = "FilerNet"
+ __type__ = "crypter"
+ __version__ = "0.41"
+
+ __pattern__ = r'https?://filer\.net/folder/\w{16}'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Filer.net decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("nath_schwarz", "nathan.notwhite@gmail.com"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ LINK_PATTERN = r'href="(/get/\w{16})">(?!<)'
+ NAME_PATTERN = r'<h3>(?P<N>.+?) - <small'
+
+
+ def getLinks(self):
+ return ['http://filer.net%s' % link for link in re.findall(self.LINK_PATTERN, self.html)]
diff --git a/pyload/plugins/crypter/FileserveCom.py b/pyload/plugins/crypter/FileserveCom.py
new file mode 100644
index 000000000..5ccccfc16
--- /dev/null
+++ b/pyload/plugins/crypter/FileserveCom.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class FileserveCom(Crypter):
+ __name__ = "FileserveCom"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?fileserve\.com/list/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """FileServe.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("fionnc", "fionnc@gmail.com")]
+
+
+ FOLDER_PATTERN = r'<table class="file_list">(.*?)</table>'
+ LINK_PATTERN = r'<a href="([^"]+)" class="sheet_icon wbold">'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ new_links = []
+
+ folder = re.search(self.FOLDER_PATTERN, html, re.S)
+ if folder is None:
+ self.error(_("FOLDER_PATTERN not found"))
+
+ new_links.extend(re.findall(self.LINK_PATTERN, folder.group(1)))
+
+ if new_links:
+ self.urls = map(lambda s: "http://fileserve.com%s" % s, new_links)
diff --git a/pyload/plugins/crypter/FilesonicCom.py b/pyload/plugins/crypter/FilesonicCom.py
new file mode 100644
index 000000000..33a99f0d5
--- /dev/null
+++ b/pyload/plugins/crypter/FilesonicCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class FilesonicCom(DeadCrypter):
+ __name__ = "FilesonicCom"
+ __type__ = "crypter"
+ __version__ = "0.12"
+
+ __pattern__ = r'http://(?:www\.)?filesonic\.com/folder/\w+'
+
+ __description__ = """Filesonic.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
diff --git a/pyload/plugins/crypter/FilestubeCom.py b/pyload/plugins/crypter/FilestubeCom.py
new file mode 100644
index 000000000..5c97bca9e
--- /dev/null
+++ b/pyload/plugins/crypter/FilestubeCom.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FilestubeCom(SimpleCrypter):
+ __name__ = "FilestubeCom"
+ __type__ = "crypter"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?filestube\.(?:com|to)/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Filestube.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ LINK_PATTERN = r'<a class=\"file-link-main(?: noref)?\" [^>]* href=\"(http://[^\"]+)'
+ NAME_PATTERN = r'<h1\s*> (?P<N>.+) download\s*</h1>'
diff --git a/pyload/plugins/crypter/FiletramCom.py b/pyload/plugins/crypter/FiletramCom.py
new file mode 100644
index 000000000..289642494
--- /dev/null
+++ b/pyload/plugins/crypter/FiletramCom.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FiletramCom(SimpleCrypter):
+ __name__ = "FiletramCom"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?filetram\.com/[^/]+/.+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Filetram.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("igel", "igelkun@myopera.com"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ LINK_PATTERN = r'\s+(http://.+)'
+ NAME_PATTERN = r'<title>(?P<N>.+?) - Free Download'
diff --git a/pyload/plugins/crypter/FiredriveCom.py b/pyload/plugins/crypter/FiredriveCom.py
new file mode 100644
index 000000000..7ef84c8ff
--- /dev/null
+++ b/pyload/plugins/crypter/FiredriveCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class FiredriveCom(DeadCrypter):
+ __name__ = "FiredriveCom"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?(firedrive|putlocker)\.com/share/.+'
+ __config__ = []
+
+ __description__ = """Firedrive.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
diff --git a/pyload/plugins/crypter/FourChanOrg.py b/pyload/plugins/crypter/FourChanOrg.py
new file mode 100644
index 000000000..37d205f73
--- /dev/null
+++ b/pyload/plugins/crypter/FourChanOrg.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+#
+# Based on 4chandl by Roland Beermann (https://gist.github.com/enkore/3492599)
+
+import re
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class FourChanOrg(Crypter):
+ __name__ = "FourChanOrg"
+ __type__ = "crypter"
+ __version__ = "0.3"
+
+ __pattern__ = r'http://(?:www\.)?boards\.4chan\.org/\w+/res/(\d+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """4chan.org folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = []
+
+
+ def decrypt(self, pyfile):
+ pagehtml = self.load(pyfile.url)
+ images = set(re.findall(r'(images\.4chan\.org/[^/]*/src/[^"<]*)', pagehtml))
+ self.urls = ["http://" + image for image in images]
diff --git a/pyload/plugins/crypter/FreakhareCom.py b/pyload/plugins/crypter/FreakhareCom.py
new file mode 100644
index 000000000..792badeb4
--- /dev/null
+++ b/pyload/plugins/crypter/FreakhareCom.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FreakhareCom(SimpleCrypter):
+ __name__ = "FreakhareCom"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?freakshare\.com/folder/.+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Freakhare.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ LINK_PATTERN = r'<a href="(http://freakshare\.com/files/[^"]+)" target="_blank">'
+ NAME_PATTERN = r'Folder:</b> (?P<N>.+)'
+ PAGES_PATTERN = r'Pages: +(\d+)'
+
+
+ def loadPage(self, page_n):
+ if not hasattr(self, 'f_id') and not hasattr(self, 'f_md5'):
+ m = re.search(r'http://freakshare.com/\?x=folder&f_id=(\d+)&f_md5=(\w+)', self.html)
+ if m:
+ self.f_id = m.group(1)
+ self.f_md5 = m.group(2)
+ return self.load('http://freakshare.com/', get={'x': 'folder',
+ 'f_id': self.f_id,
+ 'f_md5': self.f_md5,
+ 'entrys': '20',
+ 'page': page_n - 1,
+ 'order': ''}, decode=True)
diff --git a/pyload/plugins/crypter/FreetexthostCom.py b/pyload/plugins/crypter/FreetexthostCom.py
new file mode 100644
index 000000000..13cb33f84
--- /dev/null
+++ b/pyload/plugins/crypter/FreetexthostCom.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FreetexthostCom(SimpleCrypter):
+ __name__ = "FreetexthostCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?freetexthost\.com/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Freetexthost.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def getLinks(self):
+ m = re.search(r'<div id="contentsinner">\s*(.+)<div class="viewcount">', self.html, re.S)
+ if m is None:
+ self.error(_("Unable to extract links"))
+ links = m.group(1)
+ return links.strip().split("<br />\r\n")
diff --git a/pyload/plugins/crypter/FshareVn.py b/pyload/plugins/crypter/FshareVn.py
new file mode 100644
index 000000000..8b22b8bf2
--- /dev/null
+++ b/pyload/plugins/crypter/FshareVn.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class FshareVn(SimpleCrypter):
+ __name__ = "FshareVn"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?fshare\.vn/folder/.*'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Fshare.vn folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ LINK_PATTERN = r'<li class="w_80pc"><a href="([^"]+)" target="_blank">'
diff --git a/pyload/plugins/crypter/GooGl.py b/pyload/plugins/crypter/GooGl.py
new file mode 100644
index 000000000..b5fb40f34
--- /dev/null
+++ b/pyload/plugins/crypter/GooGl.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.utils import json_loads
+
+
+class GooGl(Crypter):
+ __name__ = "GooGl"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'https?://(?:www\.)?goo\.gl/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Goo.gl decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ API_URL = "https://www.googleapis.com/urlshortener/v1/url"
+
+
+ def decrypt(self, pyfile):
+ rep = self.load(self.API_URL, get={'shortUrl': pyfile.url})
+ self.logDebug("JSON data: " + rep)
+ rep = json_loads(rep)
+
+ if 'longUrl' in rep:
+ self.urls = [rep['longUrl']]
+ else:
+ self.fail(_("Unable to expand shortened link"))
diff --git a/pyload/plugins/crypter/HoerbuchIn.py b/pyload/plugins/crypter/HoerbuchIn.py
new file mode 100644
index 000000000..fa409a94c
--- /dev/null
+++ b/pyload/plugins/crypter/HoerbuchIn.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class HoerbuchIn(Crypter):
+ __name__ = "HoerbuchIn"
+ __type__ = "crypter"
+ __version__ = "0.6"
+
+ __pattern__ = r'http://(?:www\.)?hoerbuch\.in/(wp/horbucher/\d+/.+/|tp/out\.php\?.+|protection/folder_\d+\.html)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Hoerbuch.in decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org"),
+ ("mkaay", "mkaay@mkaay.de")]
+
+
+ article = re.compile("http://(?:www\.)?hoerbuch\.in/wp/horbucher/\d+/.+/")
+ protection = re.compile("http://(?:www\.)?hoerbuch\.in/protection/folder_\d+.html")
+
+
+ def decrypt(self, pyfile):
+ self.pyfile = pyfile
+
+ if self.article.match(pyfile.url):
+ html = self.load(pyfile.url)
+ soup = BeautifulSoup(html, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
+
+ abookname = soup.find("a", attrs={"rel": "bookmark"}).text
+ for a in soup.findAll("a", attrs={"href": self.protection}):
+ package = "%s (%s)" % (abookname, a.previousSibling.previousSibling.text[:-1])
+ links = self.decryptFolder(a['href'])
+
+ self.packages.append((package, links, package))
+ else:
+ self.urls = self.decryptFolder(pyfile.url)
+
+
+ def decryptFolder(self, url):
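+ # Submit the protection page, then open each per-hoster protection link and keep the
+ # final redirect target (req.lastEffectiveURL) as the real download link.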
+ m = self.protection.search(url)
+ if m is None:
+ self.fail(_("Bad URL"))
+ url = m.group(0)
+
+ self.pyfile.url = url
+ html = self.load(url, post={"viewed": "adpg"})
+
+ links = []
+ pattern = re.compile("http://www\.hoerbuch\.in/protection/(\w+)/(.*?)\"")
+ for hoster, lid in pattern.findall(html):
+ self.req.lastURL = url
+ self.load("http://www.hoerbuch.in/protection/%s/%s" % (hoster, lid))
+ links.append(self.req.lastEffectiveURL)
+
+ return links
diff --git a/pyload/plugins/crypter/HotfileCom.py b/pyload/plugins/crypter/HotfileCom.py
new file mode 100644
index 000000000..1c1dcb76b
--- /dev/null
+++ b/pyload/plugins/crypter/HotfileCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class HotfileCom(DeadCrypter):
+ __name__ = "HotfileCom"
+ __type__ = "crypter"
+ __version__ = "0.3"
+
+ __pattern__ = r'https?://(?:www\.)?hotfile\.com/list/\w+/\w+'
+ __config__ = []
+
+ __description__ = """Hotfile.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
diff --git a/pyload/plugins/crypter/ILoadTo.py b/pyload/plugins/crypter/ILoadTo.py
new file mode 100644
index 000000000..e04e43a00
--- /dev/null
+++ b/pyload/plugins/crypter/ILoadTo.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class ILoadTo(DeadCrypter):
+ __name__ = "ILoadTo"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?iload\.to/go/\d+-[\w.-]+/'
+ __config__ = []
+
+ __description__ = """Iload.to decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("hzpz", None)]
diff --git a/pyload/plugins/crypter/ImgurComAlbum.py b/pyload/plugins/crypter/ImgurComAlbum.py
new file mode 100644
index 000000000..1dc717ca9
--- /dev/null
+++ b/pyload/plugins/crypter/ImgurComAlbum.py
@@ -0,0 +1,27 @@
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+from pyload.utils import uniqify
+
+
+class ImgurComAlbum(SimpleCrypter):
+ __name__ = "ImgurComAlbum"
+ __type__ = "crypter"
+ __version__ = "0.51"
+
+ __pattern__ = r'https?://(?:www\.|m\.)?imgur\.com/(a|gallery|)/?\w{5,7}'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Imgur.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("nath_schwarz", "nathan.notwhite@gmail.com")]
+
+
+ NAME_PATTERN = r'(?P<N>.+?) - Imgur'
+ LINK_PATTERN = r'i\.imgur\.com/\w{7}s?\.(?:jpeg|jpg|png|gif|apng)'
+
+
+ def getLinks(self):
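+ # Strip the "s" thumbnail suffix from each image id so the full-size file is collected, then deduplicate.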
+ f = lambda url: "http://" + re.sub(r'(\w{7})s\.', r'\1.', url)
+ return uniqify(map(f, re.findall(self.LINK_PATTERN, self.html)))
diff --git a/pyload/plugins/crypter/JunocloudMe.py b/pyload/plugins/crypter/JunocloudMe.py
new file mode 100644
index 000000000..3036be616
--- /dev/null
+++ b/pyload/plugins/crypter/JunocloudMe.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSCrypter import XFSCrypter
+
+
+class JunocloudMe(XFSCrypter):
+ __name__ = "JunocloudMe"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?junocloud\.me/folders/(?P<ID>\d+/\w+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Junocloud.me folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "junocloud.me"
diff --git a/pyload/plugins/crypter/LetitbitNet.py b/pyload/plugins/crypter/LetitbitNet.py
new file mode 100644
index 000000000..6942aa3ca
--- /dev/null
+++ b/pyload/plugins/crypter/LetitbitNet.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class LetitbitNet(Crypter):
+ __name__ = "LetitbitNet"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?letitbit\.net/folder/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Letitbit.net folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("DHMH", "webmaster@pcProfil.de"),
+ ("z00nx", "z00nx0@gmail.com")]
+
+
+ FOLDER_PATTERN = r'<table>(.*)</table>'
+ LINK_PATTERN = r'<a href="([^"]+)" target="_blank">'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ folder = re.search(self.FOLDER_PATTERN, html, re.S)
+ if folder is None:
+ self.error(_("FOLDER_PATTERN not found"))
+
+ self.urls.extend(re.findall(self.LINK_PATTERN, folder.group(0)))
diff --git a/pyload/plugins/crypter/LinkCryptWs.py b/pyload/plugins/crypter/LinkCryptWs.py
new file mode 100644
index 000000000..f01c42268
--- /dev/null
+++ b/pyload/plugins/crypter/LinkCryptWs.py
@@ -0,0 +1,326 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+import pycurl
+
+from Crypto.Cipher import AES
+
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.utils import html_unescape
+
+
+class LinkCryptWs(Crypter):
+ __name__ = "LinkCryptWs"
+ __type__ = "crypter"
+ __version__ = "0.06"
+
+ __pattern__ = r'http://(?:www\.)?linkcrypt\.ws/(dir|container)/(?P<ID>\w+)'
+
+ __description__ = """LinkCrypt.ws decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("kagenoshin", "kagenoshin[AT]gmx[DOT]ch"),
+ ("glukgluk", None)]
+
+
+ CRYPTED_KEY = "crypted"
+ JK_KEY = "jk"
+
+
+ def setup(self):
+ self.captcha = False
+ self.links = []
+ self.sources = ['cnl', 'web', 'dlc', 'rsdf', 'ccf']
+
+
+ def prepare(self):
+ # Init
+ self.fileid = re.match(self.__pattern__, self.pyfile.url).group('ID')
+
+ self.req.cj.setCookie("linkcrypt.ws", "language", "en")
+
+ # Request package
+ self.req.http.c.setopt(pycurl.USERAGENT, "Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko") #: better chance to not get those key-captchas
+ self.html = self.load(self.pyfile.url)
+
+
+ def decrypt(self, pyfile):
+ if not self.js:
+ self.fail(_("Missing JS Engine"))
+
+ self.prepare()
+
+ if not self.isOnline():
+ self.offline()
+
+ if self.isKeyCaptchaProtected():
+ self.retry(4, 30, _("Can't handle Key-Captcha"))
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleCaptchaErrors()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ # get unrar password
+ self.getunrarpw()
+
+ # Get package name and folder
+ package_name, folder_name = self.getPackageInfo()
+
+ #get the container definitions from script section
+ self.get_container_html()
+
+ # Extract package links
+ for type in self.sources:
+ links = self.handleLinkSource(type)
+
+ if links:
+ self.links.extend(links)
+ break
+
+ if self.links:
+ self.packages = [(package_name, self.links, folder_name)]
+
+
+ def isOnline(self):
+ if "<title>Linkcrypt.ws // Error 404</title>" in self.html:
+ self.logDebug("folder doesen't exist anymore")
+ return False
+ else:
+ return True
+
+
+ def isPasswordProtected(self):
+ if "Authorizing" in self.html:
+ self.logDebug("Links are password protected")
+ return True
+ else:
+ return False
+
+
+ def isCaptchaProtected(self):
+ if 'id="captcha">' in self.html:
+ self.logDebug("Links are captcha protected")
+ return True
+ else:
+ return False
+
+
+ def isKeyCaptchaProtected(self):
+ if re.search(r'Key[ -]', self.html, re.I):
+ return True
+ else:
+ return False
+
+
+ def unlockPasswordProtection(self):
+ password = self.getPassword()
+
+ if password:
+ self.logDebug("Submitting password [%s] for protected links" % password)
+ self.html = self.load(self.pyfile.url, post={"password": password, 'x': "0", 'y': "0"})
+ else:
+ self.fail(_("Folder is password protected"))
+
+
+ def unlockCaptchaProtection(self):
+ captcha_url = re.search(r'<form.*?id\s*?=\s*?"captcha"[^>]*?>.*?<\s*?input.*?src="([^"]*?)"', self.html, re.I | re.S).group(1)
+ captcha_code = self.decryptCaptcha(captcha_url, forceUser=True, imgtype="gif", result_type='positional')
+
+ self.html = self.load(self.pyfile.url, post={"x": captcha_code[0], "y": captcha_code[1]})
+
+
+ def getPackageInfo(self):
+ name = self.pyfile.package().name
+ folder = self.pyfile.package().folder
+
+ self.logDebug("Defaulting to pyfile name [%s] and folder [%s] for package" % (name, folder))
+
+ return name, folder
+
+
+ def getunrarpw(self):
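+ # The page's packed JS appears to keep the archive password right after the "|source|"
+ # token; extract it and store it as the package password unless it is just the
+ # "Password"/"Dateipasswort" placeholder.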
+ sitein = self.html
+ indexi = sitein.find("|source|") + 8
+ indexe = sitein.find("|",indexi)
+
+ unrarpw = sitein[indexi:indexe]
+
+ if not (unrarpw == "Password" or "Dateipasswort") :
+ self.logDebug("File password set to: [%s]"% unrarpw)
+ self.pyfile.package().password = unrarpw
+
+
+ def handleErrors(self):
+ if self.isPasswordProtected():
+ self.fail(_("Incorrect password"))
+
+
+ def handleCaptchaErrors(self):
+ if self.captcha:
+ if "Your choice was wrong!" in self.html:
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+
+ def handleLinkSource(self, type):
+ if type == 'cnl':
+ return self.handleCNL2()
+
+ elif type == 'web':
+ return self.handleWebLinks()
+
+ elif type in ('rsdf', 'ccf', 'dlc'):
+ return self.handleContainer(type)
+
+ else:
+ self.fail(_("Unknown source type: %s") % type) #@TODO: Replace with self.error in 0.4.10
+
+
+ def handleWebLinks(self):
+ self.logDebug("Search for Web links ")
+
+ package_links = []
+ pattern = r'<form action="http://linkcrypt.ws/out.html"[^>]*?>.*?<input[^>]*?value="([^"]*?)"[^>]*?name="file"'
+ ids = re.findall(pattern, self.html, re.I | re.S)
+
+ self.logDebug("Decrypting %d Web links" % len(ids))
+
+ for idx, weblink_id in enumerate(ids):
+ try:
+ self.logDebug("Decrypting Web link %d, %s" % (idx + 1, weblink_id))
+
+ res = self.load("http://linkcrypt.ws/out.html", post = {'file':weblink_id})
+
+ indexs = res.find("window.location =") + 19
+ indexe = res.find('"', indexs)
+
+ link2 = res[indexs:indexe]
+
+ self.logDebug(link2)
+
+ link2 = html_unescape(link2)
+ package_links.append(link2)
+
+ except Exception, detail:
+ self.logDebug("Error decrypting Web link %s, %s" % (weblink_id, detail))
+
+ return package_links
+
+
+ def get_container_html(self):
+ self.container_html = []
+
+ script = re.search(r'<div.*?id="ad_cont".*?<script.*?javascrip[^>]*?>(.*?)</script', self.html, re.I | re.S)
+
+ if script:
+ container_html_text = script.group(1)
+ container_html_text = container_html_text.strip()
+ self.container_html = container_html_text.splitlines()
+
+
+ def handle_javascript(self, line):
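+ # Evaluate the packed JS but strip the document.open()/document.write()/document.close()
+ # wrapper so the eval returns the generated markup as a plain string.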
+ return self.js.eval(line.replace('{}))',"{}).replace('document.open();document.write','').replace(';document.close();',''))"))
+
+
+ def handleContainer(self, type):
+ package_links = []
+ type = type.lower()
+
+ self.logDebug('Search for %s Container links' % type.upper())
+
+ if not type.isalnum(): # check to prevent broken re-pattern (cnl2,rsdf,ccf,dlc,web are all alpha-numeric)
+ self.fail(_("Unknown container type: %s") % type) #@TODO: Replace with self.error in 0.4.10
+
+ for line in self.container_html:
+ if type in line:
+ jseval = self.handle_javascript(line)
+ clink = re.search(r'href=["\']([^"\']*?)["\']',jseval,re.I)
+
+ if not clink:
+ continue
+
+ self.logDebug("clink avaible")
+
+ package_name, folder_name = self.getPackageInfo()
+ self.logDebug("Added package with name %s.%s and container link %s" %( package_name, type, clink.group(1)))
+ self.core.api.uploadContainer( "%s.%s" %(package_name, type), self.load(clink.group(1)))
+ return "Found it"
+
+ return package_links
+
+
+ def handleCNL2(self):
+ self.logDebug("Search for CNL links")
+
+ package_links = []
+ cnl_line = None
+
+ for line in self.container_html:
+ if "cnl" in line:
+ cnl_line = line
+ break
+
+ if cnl_line:
+ self.logDebug("cnl_line gefunden")
+
+ try:
+ cnl_section = self.handle_javascript(cnl_line)
+ (vcrypted, vjk) = self._getCipherParams(cnl_section)
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.logError(_("Unable to decrypt CNL links (JS Error) try to get over links"))
+ return self.handleWebLinks()
+
+ return package_links
+
+
+ def _getCipherParams(self, cnl_section):
+ # Get jk
+ jk_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkCryptWs.JK_KEY
+ vjk = re.findall(jk_re, cnl_section)
+
+ # Get crypted
+ crypted_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkCryptWs.CRYPTED_KEY
+ vcrypted = re.findall(crypted_re, cnl_section)
+
+ # Log and return
+ self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+ return vcrypted, vjk
+
+
+ def _getLinks(self, crypted, jk):
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ key = binascii.unhexlify(jreturn)
+
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Package has %d links" % len(links))
+
+ return links
diff --git a/pyload/plugins/crypter/LinkSaveIn.py b/pyload/plugins/crypter/LinkSaveIn.py
new file mode 100644
index 000000000..2e0ac923e
--- /dev/null
+++ b/pyload/plugins/crypter/LinkSaveIn.py
@@ -0,0 +1,246 @@
+# -*- coding: utf-8 -*-
+#
+# * cnl2 and web links are skipped if JS is not available (instead of failing the package)
+# * only the best available link source is used (priority: cnl2 > rsdf > ccf > dlc > web)
+
+import base64
+import binascii
+import re
+
+from Crypto.Cipher import AES
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+from pyload.utils import html_unescape
+
+
+class LinkSaveIn(SimpleCrypter):
+ __name__ = "LinkSaveIn"
+ __type__ = "crypter"
+ __version__ = "2.02"
+
+ __pattern__ = r'http://(?:www\.)?linksave\.in/(?P<id>\w+)$'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """LinkSave.in decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es")]
+
+
+ COOKIES = [("linksave.in", "Linksave_Language", "english")]
+
+ # Constants
+ _JK_KEY_ = "jk"
+ _CRYPTED_KEY_ = "crypted"
+
+
+ def setup(self):
+ self.fileid = None
+ self.captcha = False
+ self.package = None
+ self.preferred_sources = ["cnl2", "rsdf", "ccf", "dlc", "web"]
+
+
+ def decrypt(self, pyfile):
+ # Init
+ self.package = pyfile.package()
+ self.fileid = re.match(self.__pattern__, pyfile.url).group('id')
+
+ # Request package
+ self.html = self.load(pyfile.url)
+ if not self.isOnline():
+ self.offline()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleErrors()
+
+ # Get package name and folder
+ (package_name, folder_name) = self.getPackageInfo()
+
+ # Extract package links
+ package_links = []
+ for type_ in self.preferred_sources:
+ package_links.extend(self.handleLinkSource(type_))
+ if package_links: # use only first source which provides links
+ break
+ package_links = set(package_links)
+
+ # Pack
+ if package_links:
+ self.packages = [(package_name, package_links, folder_name)]
+
+
+ def isOnline(self):
+ if "<big>Error 404 - Folder not found!</big>" in self.html:
+ self.logDebug("File not found")
+ return False
+ return True
+
+
+ def isPasswordProtected(self):
+ if re.search(r'''<input.*?type="password"''', self.html):
+ self.logDebug("Links are password protected")
+ return True
+ return False
+
+
+ def isCaptchaProtected(self):
+ if "<b>Captcha:</b>" in self.html:
+ self.logDebug("Links are captcha protected")
+ return True
+ return False
+
+
+ def unlockPasswordProtection(self):
+ password = self.getPassword()
+ self.logDebug("Submitting password [%s] for protected links" % password)
+ post = {"id": self.fileid, "besucherpasswort": password, 'login': 'submit'}
+ self.html = self.load(self.pyfile.url, post=post)
+
+
+ def unlockCaptchaProtection(self):
+ captcha_hash = re.search(r'name="hash" value="([^"]+)', self.html).group(1)
+ captcha_url = re.search(r'src=".(/captcha/cap.php\?hsh=[^"]+)', self.html).group(1)
+ captcha_code = self.decryptCaptcha("http://linksave.in" + captcha_url, forceUser=True)
+ self.html = self.load(self.pyfile.url, post={"id": self.fileid, "hash": captcha_hash, "code": captcha_code})
+
+
+ def getPackageInfo(self):
+ name = self.pyfile.package().name
+ folder = self.pyfile.package().folder
+ self.logDebug("Defaulting to pyfile name [%s] and folder [%s] for package" % (name, folder))
+ return name, folder
+
+
+ def handleErrors(self):
+ if "The visitorpassword you have entered is wrong" in self.html:
+ self.logDebug("Incorrect password, please set right password on 'Edit package' form and retry")
+ self.fail(_("Incorrect password, please set right password on 'Edit package' form and retry"))
+
+ if self.captcha:
+ if "Wrong code. Please retry" in self.html:
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+
+ def handleLinkSource(self, type_):
+ if type_ == "cnl2":
+ return self.handleCNL2()
+ elif type_ in ("rsdf", "ccf", "dlc"):
+ return self.handleContainer(type_)
+ elif type_ == "web":
+ return self.handleWebLinks()
+ else:
+ self.error('Unknown source type "%s" (this is probably a bug)' % type_)
+
+
+ def handleWebLinks(self):
+ package_links = []
+ self.logDebug("Search for Web links")
+ if not self.js:
+ self.logDebug("No JS -> skip Web links")
+ else:
+ #@TODO: Gather paginated web links
+ pattern = r'<a href="http://linksave\.in/(\w{43})"'
+ ids = re.findall(pattern, self.html)
+ self.logDebug("Decrypting %d Web links" % len(ids))
+ for i, weblink_id in enumerate(ids):
+ try:
+ webLink = "http://linksave.in/%s" % weblink_id
+
+ self.logDebug("Decrypting Web link %d, %s" % (i + 1, webLink))
+
+ fwLink = "http://linksave.in/fw-%s" % weblink_id
+ res = self.load(fwLink)
+
+ jscode = re.findall(r'<script type="text/javascript">(.*)</script>', res)[-1]
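+ # stub out document.write so the obfuscated script hands back its generated
+ # markup instead of writing into a (non-existent) DOM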
+ jseval = self.js.eval("document = { write: function(e) { return e; } }; %s" % jscode)
+ dlLink = re.search(r'http://linksave\.in/dl-\w+', jseval).group(0)
+ self.logDebug("JsEngine returns value [%s] for redirection link" % dlLink)
+
+ res = self.load(dlLink)
+ link = html_unescape(re.search(r'<iframe src="(.+?)"', res).group(1))
+
+ package_links.append(link)
+
+ except Exception, detail:
+ self.logDebug("Error decrypting Web link %s, %s" % (webLink, detail))
+
+ return package_links
+
+
+ def handleContainer(self, type_):
+ package_links = []
+ type_ = type_.lower()
+ self.logDebug("Seach for %s Container links" % type_.upper())
+ if not type_.isalnum(): # check to prevent broken re-pattern (cnl2,rsdf,ccf,dlc,web are all alpha-numeric)
+ self.error('Unknown container type "%s" (this is probably a bug)' % type_)
+ pattern = r'\(\'%s_link\'\).href=unescape\(\'(.*?\.%s)\'\)' % (type_, type_)
+ containersLinks = re.findall(pattern, self.html)
+ self.logDebug("Found %d %s Container links" % (len(containersLinks), type_.upper()))
+ for containerLink in containersLinks:
+ link = "http://linksave.in/%s" % html_unescape(containerLink)
+ package_links.append(link)
+ return package_links
+
+
+ def handleCNL2(self):
+ package_links = []
+ self.logDebug("Search for CNL2 links")
+ if not self.js:
+ self.logDebug("No JS -> skip CNL2 links")
+ elif 'cnl2_load' in self.html:
+ try:
+ (vcrypted, vjk) = self._getCipherParams()
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.fail(_("Unable to decrypt CNL2 links"))
+ return package_links
+
+
+ def _getCipherParams(self):
+ # Get jk
+ jk_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkSaveIn._JK_KEY_
+ vjk = re.findall(jk_re, self.html)
+
+ # Get crypted
+ crypted_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkSaveIn._CRYPTED_KEY_
+ vcrypted = re.findall(crypted_re, self.html)
+
+ # Log and return
+ self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+ return vcrypted, vjk
+
+
+ def _getLinks(self, crypted, jk):
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Package has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/LinkdecrypterCom.py b/pyload/plugins/crypter/LinkdecrypterCom.py
new file mode 100644
index 000000000..57045c9d0
--- /dev/null
+++ b/pyload/plugins/crypter/LinkdecrypterCom.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class LinkdecrypterCom(Crypter):
+ __name__ = "LinkdecrypterCom"
+ __type__ = "crypter"
+ __version__ = "0.27"
+
+ __pattern__ = r'^unmatchable$'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Linkdecrypter.com"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("flowlee", None)]
+
+
+ TEXTAREA_PATTERN = r'<textarea name="links" wrap="off" readonly="1" class="caja_des">(.+)</textarea>'
+ PASSWORD_PATTERN = r'<input type="text" name="password"'
+ CAPTCHA_PATTERN = r'<img class="captcha" src="(.+?)"(.*?)>'
+ REDIR_PATTERN = r'<i>(Click <a href="./">here</a> if your browser does not redirect you).</i>'
+
+
+ def decrypt(self, pyfile):
+ self.passwords = self.getPassword().splitlines()
+
+ # API not working anymore
+ self.urls = self.decryptHTML()
+
+
+ def decryptAPI(self):
+ get_dict = {"t": "link", "url": self.pyfile.url, "lcache": "1"}
+ self.html = self.load('http://linkdecrypter.com/api', get=get_dict)
+ if self.html.startswith('http://'):
+ return self.html.splitlines()
+
+ if self.html == 'INTERRUPTION(PASSWORD)':
+ for get_dict['pass'] in self.passwords:
+ self.html = self.load('http://linkdecrypter.com/api', get=get_dict)
+ if self.html.startswith('http://'):
+ return self.html.splitlines()
+
+ self.logError("API", self.html)
+ if self.html == 'INTERRUPTION(PASSWORD)':
+ self.fail(_("No or incorrect password"))
+
+ return None
+
+
+ def decryptHTML(self):
+ retries = 5
+
+ post_dict = {"link_cache": "on", "pro_links": self.pyfile.url, "modo_links": "text"}
+ self.html = self.load('http://linkdecrypter.com/', post=post_dict, cookies=True, decode=True)
+
+ while self.passwords or retries:
+ m = re.search(self.TEXTAREA_PATTERN, self.html, flags=re.S)
+ if m:
+ return [x for x in m.group(1).splitlines() if '[LINK-ERROR]' not in x]
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ captcha_url = 'http://linkdecrypter.com/' + m.group(1)
+ result_type = "positional" if "getPos" in m.group(2) else "textual"
+
+ m = re.search(r"<p><i><b>([^<]+)</b></i></p>", self.html)
+ msg = m.group(1) if m else ""
+ self.logInfo(_("Captcha protected link"), result_type, msg)
+
+ captcha = self.decryptCaptcha(captcha_url, result_type=result_type)
+ if result_type == "positional":
+ captcha = "%d|%d" % captcha
+ self.html = self.load('http://linkdecrypter.com/', post={"captcha": captcha}, decode=True)
+ retries -= 1
+
+ elif self.PASSWORD_PATTERN in self.html:
+ if self.passwords:
+ password = self.passwords.pop(0)
+ self.logInfo(_("Password protected link, trying ") + password)
+ self.html = self.load('http://linkdecrypter.com/', post={'password': password}, decode=True)
+ else:
+ self.fail(_("No or incorrect password"))
+
+ else:
+ retries -= 1
+ self.html = self.load('http://linkdecrypter.com/', cookies=True, decode=True)
+
+ return None
diff --git a/pyload/plugins/crypter/LixIn.py b/pyload/plugins/crypter/LixIn.py
new file mode 100644
index 000000000..2394a17b8
--- /dev/null
+++ b/pyload/plugins/crypter/LixIn.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class LixIn(Crypter):
+ __name__ = "LixIn"
+ __type__ = "crypter"
+ __version__ = "0.22"
+
+ __pattern__ = r'http://(?:www\.)?lix\.in/(?P<ID>.+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Lix.in decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org")]
+
+
+ CAPTCHA_PATTERN = r'<img src="(?P<image>captcha_img\.php\?.*?)"'
+ SUBMIT_PATTERN = r'value=\'continue.*?\''
+ LINK_PATTERN = r'name="ifram" src="(?P<link>.*?)"'
+
+
+ def decrypt(self, pyfile):
+ url = pyfile.url
+
+ m = re.match(self.__pattern__, url)
+ if m is None:
+ self.error(_("Unable to identify file ID"))
+
+ id = m.group("ID")
+ self.logDebug("File id is %s" % id)
+
+ self.html = self.load(url, decode=True)
+
+ m = re.search(self.SUBMIT_PATTERN, self.html)
+ if m is None:
+ self.error(_("Link doesn't seem valid"))
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ for _i in xrange(5):
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ self.logDebug("Trying captcha")
+ captcharesult = self.decryptCaptcha("http://lix.in/" + m.group("image"))
+ self.html = self.load(url, decode=True,
+ post={"capt": captcharesult, "submit": "submit", "tiny": id})
+ else:
+ self.logDebug("No captcha/captcha solved")
+ else:
+ self.html = self.load(url, decode=True, post={"submit": "submit", "tiny": id})
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Unable to find destination url"))
+ else:
+ self.urls = [m.group("link")]
+ self.logDebug("Found link %s, adding to package" % self.urls[0])
diff --git a/pyload/plugins/crypter/LofCc.py b/pyload/plugins/crypter/LofCc.py
new file mode 100644
index 000000000..955ae56d7
--- /dev/null
+++ b/pyload/plugins/crypter/LofCc.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class LofCc(DeadCrypter):
+ __name__ = "LofCc"
+ __type__ = "crypter"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?lof\.cc/(.*)'
+ __config__ = []
+
+ __description__ = """Lof.cc decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
diff --git a/pyload/plugins/crypter/MBLinkInfo.py b/pyload/plugins/crypter/MBLinkInfo.py
new file mode 100644
index 000000000..98d4c09e6
--- /dev/null
+++ b/pyload/plugins/crypter/MBLinkInfo.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class MBLinkInfo(DeadCrypter):
+ __name__ = "MBLinkInfo"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?mblink\.info/?\?id=(\d+)'
+ __config__ = []
+
+ __description__ = """MBLink.info decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Gummibaer", "Gummibaer@wiki-bierkiste.de"),
+ ("stickell", "l.stickell@yahoo.it")]
diff --git a/pyload/plugins/crypter/MediafireCom.py b/pyload/plugins/crypter/MediafireCom.py
new file mode 100644
index 000000000..392f59c52
--- /dev/null
+++ b/pyload/plugins/crypter/MediafireCom.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.plugins.hoster.MediafireCom import checkHTMLHeader
+from pyload.utils import json_loads
+
+
+class MediafireCom(Crypter):
+ __name__ = "MediafireCom"
+ __type__ = "crypter"
+ __version__ = "0.14"
+
+ __pattern__ = r'http://(?:www\.)?mediafire\.com/(folder/|\?sharekey=|\?\w{13}($|[/#]))'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Mediafire.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ FOLDER_KEY_PATTERN = r'var afI= \'(\w+)'
+ LINK_PATTERN = r'<meta property="og:url" content="http://www\.mediafire\.com/\?(\w+)"/>'
+
+
+ def decrypt(self, pyfile):
+ url, result = checkHTMLHeader(pyfile.url)
+ self.logDebug("Location (%d): %s" % (result, url))
+
+ if result == 0:
+ # load and parse html
+ html = self.load(pyfile.url)
+ m = re.search(self.LINK_PATTERN, html)
+ if m:
+ # file page
+ self.urls.append("http://www.mediafire.com/file/%s" % m.group(1))
+ else:
+ # folder page
+ m = re.search(self.FOLDER_KEY_PATTERN, html)
+ if m:
+ folder_key = m.group(1)
+ self.logDebug("FOLDER KEY: %s" % folder_key)
+
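+ # query the public folder API for the folder contents; each file is identified by its quickkey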
+ json_resp = json_loads(self.load(
+ "http://www.mediafire.com/api/folder/get_info.php?folder_key=%s&response_format=json&version=1" % folder_key))
+ #self.logInfo(json_resp)
+ if json_resp['response']['result'] == "Success":
+ for link in json_resp['response']['folder_info']['files']:
+ self.urls.append("http://www.mediafire.com/file/%s" % link['quickkey'])
+ else:
+ self.fail(json_resp['response']['message'])
+ elif result == 1:
+ self.offline()
+ else:
+ self.urls.append(url)
diff --git a/pyload/plugins/crypter/MegaRapidCz.py b/pyload/plugins/crypter/MegaRapidCz.py
new file mode 100644
index 000000000..da797965e
--- /dev/null
+++ b/pyload/plugins/crypter/MegaRapidCz.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class MegaRapidCz(SimpleCrypter):
+ __name__ = "MegaRapidCz"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?(share|mega)rapid\.cz/slozka/\d+/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Share-Rapid.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ LINK_PATTERN = r'<td class="soubor"[^>]*><a href="([^"]+)">'
diff --git a/pyload/plugins/crypter/MegauploadCom.py b/pyload/plugins/crypter/MegauploadCom.py
new file mode 100644
index 000000000..57c08bd53
--- /dev/null
+++ b/pyload/plugins/crypter/MegauploadCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class MegauploadCom(DeadCrypter):
+ __name__ = "MegauploadCom"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?megaupload\.com/(\?f|xml/folderfiles\.php\?.*&?folderid)=\w+'
+
+ __description__ = """Megaupload.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
diff --git a/pyload/plugins/crypter/Movie2kTo.py b/pyload/plugins/crypter/Movie2kTo.py
new file mode 100644
index 000000000..bb00e2eed
--- /dev/null
+++ b/pyload/plugins/crypter/Movie2kTo.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class Movie2kTo(DeadCrypter):
+ __name__ = "Movie2kTo"
+ __type__ = "crypter"
+ __version__ = "0.51"
+
+ __pattern__ = r'http://(?:www\.)?movie2k\.to/(.*)\.html'
+ __config__ = []
+
+ __description__ = """Movie2k.to decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("4Christopher", "4Christopher@gmx.de")]
diff --git a/pyload/plugins/crypter/MultiUpOrg.py b/pyload/plugins/crypter/MultiUpOrg.py
new file mode 100644
index 000000000..ba5549683
--- /dev/null
+++ b/pyload/plugins/crypter/MultiUpOrg.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+import re
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class MultiUpOrg(SimpleCrypter):
+ __name__ = "MultiUpOrg"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?multiup\.org/(en|fr)/(?P<TYPE>project|download|miror)/\w+(/\w+)?'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """MultiUp.org decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'<title>.*(?:Project|Projet|ownload|élécharger) (?P<N>.+?) (\(|- )'
+
+
+ def getLinks(self):
+ m_type = re.match(self.__pattern__, self.pyfile.url).group("TYPE")
+
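+ # "project" pages list the per-file download pages directly; "download" pages are
+ # first resolved to their mirror page, where the individual mirror links are scraped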
+ if m_type == "project":
+ pattern = r'\n(http://www\.multiup\.org/(?:en|fr)/download/.*)'
+ else:
+ pattern = r'style="width:97%;text-align:left".*\n.*href="(.*)"'
+ if m_type == "download":
+ dl_pattern = r'href="(.*)">.*\n.*<h5>DOWNLOAD</h5>'
+ miror_page = urljoin("http://www.multiup.org", re.search(dl_pattern, self.html).group(1))
+ self.html = self.load(miror_page)
+
+ return re.findall(pattern, self.html)
diff --git a/pyload/plugins/crypter/MultiloadCz.py b/pyload/plugins/crypter/MultiloadCz.py
new file mode 100644
index 000000000..02b60876e
--- /dev/null
+++ b/pyload/plugins/crypter/MultiloadCz.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class MultiloadCz(Crypter):
+ __name__ = "MultiloadCz"
+ __type__ = "crypter"
+ __version__ = "0.4"
+
+ __pattern__ = r'http://(?:[^/]*\.)?multiload\.cz/(stahnout|slozka)/.*'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True),
+ ("usedHoster", "str", "Prefered hoster list (bar-separated)", ""),
+ ("ignoredHoster", "str", "Ignored hoster list (bar-separated)", "")]
+
+ __description__ = """Multiload.cz decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ FOLDER_PATTERN = r'<form action="" method="get"><textarea[^>]*>([^>]*)</textarea></form>'
+ LINK_PATTERN = r'<p class="manager-server"><strong>([^<]+)</strong></p><p class="manager-linky"><a href="([^"]+)">'
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+
+ if re.match(self.__pattern__, pyfile.url).group(1) == "slozka":
+ m = re.search(self.FOLDER_PATTERN, self.html)
+ if m:
+ self.urls.extend(m.group(1).split())
+ else:
+ m = re.findall(self.LINK_PATTERN, self.html)
+ if m:
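+ # keep links from the preferred hosters first; if none match, fall back to
+ # every hoster that is not on the ignore list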
+ preferred_set = set(self.getConfig("usedHoster").split('|'))
+ self.urls.extend([x[1] for x in m if x[0] in preferred_set])
+
+ if not self.urls:
+ ignored_set = set(self.getConfig("ignoredHoster").split('|'))
+ self.urls.extend([x[1] for x in m if x[0] not in ignored_set])
diff --git a/pyload/plugins/crypter/MultiuploadCom.py b/pyload/plugins/crypter/MultiuploadCom.py
new file mode 100644
index 000000000..713d67c18
--- /dev/null
+++ b/pyload/plugins/crypter/MultiuploadCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class MultiuploadCom(DeadCrypter):
+ __name__ = "MultiuploadCom"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?multiupload\.(com|nl)/\w+'
+
+ __description__ = """MultiUpload.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
diff --git a/pyload/plugins/crypter/NCryptIn.py b/pyload/plugins/crypter/NCryptIn.py
new file mode 100644
index 000000000..edb664d32
--- /dev/null
+++ b/pyload/plugins/crypter/NCryptIn.py
@@ -0,0 +1,315 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+from Crypto.Cipher import AES
+
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+
+
+class NCryptIn(Crypter):
+ __name__ = "NCryptIn"
+ __type__ = "crypter"
+ __version__ = "1.33"
+
+ __pattern__ = r'http://(?:www\.)?ncrypt\.in/(?P<type>folder|link|frame)-([^/\?]+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """NCrypt.in decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ JK_KEY = "jk"
+ CRYPTED_KEY = "crypted"
+
+ NAME_PATTERN = r'<meta name="description" content="(?P<N>[^"]+)"'
+
+
+ def setup(self):
+ self.package = None
+ self.cleanedHtml = None
+ self.links_source_order = ["cnl2", "rsdf", "ccf", "dlc", "web"]
+ self.protection_type = None
+
+
+ def decrypt(self, pyfile):
+ # Init
+ self.package = pyfile.package()
+ package_links = []
+ package_name = self.package.name
+ folder_name = self.package.folder
+
+ # Deal with single links
+ if self.isSingleLink():
+ package_links.extend(self.handleSingleLink())
+
+ # Deal with folders
+ else:
+
+ # Request folder home
+ self.html = self.requestFolderHome()
+ self.cleanedHtml = self.removeHtmlCrap(self.html)
+ if not self.isOnline():
+ self.offline()
+
+ # Check for folder protection
+ if self.isProtected():
+ self.html = self.unlockProtection()
+ self.cleanedHtml = self.removeHtmlCrap(self.html)
+ self.handleErrors()
+
+ # Prepare package name and folder
+ (package_name, folder_name) = self.getPackageInfo()
+
+ # Extract package links
+ for link_source_type in self.links_source_order:
+ package_links.extend(self.handleLinkSource(link_source_type))
+ if package_links: # use only first source which provides links
+ break
+ package_links = set(package_links)
+
+ # Pack and return links
+ if package_links:
+ self.packages = [(package_name, package_links, folder_name)]
+
+
+ def isSingleLink(self):
+ link_type = re.match(self.__pattern__, self.pyfile.url).group('type')
+ return link_type in ("link", "frame")
+
+
+ def requestFolderHome(self):
+ return self.load(self.pyfile.url, decode=True)
+
+
+ def removeHtmlCrap(self, content):
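+ # strip hidden/decoy markup (display:none blocks, jdownloader teasers, hidden
+ # inputs) so the later link patterns only see the real content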
+ patterns = (r'(type="hidden".*?(name=".*?")?.*?value=".*?")',
+ r'display:none;">(.*?)</(div|span)>',
+ r'<div\s+class="jdownloader"(.*?)</div>',
+ r'<table class="global">(.*?)</table>',
+ r'<iframe\s+style="display:none(.*?)</iframe>')
+ for pattern in patterns:
+ rexpr = re.compile(pattern, re.S)
+ content = re.sub(rexpr, "", content)
+ return content
+
+
+ def isOnline(self):
+ if "Your folder does not exist" in self.cleanedHtml:
+ self.logDebug("File not m")
+ return False
+ return True
+
+
+ def isProtected(self):
+ form = re.search(r'<form.*?name.*?protected.*?>(.*?)</form>', self.cleanedHtml, re.S)
+ if form is not None:
+ content = form.group(1)
+ for keyword in ("password", "captcha"):
+ if keyword in content:
+ self.protection_type = keyword
+ self.logDebug("Links are %s protected" % self.protection_type)
+ return True
+ return False
+
+
+ def getPackageInfo(self):
+ m = re.search(self.NAME_PATTERN, self.html)
+ if m:
+ name = folder = m.group('N').strip()
+ self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+ else:
+ name = self.package.name
+ folder = self.package.folder
+ self.logDebug("Package info not m, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+ return name, folder
+
+
+ def unlockProtection(self):
+ postData = {}
+
+ form = re.search(r'<form name="protected"(.*?)</form>', self.cleanedHtml, re.S).group(1)
+
+ # Submit package password
+ if "password" in form:
+ password = self.getPassword()
+ self.logDebug("Submitting password [%s] for protected links" % password)
+ postData['password'] = password
+
+ # Resolve anicaptcha
+ if "anicaptcha" in form:
+ self.logDebug("Captcha protected")
+ captchaUri = re.search(r'src="(/temp/anicaptcha/[^"]+)', form).group(1)
+ captcha = self.decryptCaptcha("http://ncrypt.in" + captchaUri)
+ self.logDebug("Captcha resolved [%s]" % captcha)
+ postData['captcha'] = captcha
+
+ # Resolve recaptcha
+ if "recaptcha" in form:
+ self.logDebug("ReCaptcha protected")
+ captcha_key = re.search(r'\?k=(.*?)"', form).group(1)
+ self.logDebug("Resolving ReCaptcha with key [%s]" % captcha_key)
+ recaptcha = ReCaptcha(self)
+ challenge, code = recaptcha.challenge(captcha_key)
+ postData['recaptcha_challenge_field'] = challenge
+ postData['recaptcha_response_field'] = code
+
+ # Resolve circlecaptcha
+ if "circlecaptcha" in form:
+ self.logDebug("CircleCaptcha protected")
+ captcha_img_url = "http://ncrypt.in/classes/captcha/circlecaptcha.php"
+ coords = self.decryptCaptcha(captcha_img_url, forceUser=True, imgtype="png", result_type='positional')
+ self.logDebug("Captcha resolved, coords [%s]" % str(coords))
+ postData['circle.x'] = coords[0]
+ postData['circle.y'] = coords[1]
+
+ # Unlock protection
+ postData['submit_protected'] = 'Continue to folder'
+ return self.load(self.pyfile.url, post=postData, decode=True)
+
+
+ def handleErrors(self):
+ if self.protection_type == "password":
+ if "This password is invalid!" in self.cleanedHtml:
+ self.logDebug("Incorrect password, please set right password on 'Edit package' form and retry")
+ self.fail(_("Incorrect password, please set right password on 'Edit package' form and retry"))
+
+ if self.protection_type == "captcha":
+ if "The securitycheck was wrong!" in self.cleanedHtml:
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+
+ def handleLinkSource(self, link_source_type):
+ # Check for JS engine
+ require_js_engine = link_source_type in ("cnl2", "rsdf", "ccf", "dlc")
+ if require_js_engine and not self.js:
+ self.logDebug("No JS engine available, skip %s links" % link_source_type)
+ return []
+
+ # Select suitable handler
+ if link_source_type == 'single':
+ return self.handleSingleLink()
+ if link_source_type == 'cnl2':
+ return self.handleCNL2()
+ elif link_source_type in ("rsdf", "ccf", "dlc"):
+ return self.handleContainers()
+ elif link_source_type == "web":
+ return self.handleWebLinks()
+ else:
+ self.error('Unknown source type "%s" (this is probably a bug)' % link_source_type)
+
+
+ def handleSingleLink(self):
+ self.logDebug("Handling Single link")
+ package_links = []
+
+ # Decrypt single link
+ decrypted_link = self.decryptLink(self.pyfile.url)
+ if decrypted_link:
+ package_links.append(decrypted_link)
+
+ return package_links
+
+
+ def handleCNL2(self):
+ self.logDebug("Handling CNL2 links")
+ package_links = []
+
+ if 'cnl2_output' in self.cleanedHtml:
+ try:
+ (vcrypted, vjk) = self._getCipherParams()
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.fail(_("Unable to decrypt CNL2 links"))
+
+ return package_links
+
+
+ def handleContainers(self):
+ self.logDebug("Handling Container links")
+ package_links = []
+
+ pattern = r'/container/(rsdf|dlc|ccf)/(\w+)'
+ containersLinks = re.findall(pattern, self.html)
+ self.logDebug("Decrypting %d Container links" % len(containersLinks))
+ for containerLink in containersLinks:
+ link = "http://ncrypt.in/container/%s/%s.%s" % (containerLink[0], containerLink[1], containerLink[0])
+ package_links.append(link)
+
+ return package_links
+
+
+ def handleWebLinks(self):
+ self.logDebug("Handling Web links")
+ pattern = r'(http://ncrypt\.in/link-.*?=)'
+ links = re.findall(pattern, self.html)
+
+ package_links = []
+ self.logDebug("Decrypting %d Web links" % len(links))
+ for i, link in enumerate(links):
+ self.logDebug("Decrypting Web link %d, %s" % (i + 1, link))
+ decrypted_link = self.decryptLink(link)
+ if decrypted_link:
+ package_links.append(decrypted_link)
+
+ return package_links
+
+
+ def decryptLink(self, link):
+ try:
+ url = link.replace("link-", "frame-")
+ link = self.load(url, just_header=True)['location']
+ return link
+ except Exception, detail:
+ self.logDebug("Error decrypting link %s, %s" % (link, detail))
+
+
+ def _getCipherParams(self):
+ pattern = r'<input.*?name="%s".*?value="(.*?)"'
+
+ # Get jk
+ jk_re = pattern % NCryptIn.JK_KEY
+ vjk = re.findall(jk_re, self.html)
+
+ # Get crypted
+ crypted_re = pattern % NCryptIn.CRYPTED_KEY
+ vcrypted = re.findall(crypted_re, self.html)
+
+ # Log and return
+ self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+ return vcrypted, vjk
+
+
+ def _getLinks(self, crypted, jk):
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Block has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/NetfolderIn.py b/pyload/plugins/crypter/NetfolderIn.py
new file mode 100644
index 000000000..bba72c047
--- /dev/null
+++ b/pyload/plugins/crypter/NetfolderIn.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class NetfolderIn(SimpleCrypter):
+ __name__ = "NetfolderIn"
+ __type__ = "crypter"
+ __version__ = "0.72"
+
+ __pattern__ = r'http://(?:www\.)?netfolder\.in/((?P<id1>\w+)/\w+|folder\.php\?folder_id=(?P<id2>\w+))'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """NetFolder.in decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("fragonib", "fragonib[AT]yahoo[DOT]es")]
+
+
+ NAME_PATTERN = r'<div class="Text">Inhalt des Ordners <span.*>(?P<N>.+)</span></div>'
+
+
+ def prepare(self):
+ super(NetfolderIn, self).prepare()
+
+ # Check for password protection
+ if self.isPasswordProtected():
+ self.html = self.submitPassword()
+ if not self.html:
+ self.fail(_("Incorrect password, please set right password on Add package form and retry"))
+
+
+ def isPasswordProtected(self):
+ if '<input type="password" name="password"' in self.html:
+ self.logDebug("Links are password protected")
+ return True
+ return False
+
+
+ def submitPassword(self):
+ # Gather data
+ try:
+ m = re.match(self.__pattern__, self.pyfile.url)
+ id = max(m.group('id1'), m.group('id2'))
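+ # only one of the two id groups can match; max() simply picks the non-None one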
+ except AttributeError:
+ self.logDebug("Unable to get package id from url [%s]" % self.pyfile.url)
+ return
+ url = "http://netfolder.in/folder.php?folder_id=" + id
+ password = self.getPassword()
+
+ # Submit package password
+ post = {'password': password, 'save': 'Absenden'}
+ self.logDebug("Submitting password [%s] for protected links with id [%s]" % (password, id))
+ html = self.load(url, {}, post)
+
+ # Check for invalid password
+ if '<div class="InPage_Error">' in html:
+ self.logDebug("Incorrect password, please set right password on Edit package form and retry")
+ return None
+
+ return html
+
+
+ def getLinks(self):
+ links = re.search(r'name="list" value="(.*?)"', self.html).group(1).split(",")
+ self.logDebug("Package has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/NosvideoCom.py b/pyload/plugins/crypter/NosvideoCom.py
new file mode 100644
index 000000000..5bd3b16a0
--- /dev/null
+++ b/pyload/plugins/crypter/NosvideoCom.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class NosvideoCom(SimpleCrypter):
+ __name__ = "NosvideoCom"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?nosvideo\.com/\?v=\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Nosvideo.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("igel", "igelkun@myopera.com")]
+
+
+ LINK_PATTERN = r'href="(http://(?:w{3}\.)?nosupload\.com/\?d=\w+)"'
+ NAME_PATTERN = r'<[tT]itle>Watch (?P<N>.+?)<'
diff --git a/pyload/plugins/crypter/OneKhDe.py b/pyload/plugins/crypter/OneKhDe.py
new file mode 100644
index 000000000..ff15b0baa
--- /dev/null
+++ b/pyload/plugins/crypter/OneKhDe.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.utils import html_unescape
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class OneKhDe(Crypter):
+ __name__ = "OneKhDe"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?1kh\.de/f/'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """1kh.de decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org")]
+
+
+ def __init__(self, parent):
+ Crypter.__init__(self, parent)
+ self.parent = parent
+
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ return True
+
+
+ def proceed(self, url, location):
+ url = self.parent.url
+ self.html = self.load(url)
+ link_ids = re.findall(r"<a id=\"DownloadLink_(\d*)\" href=\"http://1kh.de/", self.html)
+ for id in link_ids:
+ new_link = html_unescape(re.search("width=\"100%\" src=\"(.*)\"></iframe>", self.load("http://1kh.de/l/" + id)).group(1))
+ self.urls.append(new_link)
diff --git a/pyload/plugins/crypter/OronCom.py b/pyload/plugins/crypter/OronCom.py
new file mode 100644
index 000000000..c92666a5b
--- /dev/null
+++ b/pyload/plugins/crypter/OronCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class OronCom(DeadCrypter):
+ __name__ = "OronCom"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?oron\.com/folder/\w+'
+ __config__ = []
+
+ __description__ = """Oron.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("DHMH", "webmaster@pcProfil.de")]
diff --git a/pyload/plugins/crypter/PastebinCom.py b/pyload/plugins/crypter/PastebinCom.py
new file mode 100644
index 000000000..e47698752
--- /dev/null
+++ b/pyload/plugins/crypter/PastebinCom.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class PastebinCom(SimpleCrypter):
+ __name__ = "PastebinCom"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?pastebin\.com/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Pastebin.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ LINK_PATTERN = r'<div class="de\d+">(https?://[^ <]+)(?:[^<]*)</div>'
+ NAME_PATTERN = r'<div class="paste_box_line1" title="(?P<N>[^"]+)">'
diff --git a/pyload/plugins/crypter/QuickshareCz.py b/pyload/plugins/crypter/QuickshareCz.py
new file mode 100644
index 000000000..156c5feeb
--- /dev/null
+++ b/pyload/plugins/crypter/QuickshareCz.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class QuickshareCz(Crypter):
+ __name__ = "QuickshareCz"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?quickshare\.cz/slozka-\d+.*'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Quickshare.cz folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ FOLDER_PATTERN = r'<textarea[^>]*>(.*?)</textarea>'
+ LINK_PATTERN = r'(http://www\.quickshare\.cz/\S+)'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ m = re.search(self.FOLDER_PATTERN, html, re.S)
+ if m is None:
+ self.error(_("FOLDER_PATTERN not found"))
+ self.urls.extend(re.findall(self.LINK_PATTERN, m.group(1)))
diff --git a/pyload/plugins/crypter/RSLayerCom.py b/pyload/plugins/crypter/RSLayerCom.py
new file mode 100644
index 000000000..70c04a10c
--- /dev/null
+++ b/pyload/plugins/crypter/RSLayerCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class RSLayerCom(DeadCrypter):
+ __name__ = "RSLayerCom"
+ __type__ = "crypter"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?rs-layer\.com/directory-'
+ __config__ = []
+
+ __description__ = """RS-Layer.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("hzpz", None)]
diff --git a/pyload/plugins/crypter/RapidfileshareNet.py b/pyload/plugins/crypter/RapidfileshareNet.py
new file mode 100644
index 000000000..bc318a01e
--- /dev/null
+++ b/pyload/plugins/crypter/RapidfileshareNet.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSCrypter import XFSCrypter
+
+
+class RapidfileshareNet(XFSCrypter):
+ __name__ = "RapidfileshareNet"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?rapidfileshare\.net/users/\w+/\d+/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Rapidfileshare.net folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "rapidfileshare.net"
diff --git a/pyload/plugins/crypter/RelinkUs.py b/pyload/plugins/crypter/RelinkUs.py
new file mode 100644
index 000000000..c27d790b8
--- /dev/null
+++ b/pyload/plugins/crypter/RelinkUs.py
@@ -0,0 +1,282 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+import os
+
+from Crypto.Cipher import AES
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class RelinkUs(Crypter):
+ __name__ = "RelinkUs"
+ __type__ = "crypter"
+ __version__ = "3.1"
+
+ __pattern__ = r'http://(?:www\.)?relink\.us/(f/|((view|go)\.php\?id=))(?P<id>.+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Relink.us decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"),
+ ("AndroKev", "neureither.kevin@gmail.com")]
+
+
+ PREFERRED_LINK_SOURCES = ["cnl2", "dlc", "web"]
+
+ OFFLINE_TOKEN = r'<title>Tattooside'
+
+ PASSWORD_TOKEN = r'container_password.php'
+ PASSWORD_ERROR_TOKEN = r'You have entered an incorrect password'
+ PASSWORD_SUBMIT_URL = r'http://www.relink.us/container_password.php'
+
+ CAPTCHA_TOKEN = r'container_captcha.php'
+ CAPTCHA_ERROR_TOKEN = r'You have solved the captcha wrong'
+ CAPTCHA_IMG_URL = r'http://www.relink.us/core/captcha/circlecaptcha.php'
+ CAPTCHA_SUBMIT_URL = r'http://www.relink.us/container_captcha.php'
+
+ FILE_TITLE_REGEX = r'<th>Title</th><td>(.*)</td></tr>'
+ FILE_NOTITLE = r'No title'
+
+ CNL2_FORM_REGEX = r'<form id="cnl_form-(.*?)</form>'
+ CNL2_FORMINPUT_REGEX = r'<input.*?name="%s".*?value="(.*?)"'
+ CNL2_JK_KEY = "jk"
+ CNL2_CRYPTED_KEY = "crypted"
+
+ DLC_LINK_REGEX = r'<a href=".*?" class="dlc_button" target="_blank">'
+ DLC_DOWNLOAD_URL = r'http://www.relink.us/download.php'
+
+ WEB_FORWARD_REGEX = r'getFile\(\'(?P<link>.+)\'\)'
+ WEB_FORWARD_URL = r'http://www.relink.us/frame.php'
+ WEB_LINK_REGEX = r'<iframe name="Container" height="100%" frameborder="no" width="100%" src="(?P<link>.+)"></iframe>'
+
+
+ def setup(self):
+ self.fileid = None
+ self.package = None
+ self.password = None
+ self.captcha = False
+
+
+ def decrypt(self, pyfile):
+ # Init
+ self.initPackage(pyfile)
+
+ # Request package
+ self.requestPackage()
+
+ # Check for online
+ if not self.isOnline():
+ self.offline()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleErrors()
+
+ # Get package name and folder
+ (package_name, folder_name) = self.getPackageInfo()
+
+ # Extract package links
+ package_links = []
+ for sources in self.PREFERRED_LINK_SOURCES:
+ package_links.extend(self.handleLinkSource(sources))
+ if package_links: # use only first source which provides links
+ break
+ package_links = set(package_links)
+
+ # Pack
+ if package_links:
+ self.packages = [(package_name, package_links, folder_name)]
+
+
+ def initPackage(self, pyfile):
+ self.fileid = re.match(self.__pattern__, pyfile.url).group('id')
+ self.package = pyfile.package()
+ self.password = self.getPassword()
+
+
+ def requestPackage(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+
+ def isOnline(self):
+ if self.OFFLINE_TOKEN in self.html:
+ self.logDebug("File not found")
+ return False
+ return True
+
+
+ def isPasswordProtected(self):
+ if self.PASSWORD_TOKEN in self.html:
+ self.logDebug("Links are password protected")
+ return True
+
+
+ def isCaptchaProtected(self):
+ if self.CAPTCHA_TOKEN in self.html:
+ self.logDebug("Links are captcha protected")
+ return True
+ return False
+
+
+ def unlockPasswordProtection(self):
+ self.logDebug("Submitting password [%s] for protected links" % self.password)
+ passwd_url = self.PASSWORD_SUBMIT_URL + "?id=%s" % self.fileid
+ passwd_data = {'id': self.fileid, 'password': self.password, 'pw': 'submit'}
+ self.html = self.load(passwd_url, post=passwd_data, decode=True)
+
+
+ def unlockCaptchaProtection(self):
+ self.logDebug("Request user positional captcha resolving")
+ captcha_img_url = self.CAPTCHA_IMG_URL + "?id=%s" % self.fileid
+ coords = self.decryptCaptcha(captcha_img_url, forceUser=True, imgtype="png", result_type='positional')
+ self.logDebug("Captcha resolved, coords [%s]" % str(coords))
+ captcha_post_url = self.CAPTCHA_SUBMIT_URL + "?id=%s" % self.fileid
+ captcha_post_data = {'button.x': coords[0], 'button.y': coords[1], 'captcha': 'submit'}
+ self.html = self.load(captcha_post_url, post=captcha_post_data, decode=True)
+
+
+ def getPackageInfo(self):
+ name = folder = None
+
+ # Try to get info from web
+ m = re.search(self.FILE_TITLE_REGEX, self.html)
+ if m is not None:
+ title = m.group(1).strip()
+ if not self.FILE_NOTITLE in title:
+ name = folder = title
+ self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+
+ # Fallback to defaults
+ if not name or not folder:
+ name = self.package.name
+ folder = self.package.folder
+ self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+
+ # Return package info
+ return name, folder
+
+
+ def handleErrors(self):
+ if self.PASSWORD_ERROR_TOKEN in self.html:
+ msg = "Incorrect password, please set right password on 'Edit package' form and retry"
+ self.logDebug(msg)
+ self.fail(_(msg))
+
+ if self.captcha:
+ if self.CAPTCHA_ERROR_TOKEN in self.html:
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+
+ def handleLinkSource(self, source):
+ if source == 'cnl2':
+ return self.handleCNL2Links()
+ elif source == 'dlc':
+ return self.handleDLCLinks()
+ elif source == 'web':
+ return self.handleWEBLinks()
+ else:
+ self.error('Unknown source type "%s" (this is probably a bug)' % source)
+
+
+ def handleCNL2Links(self):
+ self.logDebug("Search for CNL2 links")
+ package_links = []
+ m = re.search(self.CNL2_FORM_REGEX, self.html, re.S)
+ if m is not None:
+ cnl2_form = m.group(1)
+ try:
+ (vcrypted, vjk) = self._getCipherParams(cnl2_form)
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.logDebug("Unable to decrypt CNL2 links")
+ return package_links
+
+
+ def handleDLCLinks(self):
+ self.logDebug("Search for DLC links")
+ package_links = []
+ m = re.search(self.DLC_LINK_REGEX, self.html)
+ if m is not None:
+ container_url = self.DLC_DOWNLOAD_URL + "?id=%s&dlc=1" % self.fileid
+ self.logDebug("Downloading DLC container link [%s]" % container_url)
+ try:
+ dlc = self.load(container_url)
+ dlc_filename = self.fileid + ".dlc"
+ dlc_filepath = os.path.join(self.config['general']['download_folder'], dlc_filename)
+ with open(dlc_filepath, "wb") as f:
+ f.write(dlc)
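+ # hand the local .dlc path back as a "link" so pyLoad's container plugin can decrypt it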
+ package_links.append(dlc_filepath)
+ except:
+ self.fail("Unable to download DLC container")
+ return package_links
+
+
+ def handleWEBLinks(self):
+ self.logDebug("Search for WEB links")
+ package_links = []
+ fw_params = re.findall(self.WEB_FORWARD_REGEX, self.html)
+ self.logDebug("Decrypting %d Web links" % len(fw_params))
+ for index, fw_param in enumerate(fw_params):
+ try:
+ fw_url = self.WEB_FORWARD_URL + "?%s" % fw_param
+ self.logDebug("Decrypting Web link %d, %s" % (index + 1, fw_url))
+ fw_response = self.load(fw_url, decode=True)
+ dl_link = re.search(self.WEB_LINK_REGEX, fw_response).group('link')
+ package_links.append(dl_link)
+ except Exception, detail:
+ self.logDebug("Error decrypting Web link %s, %s" % (index, detail))
+ self.setWait(4)
+ self.wait()
+ return package_links
+
+
+ def _getCipherParams(self, cnl2_form):
+ # Get jk
+ jk_re = self.CNL2_FORMINPUT_REGEX % self.CNL2_JK_KEY
+ vjk = re.findall(jk_re, cnl2_form, re.I)
+
+ # Get crypted
+ crypted_re = self.CNL2_FORMINPUT_REGEX % self.CNL2_CRYPTED_KEY
+ vcrypted = re.findall(crypted_re, cnl2_form, re.I)
+
+ # Log and return
+ self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+ return vcrypted, vjk
+
+
+ def _getLinks(self, crypted, jk):
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Package has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/SafelinkingNet.py b/pyload/plugins/crypter/SafelinkingNet.py
new file mode 100644
index 000000000..38eb36ea5
--- /dev/null
+++ b/pyload/plugins/crypter/SafelinkingNet.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import FOLLOWLOCATION
+
+from BeautifulSoup import BeautifulSoup
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.plugins.internal.CaptchaService import SolveMedia
+
+
+class SafelinkingNet(Crypter):
+ __name__ = "SafelinkingNet"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'https?://(?:www\.)?safelinking\.net/([pd])/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Safelinking.net decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("quareevo", "quareevo@arcor.de")]
+
+
+ SOLVEMEDIA_PATTERN = "solvemediaApiKey = '([\w.-]+)';"
+
+
+ def decrypt(self, pyfile):
+ url = pyfile.url
+
+ if re.match(self.__pattern__, url).group(1) == "d":
+
+ header = self.load(url, just_header=True)
+ if 'location' in header:
+ self.urls = [header['location']]
+ else:
+ self.error(_("Couldn't find forwarded Link"))
+
+ else:
+ postData = {"post-protect": "1"}
+
+ if "link-password" in self.html:
+ postData['link-password'] = self.getPassword()
+
+ if "altcaptcha" in self.html:
+ for _i in xrange(5):
+ m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if m:
+ captchaKey = m.group(1)
+ captcha = SolveMedia(self)
+ captchaProvider = "Solvemedia"
+ else:
+ self.fail(_("Error parsing captcha"))
+
+ challenge, response = captcha.challenge(captchaKey)
+ postData['adcopy_challenge'] = challenge
+ postData['adcopy_response'] = response
+
+ self.html = self.load(url, post=postData)
+ if "The password you entered was incorrect" in self.html:
+ self.fail(_("Incorrect Password"))
+ if not "The CAPTCHA code you entered was wrong" in self.html:
+ break
+
+ pyfile.package().password = ""
+ soup = BeautifulSoup(self.html)
+ scripts = soup.findAll("script")
+ for s in scripts:
+ if "d_links" in s.text:
+ break
+ m = re.search('d_links":(\[.*?\])', s.text)
+ if m:
+ linkDict = json_loads(m.group(1))
+ for link in linkDict:
+ if not "http://" in link['full']:
+ self.urls.append("https://safelinking.net/d/" + link['full'])
+ else:
+ self.urls.append(link['full'])
diff --git a/pyload/plugins/crypter/SecuredIn.py b/pyload/plugins/crypter/SecuredIn.py
new file mode 100644
index 000000000..21ebff060
--- /dev/null
+++ b/pyload/plugins/crypter/SecuredIn.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class SecuredIn(DeadCrypter):
+ __name__ = "SecuredIn"
+ __type__ = "crypter"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?secured\.in/download-[\d]+-\w{8}\.html'
+ __config__ = []
+
+ __description__ = """Secured.in decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
diff --git a/pyload/plugins/crypter/SexuriaCom.py b/pyload/plugins/crypter/SexuriaCom.py
new file mode 100644
index 000000000..dd9b01bcd
--- /dev/null
+++ b/pyload/plugins/crypter/SexuriaCom.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class SexuriaCom(Crypter):
+ __name__ = "SexuriaCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?sexuria\.com/(v1/)?(Pornos_Kostenlos_.+?_(\d+)\.html|dl_links_\d+_\d+\.html|id=\d+\&part=\d+\&link=\d+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Sexuria.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("NETHead", "NETHead.AT.gmx.DOT.net")]
+
+
+ PATTERN_SUPPORTED_MAIN = re.compile(r'http://(www\.)?sexuria\.com/(v1/)?Pornos_Kostenlos_.+?_(\d+)\.html', flags=re.I)
+ PATTERN_SUPPORTED_CRYPT = re.compile(r'http://(www\.)?sexuria\.com/(v1/)?dl_links_\d+_(?P<ID>\d+)\.html', flags=re.I)
+ PATTERN_SUPPORTED_REDIRECT = re.compile(r'http://(www\.)?sexuria\.com/out\.php\?id=(?P<ID>\d+)\&part=\d+\&link=\d+', flags=re.I)
+ PATTERN_TITLE = re.compile(r'<title> - (?P<TITLE>.*) Sexuria - Kostenlose Pornos - Rapidshare XXX Porn</title>', flags=re.I)
+ PATTERN_PASSWORD = re.compile(r'<strong>Passwort: </strong></div></td>.*?bgcolor="#EFEFEF">(?P<PWD>.*?)</td>', flags=re.I | re.S)
+ PATTERN_DL_LINK_PAGE = re.compile(r'"(dl_links_\d+_\d+\.html)"', flags=re.I)
+ PATTERN_REDIRECT_LINKS = re.compile(r'value="(http://sexuria\.com/out\.php\?id=\d+\&part=\d+\&link=\d+)" readonly', flags=re.I)
+
+
+ def decrypt(self, pyfile):
+ # Init
+ self.pyfile = pyfile
+ self.package = pyfile.package()
+
+ # Get package links
+ package_name, self.links, folder_name, package_pwd = self.decryptLinks(self.pyfile.url)
+ self.packages = [(package_name, self.links, folder_name)]
+
+
+ def decryptLinks(self, url):
+ linklist = []
+ name = self.package.name
+ folder = self.package.folder
+ password = None
+
+ if re.match(self.PATTERN_SUPPORTED_MAIN, url):
+ # Processing main page
+ html = self.load(url)
+ links = re.findall(self.PATTERN_DL_LINK_PAGE, html)
+ for link in links:
+ linklist.append("http://sexuria.com/v1/" + link)
+
+ elif re.match(self.PATTERN_SUPPORTED_REDIRECT, url):
+ # Processing direct redirect link (out.php), redirecting to main page
+ id = re.search(self.PATTERN_SUPPORTED_REDIRECT, url).group('ID')
+ if id:
+ linklist.append("http://sexuria.com/v1/Pornos_Kostenlos_liebe_%s.html" % id)
+
+ elif re.match(self.PATTERN_SUPPORTED_CRYPT, url):
+ # Extract info from main file
+ id = re.search(self.PATTERN_SUPPORTED_CRYPT, url).group('ID')
+ html = self.load("http://sexuria.com/v1/Pornos_Kostenlos_info_%s.html" % id, decode=True)
+
+ title = re.search(self.PATTERN_TITLE, html).group('TITLE').strip()
+ if title:
+ name = folder = title
+ self.logDebug("Package info found, name [%s] and folder [%s]" % (name, folder))
+
+ pwd = re.search(self.PATTERN_PASSWORD, html).group('PWD')
+ if pwd:
+ password = pwd.strip()
+ self.logDebug("Password info [%s] found" % password)
+
+ # Process link (dl_link)
+ html = self.load(url)
+ links = re.findall(self.PATTERN_REDIRECT_LINKS, html)
+ if len(links) == 0:
+ self.LogError("Broken for link %s" % link)
+ else:
+ for link in links:
+ link = link.replace("http://sexuria.com/", "http://www.sexuria.com/")
+ finallink = self.load(link, just_header=True)['location']
+ if not finallink or "sexuria.com/" in finallink:
+ self.logError("Broken for link %s" % link)
+ else:
+ linklist.append(finallink)
+
+ # Debug log
+ self.logDebug("%d supported links" % len(linklist))
+ for i, link in enumerate(linklist):
+ self.logDebug("Supported link %d, %s" % (i + 1, link))
+
+ return name, linklist, folder, password
diff --git a/pyload/plugins/crypter/ShareLinksBiz.py b/pyload/plugins/crypter/ShareLinksBiz.py
new file mode 100644
index 000000000..7aa5dd17b
--- /dev/null
+++ b/pyload/plugins/crypter/ShareLinksBiz.py
@@ -0,0 +1,286 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+from Crypto.Cipher import AES
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class ShareLinksBiz(Crypter):
+ __name__ = "ShareLinksBiz"
+ __type__ = "crypter"
+ __version__ = "1.14"
+
+ __pattern__ = r'http://(?:www\.)?(share-links|s2l)\.biz/(?P<ID>_?\w+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Share-Links.biz decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es")]
+
+
+ def setup(self):
+ self.baseUrl = None
+ self.fileId = None
+ self.package = None
+ self.captcha = False
+
+
+ def decrypt(self, pyfile):
+ # Init
+ self.initFile(pyfile)
+
+ # Request package
+ url = self.baseUrl + '/' + self.fileId
+ self.html = self.load(url, decode=True)
+
+ # Unblock server (load all images)
+ self.unblockServer()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleErrors()
+
+ # Extract package links
+ package_links = []
+ package_links.extend(self.handleWebLinks())
+ package_links.extend(self.handleContainers())
+ package_links.extend(self.handleCNL2())
+ package_links = set(package_links)
+
+ # Get package info
+ package_name, package_folder = self.getPackageInfo()
+
+ # Pack
+ self.packages = [(package_name, package_links, package_folder)]
+
+
+ def initFile(self, pyfile):
+ url = pyfile.url
+ if 's2l.biz' in url:
+ url = self.load(url, just_header=True)['location']
+ self.baseUrl = "http://www.%s.biz" % re.match(self.__pattern__, url).group(1)
+ self.fileId = re.match(self.__pattern__, url).group('ID')
+ self.package = pyfile.package()
+
+
+ def isOnline(self):
+ if "No usable content was found" in self.html:
+ self.logDebug("File not found")
+ return False
+ return True
+
+
+ def isPasswordProtected(self):
+ if re.search(r'''<form.*?id="passwordForm".*?>''', self.html):
+ self.logDebug("Links are protected")
+ return True
+ return False
+
+
+ def isCaptchaProtected(self):
+ if '<map id="captchamap"' in self.html:
+ self.logDebug("Links are captcha protected")
+ return True
+ return False
+
+
+ def unblockServer(self):
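+ # the site appears to withhold the folder content until all template images
+ # have been requested, so fetch them like a normal browser would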
+ imgs = re.findall(r"(/template/images/.*?\.gif)", self.html)
+ for img in imgs:
+ self.load(self.baseUrl + img)
+
+
+ def unlockPasswordProtection(self):
+ password = self.getPassword()
+ self.logDebug("Submitting password [%s] for protected links" % password)
+ post = {"password": password, 'login': 'Submit form'}
+ url = self.baseUrl + '/' + self.fileId
+ self.html = self.load(url, post=post, decode=True)
+
+
+ def unlockCaptchaProtection(self):
+ # Get captcha map
+ captchaMap = self._getCaptchaMap()
+ self.logDebug("Captcha map with [%d] positions" % len(captchaMap.keys()))
+
+ # Request user for captcha coords
+ m = re.search(r'<img src="/captcha.gif\?d=(.*?)&amp;PHPSESSID=(.*?)&amp;legend=1"', self.html)
+ captchaUrl = self.baseUrl + '/captcha.gif?d=%s&PHPSESSID=%s' % (m.group(1), m.group(2))
+        self.logDebug("Waiting for user to select the correct position")
+ coords = self.decryptCaptcha(captchaUrl, forceUser=True, imgtype="gif", result_type='positional')
+ self.logDebug("Captcha resolved, coords [%s]" % str(coords))
+
+ # Resolve captcha
+ href = self._resolveCoords(coords, captchaMap)
+ if href is None:
+ self.invalidCaptcha()
+ self.retry(wait_time=5)
+ url = self.baseUrl + href
+ self.html = self.load(url, decode=True)
+
+
+ def _getCaptchaMap(self):
+ mapp = {}
+ for m in re.finditer(r'<area shape="rect" coords="(.*?)" href="(.*?)"', self.html):
+ rect = eval('(' + m.group(1) + ')')
+ href = m.group(2)
+ mapp[rect] = href
+ return mapp
+
+
+ def _resolveCoords(self, coords, captchaMap):
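+        # Find the image-map rectangle that contains the user's click and return its
+        # href; falls through to None when the click lies outside every rectangle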
+ x, y = coords
+ for rect, href in captchaMap.iteritems():
+ x1, y1, x2, y2 = rect
+            if x1 <= x <= x2 and y1 <= y <= y2:
+ return href
+
+
+ def handleErrors(self):
+ if "The inserted password was wrong" in self.html:
+            self.logDebug("Incorrect password, please set the right password in the 'Edit package' form and retry")
+            self.fail(_("Incorrect password, please set the right password in the 'Edit package' form and retry"))
+
+ if self.captcha:
+ if "Your choice was wrong" in self.html:
+ self.invalidCaptcha()
+ self.retry(wait_time=5)
+ else:
+ self.correctCaptcha()
+
+
+ def getPackageInfo(self):
+ name = folder = None
+
+ # Extract from web package header
+ title_re = r'<h2><img.*?/>(.*)</h2>'
+ m = re.search(title_re, self.html, re.S)
+ if m is not None:
+ title = m.group(1).strip()
+ if 'unnamed' not in title:
+ name = folder = title
+ self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+
+ # Fallback to defaults
+ if not name or not folder:
+ name = self.package.name
+ folder = self.package.folder
+ self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+
+ # Return package info
+ return name, folder
+
+
+ def handleWebLinks(self):
+ package_links = []
+ self.logDebug("Handling Web links")
+
+ #@TODO: Gather paginated web links
+ pattern = r'javascript:_get\(\'(.*?)\', \d+, \'\'\)'
+ ids = re.findall(pattern, self.html)
+ self.logDebug("Decrypting %d Web links" % len(ids))
+ for i, ID in enumerate(ids):
+ try:
+ self.logDebug("Decrypting Web link %d, [%s]" % (i + 1, ID))
+
+ dwLink = self.baseUrl + "/get/lnk/" + ID
+ res = self.load(dwLink)
+
+ code = re.search(r'frm/(\d+)', res).group(1)
+ fwLink = self.baseUrl + "/get/frm/" + code
+ res = self.load(fwLink)
+
+ jscode = re.search(r'<script language="javascript">\s*eval\((.*)\)\s*</script>', res, re.S).group(1)
+ jscode = self.js.eval("f = %s" % jscode)
+ jslauncher = "window=''; parent={frames:{Main:{location:{href:''}}},location:''}; %s; parent.frames.Main.location.href"
+
+ dlLink = self.js.eval(jslauncher % jscode)
+
+ self.logDebug("JsEngine returns value [%s] for redirection link" % dlLink)
+
+ package_links.append(dlLink)
+ except Exception, detail:
+ self.logDebug("Error decrypting Web link [%s], %s" % (ID, detail))
+ return package_links
+
+
+ def handleContainers(self):
+ package_links = []
+ self.logDebug("Handling Container links")
+
+ pattern = r'javascript:_get\(\'(.*?)\', 0, \'(rsdf|ccf|dlc)\'\)'
+ containersLinks = re.findall(pattern, self.html)
+ self.logDebug("Decrypting %d Container links" % len(containersLinks))
+ for containerLink in containersLinks:
+ link = "%s/get/%s/%s" % (self.baseUrl, containerLink[1], containerLink[0])
+ package_links.append(link)
+ return package_links
+
+
+ def handleCNL2(self):
+ package_links = []
+ self.logDebug("Handling CNL2 links")
+
+ if '/lib/cnl2/ClicknLoad.swf' in self.html:
+ try:
+ (crypted, jk) = self._getCipherParams()
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.fail(_("Unable to decrypt CNL2 links"))
+ return package_links
+
+
+ def _getCipherParams(self):
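+        # The CNL2 response is a ";;"-separated record; both the jk function (field 1)
+        # and the crypted link block (field 2) arrive base64-encoded and character-reversed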
+ # Request CNL2
+ code = re.search(r'ClicknLoad.swf\?code=(.*?)"', self.html).group(1)
+ url = "%s/get/cnl2/%s" % (self.baseUrl, code)
+ res = self.load(url)
+ params = res.split(";;")
+
+ # Get jk
+ strlist = list(base64.standard_b64decode(params[1]))
+ strlist.reverse()
+ jk = ''.join(strlist)
+
+ # Get crypted
+ strlist = list(base64.standard_b64decode(params[2]))
+ strlist.reverse()
+ crypted = ''.join(strlist)
+
+ # Log and return
+ return crypted, jk
+
+
+ def _getLinks(self, crypted, jk):
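+        # Click'n'Load 2 scheme: evaluating the jk function yields a hex key, which is
+        # used as both key and IV to AES-CBC-decrypt the crypted link block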
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Block has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/SharingmatrixCom.py b/pyload/plugins/crypter/SharingmatrixCom.py
new file mode 100644
index 000000000..28906ae7b
--- /dev/null
+++ b/pyload/plugins/crypter/SharingmatrixCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class SharingmatrixCom(DeadCrypter):
+ __name__ = "SharingmatrixCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?sharingmatrix\.com/folder/\w+'
+
+ __description__ = """Sharingmatrix.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
diff --git a/pyload/plugins/crypter/SpeedLoadOrg.py b/pyload/plugins/crypter/SpeedLoadOrg.py
new file mode 100644
index 000000000..46c9b8552
--- /dev/null
+++ b/pyload/plugins/crypter/SpeedLoadOrg.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class SpeedLoadOrg(DeadCrypter):
+ __name__ = "SpeedLoadOrg"
+ __type__ = "crypter"
+ __version__ = "0.3"
+
+ __pattern__ = r'http://(?:www\.)?speedload\.org/(\d+~f$|folder/\d+/)'
+ __config__ = []
+
+ __description__ = """Speedload decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
diff --git a/pyload/plugins/crypter/StealthTo.py b/pyload/plugins/crypter/StealthTo.py
new file mode 100644
index 000000000..6177a116a
--- /dev/null
+++ b/pyload/plugins/crypter/StealthTo.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class StealthTo(DeadCrypter):
+ __name__ = "StealthTo"
+ __type__ = "crypter"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?stealth\.to/folder/.+'
+ __config__ = []
+
+ __description__ = """Stealth.to decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org")]
diff --git a/pyload/plugins/crypter/TnyCz.py b/pyload/plugins/crypter/TnyCz.py
new file mode 100644
index 000000000..b2a777da2
--- /dev/null
+++ b/pyload/plugins/crypter/TnyCz.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+import re
+
+
+class TnyCz(SimpleCrypter):
+ __name__ = "TnyCz"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?tny\.cz/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Tny.cz decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'<title>(?P<N>.+) - .+</title>'
+
+
+ def getLinks(self):
+ m = re.search(r'<a id=\'save_paste\' href="(.+save\.php\?hash=.+)">', self.html)
+ return re.findall(".+", self.load(m.group(1), decode=True)) if m else None
diff --git a/pyload/plugins/crypter/TrailerzoneInfo.py b/pyload/plugins/crypter/TrailerzoneInfo.py
new file mode 100644
index 000000000..deee3e23b
--- /dev/null
+++ b/pyload/plugins/crypter/TrailerzoneInfo.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class TrailerzoneInfo(DeadCrypter):
+ __name__ = "TrailerzoneInfo"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?trailerzone\.info/.*?'
+ __config__ = []
+
+ __description__ = """TrailerZone.info decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("godofdream", "soilfiction@gmail.com")]
diff --git a/pyload/plugins/crypter/TurbobitNet.py b/pyload/plugins/crypter/TurbobitNet.py
new file mode 100644
index 000000000..e038b9a34
--- /dev/null
+++ b/pyload/plugins/crypter/TurbobitNet.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+from pyload.utils import json_loads
+
+
+class TurbobitNet(SimpleCrypter):
+ __name__ = "TurbobitNet"
+ __type__ = "crypter"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?turbobit\.net/download/folder/(?P<ID>\w+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Turbobit.net folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'src=\'/js/lib/grid/icon/folder.png\'> <span>(?P<N>.+?)</span>'
+
+
+ def _getLinks(self, id, page=1):
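+        # The folder grid is paginated: request 200 rows per page and recurse until
+        # an empty page is returned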
+ gridFile = self.load("http://turbobit.net/downloadfolder/gridFile",
+ get={"rootId": id, "rows": 200, "page": page}, decode=True)
+ grid = json_loads(gridFile)
+
+ if grid['rows']:
+ for i in grid['rows']:
+ yield i['id']
+ for id in self._getLinks(id, page + 1):
+ yield id
+ else:
+ return
+
+
+ def getLinks(self):
+ id = re.match(self.__pattern__, self.pyfile.url).group("ID")
+ fixurl = lambda id: "http://turbobit.net/%s.html" % id
+ return map(fixurl, self._getLinks(id))
diff --git a/pyload/plugins/crypter/TusfilesNet.py b/pyload/plugins/crypter/TusfilesNet.py
new file mode 100644
index 000000000..d8f0b604a
--- /dev/null
+++ b/pyload/plugins/crypter/TusfilesNet.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+import math
+import re
+from urlparse import urljoin
+
+from pyload.plugins.internal.XFSCrypter import XFSCrypter
+
+
+class TusfilesNet(XFSCrypter):
+ __name__ = "TusfilesNet"
+ __type__ = "crypter"
+ __version__ = "0.06"
+
+ __pattern__ = r'https?://(?:www\.)?tusfiles\.net/go/(?P<ID>\w+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Tusfiles.net folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ HOSTER_DOMAIN = "tusfiles.net"
+
+ PAGES_PATTERN = r'>\((\d+) \w+\)<'
+
+ URL_REPLACEMENTS = [(__pattern__, r'https://www.tusfiles.net/go/\g<ID>/')]
+
+
+ def loadPage(self, page_n):
+ return self.load(urljoin(self.pyfile.url, str(page_n)), decode=True)
+
+
+ def handleMultiPages(self):
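+        # The folder listing shows 25 links per page, so derive the page count from
+        # the total number of entries reported in the header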
+ pages = re.search(self.PAGES_PATTERN, self.html)
+ if pages:
+            pages = int(math.ceil(int(pages.group(1)) / 25.0))
+ else:
+ return
+
+ for p in xrange(2, pages + 1):
+ self.html = self.loadPage(p)
+ self.links += self.getLinks()
diff --git a/pyload/plugins/crypter/UlozTo.py b/pyload/plugins/crypter/UlozTo.py
new file mode 100644
index 000000000..8538736ca
--- /dev/null
+++ b/pyload/plugins/crypter/UlozTo.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class UlozTo(Crypter):
+ __name__ = "UlozTo"
+ __type__ = "crypter"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?(uloz\.to|ulozto\.(cz|sk|net)|bagruj\.cz|zachowajto\.pl)/(m|soubory)/.*'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Uloz.to folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ FOLDER_PATTERN = r'<ul class="profile_files">(.*?)</ul>'
+ LINK_PATTERN = r'<br /><a href="/([^"]+)">[^<]+</a>'
+ NEXT_PAGE_PATTERN = r'<a class="next " href="/([^"]+)">&nbsp;</a>'
+
+
+ def decrypt(self, pyfile):
+ html = self.load(pyfile.url)
+
+ new_links = []
+ for i in xrange(1, 100):
+ self.logInfo(_("Fetching links from page %i") % i)
+ m = re.search(self.FOLDER_PATTERN, html, re.S)
+ if m is None:
+ self.error(_("FOLDER_PATTERN not found"))
+
+ new_links.extend(re.findall(self.LINK_PATTERN, m.group(1)))
+ m = re.search(self.NEXT_PAGE_PATTERN, html)
+ if m:
+ html = self.load("http://ulozto.net/" + m.group(1))
+ else:
+ break
+ else:
+ self.logInfo(_("Limit of 99 pages reached, aborting"))
+
+ if new_links:
+            self.urls = map(lambda s: "http://ulozto.net/%s" % s, new_links)
diff --git a/pyload/plugins/crypter/UploadableCh.py b/pyload/plugins/crypter/UploadableCh.py
new file mode 100644
index 000000000..4edc08846
--- /dev/null
+++ b/pyload/plugins/crypter/UploadableCh.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class UploadableCh(SimpleCrypter):
+ __name__ = "UploadableCh"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?uploadable\.ch/list/\w+'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Uploadable.ch folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("guidobelix", "guidobelix@hotmail.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ LINK_PATTERN = r'"(.+?)" class="icon_zipfile">'
+ NAME_PATTERN = r'<div class="folder"><span>&nbsp;</span>(?P<N>.+?)</div>'
+ OFFLINE_PATTERN = r'We are sorry... The URL you entered cannot be found on the server.'
+ TEMP_OFFLINE_PATTERN = r'<div class="icon_err">'
diff --git a/pyload/plugins/crypter/UploadedTo.py b/pyload/plugins/crypter/UploadedTo.py
new file mode 100644
index 000000000..1fbed5f86
--- /dev/null
+++ b/pyload/plugins/crypter/UploadedTo.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class UploadedTo(SimpleCrypter):
+ __name__ = "UploadedTo"
+ __type__ = "crypter"
+ __version__ = "0.42"
+
+ __pattern__ = r'http://(?:www\.)?(uploaded|ul)\.(to|net)/(f|folder|list)/(?P<id>\w+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """UploadedTo decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ PLAIN_PATTERN = r'<small class="date"><a href="(?P<plain>[\w/]+)" onclick='
+ NAME_PATTERN = r'<title>(?P<N>.+?)<'
+
+
+ def getLinks(self):
+ m = re.search(self.PLAIN_PATTERN, self.html)
+ if m is None:
+ self.error(_("PLAIN_PATTERN not found"))
+
+ plain_link = urljoin("http://uploaded.net/", m.group('plain'))
+ return self.load(plain_link).split('\n')[:-1]
diff --git a/pyload/plugins/crypter/WiiReloadedOrg.py b/pyload/plugins/crypter/WiiReloadedOrg.py
new file mode 100644
index 000000000..3c48044b0
--- /dev/null
+++ b/pyload/plugins/crypter/WiiReloadedOrg.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class WiiReloadedOrg(DeadCrypter):
+ __name__ = "WiiReloadedOrg"
+ __type__ = "crypter"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?wii-reloaded\.org/protect/get\.php\?i=.+'
+ __config__ = []
+
+ __description__ = """Wii-Reloaded.org decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("hzpz", None)]
diff --git a/pyload/plugins/crypter/WuploadCom.py b/pyload/plugins/crypter/WuploadCom.py
new file mode 100644
index 000000000..dd1c4a168
--- /dev/null
+++ b/pyload/plugins/crypter/WuploadCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadCrypter import DeadCrypter
+
+
+class WuploadCom(DeadCrypter):
+ __name__ = "WuploadCom"
+ __type__ = "crypter"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?wupload\.com/folder/\w+'
+
+ __description__ = """Wupload.com folder decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
diff --git a/pyload/plugins/crypter/XFileSharingPro.py b/pyload/plugins/crypter/XFileSharingPro.py
new file mode 100644
index 000000000..27de7f997
--- /dev/null
+++ b/pyload/plugins/crypter/XFileSharingPro.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.XFSCrypter import XFSCrypter
+
+
+class XFileSharingPro(XFSCrypter):
+ __name__ = "XFileSharingPro"
+ __type__ = "crypter"
+ __version__ = "0.03"
+
+ __pattern__ = r'^unmatchable$'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """XFileSharingPro dummy folder decrypter plugin for hook"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ def _log(self, type, args):
+ msg = " | ".join([str(a).strip() for a in args if a])
+ logger = getattr(self.log, type)
+ logger("%s: %s: %s" % (self.__name__, self.HOSTER_NAME, msg or _("%s MARK" % type.upper())))
+
+
+ def init(self):
+ super(XFileSharingPro, self).init()
+
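+        # The working URL pattern for this dummy plugin is maintained at runtime by the
+        # companion XFileSharingPro hook, so re-read it from the plugin manager before
+        # matching the file URL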
+ self.__pattern__ = self.core.pluginManager.crypterPlugins[self.__name__]['pattern']
+
+ self.HOSTER_DOMAIN = re.match(self.__pattern__, self.pyfile.url).group(1).lower()
+        self.HOSTER_NAME = "".join(part.capitalize() for part in self.HOSTER_DOMAIN.split('.'))
+
+ account = self.core.accountManager.getAccountPlugin(self.HOSTER_NAME)
+
+ if account and account.canUse():
+ self.account = account
+ elif self.account:
+ self.account.HOSTER_DOMAIN = self.HOSTER_DOMAIN
+ else:
+ return
+
+ self.user, data = self.account.selectAccount()
+ self.req = self.account.getAccountRequest(self.user)
+ self.premium = self.account.isPremium(self.user)
diff --git a/pyload/plugins/crypter/XupPl.py b/pyload/plugins/crypter/XupPl.py
new file mode 100644
index 000000000..7be72cf9e
--- /dev/null
+++ b/pyload/plugins/crypter/XupPl.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Crypter import Crypter
+
+
+class XupPl(Crypter):
+ __name__ = "XupPl"
+ __type__ = "crypter"
+ __version__ = "0.1"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?xup\.pl/.*'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Xup.pl decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("z00nx", "z00nx0@gmail.com")]
+
+
+ def decrypt(self, pyfile):
+ header = self.load(pyfile.url, just_header=True)
+ if 'location' in header:
+ self.urls = [header['location']]
+ else:
+ self.fail(_("Unable to find link"))
diff --git a/pyload/plugins/crypter/YoutubeBatch.py b/pyload/plugins/crypter/YoutubeBatch.py
new file mode 100644
index 000000000..d25f991e8
--- /dev/null
+++ b/pyload/plugins/crypter/YoutubeBatch.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.utils import safe_join
+
+
+class YoutubeBatch(Crypter):
+ __name__ = "YoutubeBatch"
+ __type__ = "crypter"
+ __version__ = "1.01"
+
+ __pattern__ = r'https?://(?:www\.|m\.)?youtube\.com/(?P<TYPE>user|playlist|view_play_list)(/|.*?[?&](?:list|p)=)(?P<ID>[\w-]+)'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True),
+ ("likes", "bool", "Grab user (channel) liked videos", False),
+ ("favorites", "bool", "Grab user (channel) favorite videos", False),
+ ("uploads", "bool", "Grab channel unplaylisted videos", True)]
+
+ __description__ = """Youtube.com channel & playlist decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ API_KEY = "AIzaSyCKnWLNlkX-L4oD1aEzqqhRw1zczeD6_k0"
+
+
+ def api_response(self, ref, req):
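+        # Thin wrapper around the YouTube Data API v3: merge in the shared API key and
+        # return the decoded JSON response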
+ req.update({"key": self.API_KEY})
+ url = urljoin("https://www.googleapis.com/youtube/v3/", ref)
+ page = self.load(url, get=req)
+ return json_loads(page)
+
+
+ def getChannel(self, user):
+ channels = self.api_response("channels", {"part": "id,snippet,contentDetails", "forUsername": user, "maxResults": "50"})
+ if channels['items']:
+ channel = channels['items'][0]
+ return {"id": channel['id'],
+ "title": channel['snippet']['title'],
+ "relatedPlaylists": channel['contentDetails']['relatedPlaylists'],
+ "user": user} # One lone channel for user?
+
+
+ def getPlaylist(self, p_id):
+ playlists = self.api_response("playlists", {"part": "snippet", "id": p_id})
+ if playlists['items']:
+ playlist = playlists['items'][0]
+ return {"id": p_id,
+ "title": playlist['snippet']['title'],
+ "channelId": playlist['snippet']['channelId'],
+ "channelTitle": playlist['snippet']['channelTitle']}
+
+
+ def _getPlaylists(self, id, token=None):
+ req = {"part": "id", "maxResults": "50", "channelId": id}
+ if token:
+ req.update({"pageToken": token})
+
+ playlists = self.api_response("playlists", req)
+
+ for playlist in playlists['items']:
+ yield playlist['id']
+
+ if "nextPageToken" in playlists:
+ for item in self._getPlaylists(id, playlists['nextPageToken']):
+ yield item
+
+
+ def getPlaylists(self, ch_id):
+ return map(self.getPlaylist, self._getPlaylists(ch_id))
+
+
+ def _getVideosId(self, id, token=None):
+ req = {"part": "contentDetails", "maxResults": "50", "playlistId": id}
+ if token:
+ req.update({"pageToken": token})
+
+ playlist = self.api_response("playlistItems", req)
+
+ for item in playlist['items']:
+ yield item['contentDetails']['videoId']
+
+ if "nextPageToken" in playlist:
+ for item in self._getVideosId(id, playlist['nextPageToken']):
+ yield item
+
+
+ def getVideosId(self, p_id):
+ return list(self._getVideosId(p_id))
+
+
+ def decrypt(self, pyfile):
+ m = re.match(self.__pattern__, pyfile.url)
+ m_id = m.group("ID")
+ m_type = m.group("TYPE")
+
+ if m_type == "user":
+ self.logDebug("Url recognized as Channel")
+ user = m_id
+ channel = self.getChannel(user)
+
+ if channel:
+ playlists = self.getPlaylists(channel['id'])
+                self.logDebug("%s playlist(s) found on channel \"%s\"" % (len(playlists), channel['title']))
+
+ relatedplaylist = {p_name: self.getPlaylist(p_id) for p_name, p_id in channel['relatedPlaylists'].iteritems()}
+ self.logDebug("Channel's related playlists found = %s" % relatedplaylist.keys())
+
+ relatedplaylist['uploads']['title'] = "Unplaylisted videos"
+ relatedplaylist['uploads']['checkDups'] = True #: checkDups flag
+
+ for p_name, p_data in relatedplaylist.iteritems():
+ if self.getConfig(p_name):
+ p_data['title'] += " of " + user
+ playlists.append(p_data)
+ else:
+ playlists = []
+ else:
+ self.logDebug("Url recognized as Playlist")
+ playlists = [self.getPlaylist(m_id)]
+
+ if not playlists:
+ self.fail(_("No playlist available"))
+
+ addedvideos = []
+ urlize = lambda x: "https://www.youtube.com/watch?v=" + x
+ for p in playlists:
+ p_name = p['title']
+ p_videos = self.getVideosId(p['id'])
+ p_folder = safe_join(self.config['general']['download_folder'], p['channelTitle'], p_name)
+            self.logDebug("%s video(s) found on playlist \"%s\"" % (len(p_videos), p_name))
+
+ if not p_videos:
+ continue
+ elif "checkDups" in p:
+ p_urls = [urlize(v_id) for v_id in p_videos if v_id not in addedvideos]
+                self.logDebug("%s video(s) available on playlist \"%s\" after duplicates cleanup" % (len(p_urls), p_name))
+ else:
+ p_urls = map(urlize, p_videos)
+
+ self.packages.append((p_name, p_urls, p_folder)) #: folder is NOT recognized by pyload 0.4.9!
+
+ addedvideos.extend(p_videos)
diff --git a/pyload/plugins/crypter/__init__.py b/pyload/plugins/crypter/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/crypter/__init__.py
diff --git a/pyload/plugins/hook/AlldebridCom.py b/pyload/plugins/hook/AlldebridCom.py
new file mode 100644
index 000000000..9ed80f101
--- /dev/null
+++ b/pyload/plugins/hook/AlldebridCom.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class AlldebridCom(MultiHoster):
+ __name__ = "AlldebridCom"
+ __type__ = "hook"
+ __version__ = "0.13"
+
+ __config__ = [("https", "bool", "Enable HTTPS", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Alldebrid.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Andy Voigt", "spamsales@online.de")]
+
+
+ def getHoster(self):
+ https = "https" if self.getConfig("https") else "http"
+ page = getURL(https + "://www.alldebrid.com/api.php?action=get_host").replace("\"", "").strip()
+
+ return [x.strip() for x in page.split(",") if x.strip()]
diff --git a/pyload/plugins/hook/BypassCaptcha.py b/pyload/plugins/hook/BypassCaptcha.py
new file mode 100644
index 000000000..112066353
--- /dev/null
+++ b/pyload/plugins/hook/BypassCaptcha.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+
+from pycurl import FORM_FILE, LOW_SPEED_TIME
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getURL, getRequest
+from pyload.plugins.internal.Addon import Hook
+
+
+class BypassCaptchaException(Exception):
+
+ def __init__(self, err):
+ self.err = err
+
+
+ def getCode(self):
+ return self.err
+
+
+ def __str__(self):
+ return "<BypassCaptchaException %s>" % self.err
+
+
+ def __repr__(self):
+ return "<BypassCaptchaException %s>" % self.err
+
+
+class BypassCaptcha(Hook):
+ __name__ = "BypassCaptcha"
+ __type__ = "hook"
+ __version__ = "0.04"
+
+ __config__ = [("force", "bool", "Force BC even if client is connected", False),
+ ("passkey", "password", "Passkey", "")]
+
+ __description__ = """Send captchas to BypassCaptcha.com"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+                   ("Godofdream", "soilfiction@gmail.com"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ PYLOAD_KEY = "4f771155b640970d5607f919a615bdefc67e7d32"
+
+ SUBMIT_URL = "http://bypasscaptcha.com/upload.php"
+ RESPOND_URL = "http://bypasscaptcha.com/check_value.php"
+ GETCREDITS_URL = "http://bypasscaptcha.com/ex_left.php"
+
+
+ def getCredits(self):
+ res = getURL(self.GETCREDITS_URL, post={"key": self.getConfig("passkey")})
+
+ data = dict([x.split(' ', 1) for x in res.splitlines()])
+ return int(data['Left'])
+
+
+ def submit(self, captcha, captchaType="file", match=None):
+ req = getRequest()
+
+ #raise timeout threshold
+ req.c.setopt(LOW_SPEED_TIME, 80)
+
+ try:
+ res = req.load(self.SUBMIT_URL,
+ post={'vendor_key': self.PYLOAD_KEY,
+ 'key': self.getConfig("passkey"),
+ 'gen_task_id': "1",
+ 'file': (FORM_FILE, captcha)},
+ multipart=True)
+ finally:
+ req.close()
+
+ data = dict([x.split(' ', 1) for x in res.splitlines()])
+ if not data or "Value" not in data:
+ raise BypassCaptchaException(res)
+
+ result = data['Value']
+ ticket = data['TaskId']
+ self.logDebug("Result %s : %s" % (ticket, result))
+
+ return ticket, result
+
+
+ def respond(self, ticket, success):
+ try:
+ res = getURL(self.RESPOND_URL, post={"task_id": ticket, "key": self.getConfig("passkey"),
+ "cv": 1 if success else 0})
+ except BadHeader, e:
+ self.logError(_("Could not send response"), e)
+
+
+ def newCaptchaTask(self, task):
+ if "service" in task.data:
+ return False
+
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 0:
+ task.handler.append(self)
+ task.data['service'] = self.__name__
+ task.setWaiting(100)
+ self.processCaptcha(task)
+ else:
+            self.logInfo(_("Your %s account does not have enough credits") % self.__name__)
+
+
+ def captchaCorrect(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ self.respond(task.data['ticket'], True)
+
+
+ def captchaInvalid(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ self.respond(task.data['ticket'], False)
+
+
+ def processCaptcha(self, task):
+ c = task.captchaFile
+ try:
+ ticket, result = self.submit(c)
+ except BypassCaptchaException, e:
+ task.error = e.getCode()
+ return
+
+ task.data['ticket'] = ticket
+ task.setResult(result)
diff --git a/pyload/plugins/hook/Captcha9kw.py b/pyload/plugins/hook/Captcha9kw.py
new file mode 100644
index 000000000..fa4710542
--- /dev/null
+++ b/pyload/plugins/hook/Captcha9kw.py
@@ -0,0 +1,250 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import re
+
+from base64 import b64encode
+from time import sleep
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getURL
+
+from pyload.plugins.internal.Addon import Hook
+
+
+class Captcha9kw(Hook):
+ __name__ = "Captcha9kw"
+ __type__ = "hook"
+ __version__ = "0.24"
+
+ __config__ = [("ssl", "bool", "Use HTTPS", True),
+ ("force", "bool", "Force captcha resolving even if client is connected", True),
+ ("confirm", "bool", "Confirm Captcha (cost +6 credits)", False),
+ ("captchaperhour", "int", "Captcha per hour", "9999"),
+ ("prio", "int", "Priority (max 10)(cost +0 -> +10 credits)", "0"),
+ ("queue", "int", "Max. Queue (max 999)", "50"),
+                  ("hoster_options", "string", "Hoster options (format: pluginname:prio=1:selfsolve=1:confirm=1:timeout=900|...)", "ShareonlineBiz:prio=0:timeout=999 | UploadedTo:prio=0:timeout=999"),
+ ("selfsolve", "bool", "Selfsolve (manually solve your captcha in your 9kw client if active)", "0"),
+ ("passkey", "password", "API key", ""),
+ ("timeout", "int", "Timeout in seconds (min 60, max 3999)", "900")]
+
+ __description__ = """Send captchas to 9kw.eu"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ API_URL = "http://www.9kw.eu/index.cgi"
+
+
+ def coreReady(self):
+ if self.getConfig("ssl"):
+ self.API_URL = self.API_URL.replace("http://", "https://")
+
+
+ def getCredits(self):
+ res = getURL(self.API_URL,
+ get={'apikey': self.getConfig("passkey"),
+ 'pyload': "1",
+ 'source': "pyload",
+ 'action': "usercaptchaguthaben"})
+
+ if res.isdigit():
+ self.logInfo(_("%s credits left") % res)
+ credits = self.info["credits"] = int(res)
+ return credits
+ else:
+ self.logError(res)
+ return 0
+
+
+ def _processCaptcha(self, task):
+ try:
+ with open(task.captchaFile, 'rb') as f:
+ data = f.read()
+
+ except IOError, e:
+ self.logError(e)
+ return
+
+ data = b64encode(data)
+ mouse = 1 if task.isPositional() else 0
+ pluginname = re.search(r'_([^_]*)_\d+.\w+', task.captchaFile).group(1)
+
+ option = {'min' : 2,
+ 'max' : 50,
+ 'phrase' : 0,
+ 'numeric' : 0,
+ 'case_sensitive': 0,
+ 'math' : 0,
+ 'prio' : min(max(self.getConfig("prio"), 0), 10),
+ 'confirm' : self.getConfig("confirm"),
+ 'timeout' : min(max(self.getConfig("timeout"), 300), 3999),
+ 'selfsolve' : self.getConfig("selfsolve"),
+ 'cph' : self.getConfig("captchaperhour")}
+
+        for opt in self.getConfig("hoster_options").split('|'):
+
+ details = map(str.strip, opt.split(':'))
+
+ if not details or details[0].lower() != pluginname.lower():
+ continue
+
+ for d in details:
+ hosteroption = d.split("=")
+
+ if len(hosteroption) < 2 or not hosteroption[1].isdigit():
+ continue
+
+ o = hosteroption[0].lower()
+ if o in option:
+ option[o] = hosteroption[1]
+
+ break
+
+ post_data = {'apikey' : self.getConfig("passkey"),
+ 'prio' : option['prio'],
+ 'confirm' : option['confirm'],
+ 'maxtimeout' : option['timeout'],
+ 'selfsolve' : option['selfsolve'],
+ 'captchaperhour': option['cph'],
+ 'case-sensitive': option['case_sensitive'],
+ 'min_len' : option['min'],
+ 'max_len' : option['max'],
+ 'phrase' : option['phrase'],
+ 'numeric' : option['numeric'],
+ 'math' : option['math'],
+ 'oldsource' : pluginname,
+ 'pyload' : "1",
+ 'source' : "pyload",
+ 'base64' : "1",
+ 'mouse' : mouse,
+ 'file-upload-01': data,
+ 'action' : "usercaptchaupload"}
+
+ for _i in xrange(5):
+ try:
+ res = getURL(self.API_URL, post=post_data)
+ except BadHeader, e:
+ sleep(3)
+ else:
+ if res and res.isdigit():
+ break
+ else:
+ self.logError(_("Bad upload: %s") % res)
+ return
+
+ self.logDebug(_("NewCaptchaID ticket: %s") % res, task.captchaFile)
+
+ task.data["ticket"] = res
+
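+        # Poll 9kw.eu every 5 seconds for the solved captcha until the configured
+        # timeout expires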
+ for _i in xrange(int(self.getConfig("timeout") / 5)):
+ result = getURL(self.API_URL,
+ get={'apikey': self.getConfig("passkey"),
+ 'id' : res,
+ 'pyload': "1",
+ 'info' : "1",
+ 'source': "pyload",
+ 'action': "usercaptchacorrectdata"})
+
+ if not result or result == "NO DATA":
+ sleep(5)
+ else:
+ break
+ else:
+ self.logDebug("Could not send request: %s" % res)
+ result = None
+
+ self.logInfo(_("Captcha result for ticket %s: %s") % (res, result))
+
+ task.setResult(result)
+
+
+ def newCaptchaTask(self, task):
+ if not task.isTextual() and not task.isPositional():
+ return
+
+ if not self.getConfig("passkey"):
+ return
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return
+
+ credits = self.getCredits()
+
+ if not credits:
+            self.logError(_("Your 9kw.eu captcha account does not have enough credits"))
+ return
+
+ queue = min(self.getConfig("queue"), 999)
+ timeout = min(max(self.getConfig("timeout"), 300), 3999)
+ pluginname = re.search(r'_([^_]*)_\d+.\w+', task.captchaFile).group(1)
+
+ for _i in xrange(5):
+ servercheck = getURL("http://www.9kw.eu/grafik/servercheck.txt")
+            if queue < int(re.search(r'queue=(\d+)', servercheck).group(1)):
+ break
+
+ sleep(10)
+ else:
+ self.fail(_("Too many captchas in queue"))
+
+        for opt in self.getConfig("hoster_options").split('|'):
+ details = map(str.strip, opt.split(':'))
+
+ if not details or details[0].lower() != pluginname.lower():
+ continue
+
+ for d in details:
+ hosteroption = d.split("=")
+
+ if (len(hosteroption) > 1
+ and hosteroption[0].lower() == 'timeout'
+ and hosteroption[1].isdigit()):
+ timeout = int(hosteroption[1])
+
+ break
+
+ task.handler.append(self)
+
+ task.setWaiting(timeout)
+
+ self._processCaptcha(task)
+
+
+ def _captchaResponse(self, task, correct):
+ type = "correct" if correct else "refund"
+
+ if 'ticket' not in task.data:
+ self.logDebug("No CaptchaID for %s request (task: %s)" % (type, task))
+ return
+
+ passkey = self.getConfig("passkey")
+
+ for _i in xrange(3):
+ res = getURL(self.API_URL,
+ get={'action' : "usercaptchacorrectback",
+ 'apikey' : passkey,
+ 'api_key': passkey,
+ 'correct': "1" if correct else "2",
+ 'pyload' : "1",
+ 'source' : "pyload",
+ 'id' : task.data["ticket"]})
+
+ self.logDebug("Request %s: %s" % (type, res))
+
+ if res == "OK":
+ break
+
+ sleep(5)
+ else:
+ self.logDebug("Could not send %s request: %s" % (type, res))
+
+
+ def captchaCorrect(self, task):
+ self._captchaResponse(task, True)
+
+
+ def captchaInvalid(self, task):
+ self._captchaResponse(task, False)
diff --git a/pyload/plugins/hook/CaptchaBrotherhood.py b/pyload/plugins/hook/CaptchaBrotherhood.py
new file mode 100644
index 000000000..8c037009a
--- /dev/null
+++ b/pyload/plugins/hook/CaptchaBrotherhood.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import StringIO
+import pycurl
+
+try:
+ from PIL import Image
+except ImportError:
+ import Image
+
+from time import sleep
+from urllib import urlencode
+
+from pyload.network.RequestFactory import getURL, getRequest
+from pyload.plugins.internal.Addon import Hook
+
+
+class CaptchaBrotherhoodException(Exception):
+
+ def __init__(self, err):
+ self.err = err
+
+
+ def getCode(self):
+ return self.err
+
+
+ def __str__(self):
+ return "<CaptchaBrotherhoodException %s>" % self.err
+
+
+ def __repr__(self):
+ return "<CaptchaBrotherhoodException %s>" % self.err
+
+
+class CaptchaBrotherhood(Hook):
+ __name__ = "CaptchaBrotherhood"
+ __type__ = "hook"
+ __version__ = "0.05"
+
+ __config__ = [("username", "str", "Username", ""),
+ ("force", "bool", "Force CT even if client is connected", False),
+ ("passkey", "password", "Password", "")]
+
+ __description__ = """Send captchas to CaptchaBrotherhood.com"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ API_URL = "http://www.captchabrotherhood.com/"
+
+
+ def getCredits(self):
+ res = getURL(self.API_URL + "askCredits.aspx",
+ get={"username": self.getConfig("username"), "password": self.getConfig("passkey")})
+ if not res.startswith("OK"):
+ raise CaptchaBrotherhoodException(res)
+ else:
+ credits = int(res[3:])
+ self.logInfo(_("%d credits left") % credits)
+ self.info['credits'] = credits
+ return credits
+
+
+ def submit(self, captcha, captchaType="file", match=None):
+ try:
+ img = Image.open(captcha)
+ output = StringIO.StringIO()
+ self.logDebug("CAPTCHA IMAGE", img, img.format, img.mode)
+ if img.format in ("GIF", "JPEG"):
+ img.save(output, img.format)
+ else:
+ if img.mode != "RGB":
+ img = img.convert("RGB")
+ img.save(output, "JPEG")
+ data = output.getvalue()
+ output.close()
+ except Exception, e:
+ raise CaptchaBrotherhoodException("Reading or converting captcha image failed: %s" % e)
+
+ req = getRequest()
+
+ url = "%ssendNewCaptcha.aspx?%s" % (self.API_URL,
+ urlencode({"username": self.getConfig("username"),
+ "password": self.getConfig("passkey"),
+ "captchaSource": "pyLoad",
+ "timeout": "80"}))
+
+ req.c.setopt(pycurl.URL, url)
+ req.c.setopt(pycurl.POST, 1)
+ req.c.setopt(pycurl.POSTFIELDS, data)
+ req.c.setopt(pycurl.HTTPHEADER, ["Content-Type: text/html"])
+
+ try:
+ req.c.perform()
+ res = req.getResponse()
+ except Exception, e:
+ raise CaptchaBrotherhoodException("Submit captcha image failed")
+
+ req.close()
+
+ if not res.startswith("OK"):
+ raise CaptchaBrotherhoodException(res[1])
+
+ ticket = res[3:]
+
+ for _i in xrange(15):
+ sleep(5)
+ res = self.get_api("askCaptchaResult", ticket)
+ if res.startswith("OK-answered"):
+ return ticket, res[12:]
+
+ raise CaptchaBrotherhoodException("No solution received in time")
+
+
+ def get_api(self, api, ticket):
+ res = getURL("%s%s.aspx" % (self.API_URL, api),
+ get={"username": self.getConfig("username"),
+ "password": self.getConfig("passkey"),
+ "captchaID": ticket})
+ if not res.startswith("OK"):
+ raise CaptchaBrotherhoodException("Unknown response: %s" % res)
+
+ return res
+
+
+ def newCaptchaTask(self, task):
+ if "service" in task.data:
+ return False
+
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("username") or not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 10:
+ task.handler.append(self)
+ task.data['service'] = self.__name__
+ task.setWaiting(100)
+ self.processCaptcha(task)
+ else:
+            self.logInfo(_("Your CaptchaBrotherhood account does not have enough credits"))
+
+
+ def captchaInvalid(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ res = self.get_api("complainCaptcha", task.data['ticket'])
+
+
+ def processCaptcha(self, task):
+ c = task.captchaFile
+ try:
+ ticket, result = self.submit(c)
+ except CaptchaBrotherhoodException, e:
+ task.error = e.getCode()
+ return
+
+ task.data['ticket'] = ticket
+ task.setResult(result)
diff --git a/pyload/plugins/hook/DeathByCaptcha.py b/pyload/plugins/hook/DeathByCaptcha.py
new file mode 100644
index 000000000..429258f89
--- /dev/null
+++ b/pyload/plugins/hook/DeathByCaptcha.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import re
+
+from base64 import b64encode
+from pycurl import FORM_FILE, HTTPHEADER
+from time import sleep
+
+from pyload.utils import json_loads
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getRequest
+from pyload.plugins.internal.Addon import Hook
+
+
+class DeathByCaptchaException(Exception):
+ DBC_ERRORS = {'not-logged-in': 'Access denied, check your credentials',
+ 'invalid-credentials': 'Access denied, check your credentials',
+ 'banned': 'Access denied, account is suspended',
+ 'insufficient-funds': 'Insufficient account balance to decrypt CAPTCHA',
+ 'invalid-captcha': 'CAPTCHA is not a valid image',
+ 'service-overload': 'CAPTCHA was rejected due to service overload, try again later',
+ 'invalid-request': 'Invalid request',
+ 'timed-out': 'No CAPTCHA solution received in time'}
+
+
+ def __init__(self, err):
+ self.err = err
+
+
+ def getCode(self):
+ return self.err
+
+
+ def getDesc(self):
+ if self.err in self.DBC_ERRORS.keys():
+ return self.DBC_ERRORS[self.err]
+ else:
+ return self.err
+
+
+ def __str__(self):
+ return "<DeathByCaptchaException %s>" % self.err
+
+
+ def __repr__(self):
+ return "<DeathByCaptchaException %s>" % self.err
+
+
+class DeathByCaptcha(Hook):
+ __name__ = "DeathByCaptcha"
+ __type__ = "hook"
+ __version__ = "0.03"
+
+ __config__ = [("username", "str", "Username", ""),
+ ("passkey", "password", "Password", ""),
+ ("force", "bool", "Force DBC even if client is connected", False)]
+
+ __description__ = """Send captchas to DeathByCaptcha.com"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ API_URL = "http://api.dbcapi.me/api/"
+
+
+ def call_api(self, api="captcha", post=False, multipart=False):
+ req = getRequest()
+ req.c.setopt(HTTPHEADER, ["Accept: application/json", "User-Agent: pyLoad %s" % self.core.version])
+
+ if post:
+ if not isinstance(post, dict):
+ post = {}
+ post.update({"username": self.getConfig("username"),
+ "password": self.getConfig("passkey")})
+
+ res = None
+ try:
+ json = req.load("%s%s" % (self.API_URL, api),
+ post=post,
+ multipart=multipart)
+ self.logDebug(json)
+ res = json_loads(json)
+
+ if "error" in res:
+ raise DeathByCaptchaException(res['error'])
+ elif "status" not in res:
+ raise DeathByCaptchaException(str(res))
+
+ except BadHeader, e:
+ if 403 == e.code:
+ raise DeathByCaptchaException('not-logged-in')
+ elif 413 == e.code:
+ raise DeathByCaptchaException('invalid-captcha')
+ elif 503 == e.code:
+ raise DeathByCaptchaException('service-overload')
+ elif e.code in (400, 405):
+ raise DeathByCaptchaException('invalid-request')
+ else:
+ raise
+
+ finally:
+ req.close()
+
+ return res
+
+
+ def getCredits(self):
+ res = self.call_api("user", True)
+
+ if 'is_banned' in res and res['is_banned']:
+ raise DeathByCaptchaException('banned')
+ elif 'balance' in res and 'rate' in res:
+ self.info.update(res)
+ else:
+ raise DeathByCaptchaException(res)
+
+
+ def getStatus(self):
+ res = self.call_api("status", False)
+
+ if 'is_service_overloaded' in res and res['is_service_overloaded']:
+ raise DeathByCaptchaException('service-overload')
+
+
+ def submit(self, captcha, captchaType="file", match=None):
+ #workaround multipart-post bug in HTTPRequest.py
+ if re.match("^\w*$", self.getConfig("passkey")):
+ multipart = True
+ data = (FORM_FILE, captcha)
+ else:
+ multipart = False
+ with open(captcha, 'rb') as f:
+ data = f.read()
+ data = "base64:" + b64encode(data)
+
+ res = self.call_api("captcha", {"captchafile": data}, multipart)
+
+ if "captcha" not in res:
+ raise DeathByCaptchaException(res)
+ ticket = res['captcha']
+
+ for _i in xrange(24):
+ sleep(5)
+ res = self.call_api("captcha/%d" % ticket, False)
+ if res['text'] and res['is_correct']:
+ break
+ else:
+ raise DeathByCaptchaException('timed-out')
+
+ result = res['text']
+ self.logDebug("Result %s : %s" % (ticket, result))
+
+ return ticket, result
+
+
+ def newCaptchaTask(self, task):
+ if "service" in task.data:
+ return False
+
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("username") or not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ try:
+ self.getStatus()
+ self.getCredits()
+ except DeathByCaptchaException, e:
+ self.logError(e.getDesc())
+ return False
+
+ balance, rate = self.info['balance'], self.info['rate']
+ self.logInfo(_("Account balance"),
+ _("US$%.3f (%d captchas left at %.2f cents each)") % (balance / 100,
+ balance // rate, rate))
+
+ if balance > rate:
+ task.handler.append(self)
+ task.data['service'] = self.__name__
+ task.setWaiting(180)
+ self.processCaptcha(task)
+
+
+ def captchaInvalid(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ try:
+ res = self.call_api("captcha/%d/report" % task.data['ticket'], True)
+
+ except DeathByCaptchaException, e:
+ self.logError(e.getDesc())
+
+ except Exception, e:
+ self.logError(e)
+
+
+ def processCaptcha(self, task):
+ c = task.captchaFile
+ try:
+ ticket, result = self.submit(c)
+ except DeathByCaptchaException, e:
+ task.error = e.getCode()
+ self.logError(e.getDesc())
+ return
+
+ task.data['ticket'] = ticket
+ task.setResult(result)
diff --git a/pyload/plugins/hook/DebridItaliaCom.py b/pyload/plugins/hook/DebridItaliaCom.py
new file mode 100644
index 000000000..9c8f866f0
--- /dev/null
+++ b/pyload/plugins/hook/DebridItaliaCom.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class DebridItaliaCom(MultiHoster):
+ __name__ = "DebridItaliaCom"
+ __type__ = "hook"
+ __version__ = "0.07"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Debriditalia.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def getHoster(self):
+ return ["netload.in", "hotfile.com", "rapidshare.com", "multiupload.com",
+ "uploading.com", "megashares.com", "crocko.com", "filepost.com",
+ "bitshare.com", "share-links.biz", "putlocker.com", "uploaded.to",
+ "speedload.org", "rapidgator.net", "likeupload.net", "cyberlocker.ch",
+ "depositfiles.com", "extabit.com", "filefactory.com", "sharefiles.co",
+ "ryushare.com", "tusfiles.net", "nowvideo.co", "cloudzer.net", "letitbit.net",
+ "easybytez.com", "uptobox.com", "ddlstorage.com"]
diff --git a/pyload/plugins/hook/EasybytezCom.py b/pyload/plugins/hook/EasybytezCom.py
new file mode 100644
index 000000000..15033c8e7
--- /dev/null
+++ b/pyload/plugins/hook/EasybytezCom.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class EasybytezCom(MultiHoster):
+ __name__ = "EasybytezCom"
+ __type__ = "hook"
+ __version__ = "0.03"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "")]
+
+ __description__ = """EasyBytez.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def getHoster(self):
+ self.account = self.core.accountManager.getAccountPlugin(self.__name__)
+ user = self.account.selectAccount()[0]
+
+ try:
+ req = self.account.getAccountRequest(user)
+ page = req.load("http://www.easybytez.com")
+
+ m = re.search(r'</textarea>\s*Supported sites:(.*)', page)
+ return m.group(1).split(',')
+ except Exception, e:
+ self.logDebug(e)
+ self.logWarning(_("Unable to load supported hoster list, using last known"))
+ return ["bitshare.com", "crocko.com", "ddlstorage.com", "depositfiles.com", "extabit.com", "hotfile.com",
+ "mediafire.com", "netload.in", "rapidgator.net", "rapidshare.com", "uploading.com", "uload.to",
+ "uploaded.to"]
diff --git a/pyload/plugins/hook/ExpertDecoders.py b/pyload/plugins/hook/ExpertDecoders.py
new file mode 100644
index 000000000..658a55288
--- /dev/null
+++ b/pyload/plugins/hook/ExpertDecoders.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+from base64 import b64encode
+from pycurl import LOW_SPEED_TIME
+from uuid import uuid4
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getURL, getRequest
+from pyload.plugins.internal.Addon import Hook
+
+
+class ExpertDecoders(Hook):
+ __name__ = "ExpertDecoders"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("force", "bool", "Force CT even if client is connected", False),
+ ("passkey", "password", "Access key", "")]
+
+ __description__ = """Send captchas to expertdecoders.com"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ API_URL = "http://www.fasttypers.org/imagepost.ashx"
+
+
+ def getCredits(self):
+ res = getURL(self.API_URL, post={"key": self.getConfig("passkey"), "action": "balance"})
+
+ if res.isdigit():
+ self.logInfo(_("%s credits left") % res)
+ self.info['credits'] = credits = int(res)
+ return credits
+ else:
+ self.logError(res)
+ return 0
+
+
+ def processCaptcha(self, task):
+ task.data['ticket'] = ticket = uuid4()
+ result = None
+
+ with open(task.captchaFile, 'rb') as f:
+ data = f.read()
+ data = b64encode(data)
+
+ req = getRequest()
+ #raise timeout threshold
+ req.c.setopt(LOW_SPEED_TIME, 80)
+
+ try:
+ result = req.load(self.API_URL, post={"action": "upload", "key": self.getConfig("passkey"),
+ "file": data, "gen_task_id": ticket})
+ finally:
+ req.close()
+
+ self.logDebug("Result %s : %s" % (ticket, result))
+ task.setResult(result)
+
+
+ def newCaptchaTask(self, task):
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 0:
+ task.handler.append(self)
+ task.setWaiting(100)
+ self.processCaptcha(task)
+ else:
+            self.logInfo(_("Your ExpertDecoders account does not have enough credits"))
+
+
+ def captchaInvalid(self, task):
+ if "ticket" in task.data:
+
+ try:
+ res = getURL(self.API_URL,
+ post={'action': "refund", 'key': self.getConfig("passkey"), 'gen_task_id': task.data['ticket']})
+                self.logInfo(_("Request refund"), res)
+
+ except BadHeader, e:
+ self.logError(_("Could not send refund request"), e)
diff --git a/pyload/plugins/hook/FastixRu.py b/pyload/plugins/hook/FastixRu.py
new file mode 100644
index 000000000..a7a5e6b8c
--- /dev/null
+++ b/pyload/plugins/hook/FastixRu.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.utils import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class FastixRu(MultiHoster):
+ __name__ = "FastixRu"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Fastix.ru hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Massimo Rosamilia", "max@spiritix.eu")]
+
+
+ def getHoster(self):
+ page = getURL(
+ "http://fastix.ru/api_v2/?apikey=5182964c3f8f9a7f0b00000a_kelmFB4n1IrnCDYuIFn2y&sub=allowed_sources")
+ host_list = json_loads(page)
+ host_list = host_list['allow']
+ return host_list
diff --git a/pyload/plugins/hook/FreeWayMe.py b/pyload/plugins/hook/FreeWayMe.py
new file mode 100644
index 000000000..b9955c90a
--- /dev/null
+++ b/pyload/plugins/hook/FreeWayMe.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class FreeWayMe(MultiHoster):
+ __name__ = "FreeWayMe"
+ __type__ = "hook"
+ __version__ = "0.11"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """FreeWay.me hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Nicolas Giese", "james@free-way.me")]
+
+
+ def getHoster(self):
+ hostis = getURL("https://www.free-way.me/ajax/jd.php", get={"id": 3}).replace("\"", "").strip()
+ self.logDebug("Hosters", hostis)
+ return [x.strip() for x in hostis.split(",") if x.strip()]
diff --git a/pyload/plugins/hook/ImageTyperz.py b/pyload/plugins/hook/ImageTyperz.py
new file mode 100644
index 000000000..57a734884
--- /dev/null
+++ b/pyload/plugins/hook/ImageTyperz.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import re
+
+from base64 import b64encode
+from pycurl import FORM_FILE, LOW_SPEED_TIME
+
+from pyload.network.RequestFactory import getURL, getRequest
+from pyload.plugins.internal.Addon import Hook
+
+
+class ImageTyperzException(Exception):
+
+ def __init__(self, err):
+ self.err = err
+
+
+ def getCode(self):
+ return self.err
+
+
+ def __str__(self):
+ return "<ImageTyperzException %s>" % self.err
+
+
+ def __repr__(self):
+ return "<ImageTyperzException %s>" % self.err
+
+
+class ImageTyperz(Hook):
+ __name__ = "ImageTyperz"
+ __type__ = "hook"
+ __version__ = "0.04"
+
+ __config__ = [("username", "str", "Username", ""),
+ ("passkey", "password", "Password", ""),
+ ("force", "bool", "Force IT even if client is connected", False)]
+
+ __description__ = """Send captchas to ImageTyperz.com"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ SUBMIT_URL = "http://captchatypers.com/Forms/UploadFileAndGetTextNEW.ashx"
+ RESPOND_URL = "http://captchatypers.com/Forms/SetBadImage.ashx"
+ GETCREDITS_URL = "http://captchatypers.com/Forms/RequestBalance.ashx"
+
+
+ def getCredits(self):
+ res = getURL(self.GETCREDITS_URL,
+ post={'action': "REQUESTBALANCE",
+ 'username': self.getConfig("username"),
+ 'password': self.getConfig("passkey")})
+
+ if res.startswith('ERROR'):
+ raise ImageTyperzException(res)
+
+ try:
+ balance = float(res)
+ except:
+ raise ImageTyperzException("Invalid response")
+
+ self.logInfo(_("Account balance: $%s left") % res)
+ return balance
+
+
+ def submit(self, captcha, captchaType="file", match=None):
+ req = getRequest()
+ #raise timeout threshold
+ req.c.setopt(LOW_SPEED_TIME, 80)
+
+ try:
+ #workaround multipart-post bug in HTTPRequest.py
+ if re.match("^\w*$", self.getConfig("passkey")):
+ multipart = True
+ data = (FORM_FILE, captcha)
+ else:
+ multipart = False
+ with open(captcha, 'rb') as f:
+ data = f.read()
+ data = b64encode(data)
+
+ res = req.load(self.SUBMIT_URL,
+ post={'action': "UPLOADCAPTCHA",
+ 'username': self.getConfig("username"),
+ 'password': self.getConfig("passkey"), "file": data},
+ multipart=multipart)
+ finally:
+ req.close()
+
+ if res.startswith("ERROR"):
+ raise ImageTyperzException(res)
+ else:
+ data = res.split('|')
+ if len(data) == 2:
+ ticket, result = data
+ else:
+ raise ImageTyperzException("Unknown response: %s" % res)
+
+ return ticket, result
+
+
+ def newCaptchaTask(self, task):
+ if "service" in task.data:
+ return False
+
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("username") or not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 0:
+ task.handler.append(self)
+ task.data['service'] = self.__name__
+ task.setWaiting(100)
+ self.processCaptcha(task)
+ else:
+            self.logInfo(_("Your %s account does not have enough credits") % self.__name__)
+
+
+ def captchaInvalid(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ res = getURL(self.RESPOND_URL,
+ post={'action': "SETBADIMAGE",
+ 'username': self.getConfig("username"),
+ 'password': self.getConfig("passkey"),
+ 'imageid': task.data['ticket']})
+
+ if res == "SUCCESS":
+ self.logInfo(_("Bad captcha solution received, requested refund"))
+ else:
+ self.logError(_("Bad captcha solution received, refund request failed"), res)
+
+
+ def processCaptcha(self, task):
+ c = task.captchaFile
+ try:
+ ticket, result = self.submit(c)
+ except ImageTyperzException, e:
+ task.error = e.getCode()
+ return
+
+ task.data['ticket'] = ticket
+ task.setResult(result)
diff --git a/pyload/plugins/hook/LinkdecrypterCom.py b/pyload/plugins/hook/LinkdecrypterCom.py
new file mode 100644
index 000000000..95145d462
--- /dev/null
+++ b/pyload/plugins/hook/LinkdecrypterCom.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Addon import Hook
+from pyload.utils import remove_chars
+
+
+class LinkdecrypterCom(Hook):
+ __name__ = "LinkdecrypterCom"
+ __type__ = "hook"
+ __version__ = "0.20"
+
+ __description__ = """Linkdecrypter.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def coreReady(self):
+ try:
+ self.loadPatterns()
+ except Exception, e:
+ self.logError(e)
+
+
+ def loadPatterns(self):
+ html = getURL("http://linkdecrypter.com/")
+
+ m = re.search(r'<title>', html)
+ if m is None:
+ self.logError(_("Linkdecrypter site is down"))
+ return
+
+ m = re.search(r'<b>Supported\(\d+\)</b>: <i>([^+<]*)', html)
+ if m is None:
+ self.logError(_("Crypter list not found"))
+ return
+
+ builtin = [name.lower() for name in self.core.pluginManager.crypterPlugins.keys()]
+ builtin.append("downloadserienjunkiesorg")
+
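+ # keep only crypters that pyLoad does not already handle with a dedicated plugin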
+ crypter_pattern = re.compile("(\w[\w.-]+)")
+ online = []
+ for crypter in m.group(1).split(', '):
+ m = re.match(crypter_pattern, crypter)
+ if m and remove_chars(m.group(1), "-.") not in builtin:
+ online.append(m.group(1).replace(".", "\\."))
+
+ if not online:
+ self.logError(_("Crypter list is empty"))
+ return
+
+ regexp = r'https?://([^.]+\.)*?(%s)/.*' % '|'.join(online)
+
+ dict = self.core.pluginManager.crypterPlugins[self.__name__]
+ dict['pattern'] = regexp
+ dict['re'] = re.compile(regexp)
+
+ self.logDebug("Loaded pattern: %s" % regexp)
diff --git a/pyload/plugins/hook/LinksnappyCom.py b/pyload/plugins/hook/LinksnappyCom.py
new file mode 100644
index 000000000..0957b6a91
--- /dev/null
+++ b/pyload/plugins/hook/LinksnappyCom.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.utils import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class LinksnappyCom(MultiHoster):
+ __name__ = "LinksnappyCom"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Linksnappy.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def getHoster(self):
+ json_data = getURL('http://gen.linksnappy.com/lseAPI.php?act=FILEHOSTS')
+ json_data = json_loads(json_data)
+
+ return json_data['return'].keys()
diff --git a/pyload/plugins/hook/MegaDebridEu.py b/pyload/plugins/hook/MegaDebridEu.py
new file mode 100644
index 000000000..6c3e2b03a
--- /dev/null
+++ b/pyload/plugins/hook/MegaDebridEu.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+from pyload.utils import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class MegaDebridEu(MultiHoster):
+ __name__ = "MegaDebridEu"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("unloadFailing", "bool", "Revert to standard download if download fails", False)]
+
+ __description__ = """mega-debrid.eu hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("D.Ducatel", "dducatel@je-geek.fr")]
+
+
+ def getHoster(self):
+ response = getURL('http://www.mega-debrid.eu/api.php?action=getHosters')
+ json_data = json_loads(response)
+
+ if json_data['response_code'] == "ok":
+ host_list = [element[0] for element in json_data['hosters']]
+ else:
+ self.logError(_("Unable to retrieve hoster list"))
+ host_list = list()
+
+ return host_list
diff --git a/pyload/plugins/hook/MultishareCz.py b/pyload/plugins/hook/MultishareCz.py
new file mode 100644
index 000000000..f1a12acb8
--- /dev/null
+++ b/pyload/plugins/hook/MultishareCz.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class MultishareCz(MultiHoster):
+ __name__ = "MultishareCz"
+ __type__ = "hook"
+ __version__ = "0.04"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "uloz.to")]
+
+ __description__ = """MultiShare.cz hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_PATTERN = r'<img class="logo-shareserveru"[^>]*?alt="([^"]+)"></td>\s*<td class="stav">[^>]*?alt="OK"'
+
+
+ def getHoster(self):
+ page = getURL("http://www.multishare.cz/monitoring/")
+ return re.findall(self.HOSTER_PATTERN, page)
diff --git a/pyload/plugins/hook/MyfastfileCom.py b/pyload/plugins/hook/MyfastfileCom.py
new file mode 100644
index 000000000..affaa2261
--- /dev/null
+++ b/pyload/plugins/hook/MyfastfileCom.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+from pyload.utils import json_loads
+
+
+class MyfastfileCom(MultiHoster):
+ __name__ = "MyfastfileCom"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Myfastfile.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+
+ def getHoster(self):
+ json_data = getURL('http://myfastfile.com/api.php?hosts', decode=True)
+ self.logDebug("JSON data", json_data)
+ json_data = json_loads(json_data)
+
+ return json_data['hosts']
diff --git a/pyload/plugins/hook/OverLoadMe.py b/pyload/plugins/hook/OverLoadMe.py
new file mode 100644
index 000000000..2766165fd
--- /dev/null
+++ b/pyload/plugins/hook/OverLoadMe.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class OverLoadMe(MultiHoster):
+ __name__ = "OverLoadMe"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("https", "bool", "Enable HTTPS", True),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 12)]
+
+ __description__ = """Over-Load.me hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("marley", "marley@over-load.me")]
+
+
+ def getHoster(self):
+ https = "https" if self.getConfig("https") else "http"
+ page = getURL(https + "://api.over-load.me/hoster.php",
+ get={"auth": "0001-cb1f24dadb3aa487bda5afd3b76298935329be7700cd7-5329be77-00cf-1ca0135f"}
+ ).replace("\"", "").strip()
+ self.logDebug("Hosterlist", page)
+
+ return [x.strip() for x in page.split(",") if x.strip()]
diff --git a/pyload/plugins/hook/PremiumTo.py b/pyload/plugins/hook/PremiumTo.py
new file mode 100644
index 000000000..e3c9823f6
--- /dev/null
+++ b/pyload/plugins/hook/PremiumTo.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class PremiumTo(MultiHoster):
+ __name__ = "PremiumTo"
+ __type__ = "hook"
+ __version__ = "0.04"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for downloads from supported hosters:", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "")]
+
+ __description__ = """Premium.to hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+
+ def getHoster(self):
+ page = getURL("http://premium.to/api/hosters.php",
+ get={'username': self.account.username, 'password': self.account.password})
+ return [x.strip() for x in page.replace("\"", "").split(";")]
+
+
+ def coreReady(self):
+ self.account = self.core.accountManager.getAccountPlugin("PremiumTo")
+
+ user = self.account.selectAccount()[0]
+
+ if not user:
+ self.logError(_("Please add your premium.to account first and restart pyLoad"))
+ return
+
+ return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/hook/PremiumizeMe.py b/pyload/plugins/hook/PremiumizeMe.py
new file mode 100644
index 000000000..a751e7b61
--- /dev/null
+++ b/pyload/plugins/hook/PremiumizeMe.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+from pyload.utils import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class PremiumizeMe(MultiHoster):
+ __name__ = "PremiumizeMe"
+ __type__ = "hook"
+ __version__ = "0.12"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to stanard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Premiumize.me hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Florian Franzen", "FlorianFranzen@gmail.com")]
+
+
+ def getHoster(self):
+ # If no accounts are available there will be no hosters available
+ if not self.account or not self.account.canUse():
+ return []
+
+ # Get account data
+ (user, data) = self.account.selectAccount()
+
+ # Get supported hosters list from premiumize.me using the
+ # json API v1 (see https://secure.premiumize.me/?show=api)
+ answer = getURL("https://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s" % (
+ user, data['password']))
+ data = json_loads(answer)
+
+ # If account is not valid there are no hosters available
+ if data['status'] != 200:
+ return []
+
+ # Extract hosters from json file
+ return data['result']['hosterlist']
+
+
+ def coreReady(self):
+ # Get account plugin and check if there is a valid account available
+ self.account = self.core.accountManager.getAccountPlugin("PremiumizeMe")
+ if not self.account.canUse():
+ self.account = None
+ self.logError(_("Please add a valid premiumize.me account first and restart pyLoad"))
+ return
+
+ # Run the overwritten coreReady which actually enables the multihoster hook
+ return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/hook/RPNetBiz.py b/pyload/plugins/hook/RPNetBiz.py
new file mode 100644
index 000000000..3bbdcf839
--- /dev/null
+++ b/pyload/plugins/hook/RPNetBiz.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+from pyload.utils import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class RPNetBiz(MultiHoster):
+ __name__ = "RPNetBiz"
+ __type__ = "hook"
+ __version__ = "0.1"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to stanard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """RPNet.biz hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Dman", "dmanugm@gmail.com")]
+
+
+ def getHoster(self):
+ # No hosts supported if no account
+ if not self.account or not self.account.canUse():
+ return []
+
+ # Get account data
+ (user, data) = self.account.selectAccount()
+
+ res = getURL("https://premium.rpnet.biz/client_api.php",
+ get={"username": user, "password": data['password'], "action": "showHosterList"})
+ hoster_list = json_loads(res)
+
+ # If account is not valid there are no hosters available
+ if 'error' in hoster_list:
+ return []
+
+ # Extract hosters from json file
+ return hoster_list['hosters']
+
+
+ def coreReady(self):
+ # Get account plugin and check if there is a valid account available
+ self.account = self.core.accountManager.getAccountPlugin("RPNetBiz")
+ if not self.account.canUse():
+ self.account = None
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "rpnet")
+ return
+
+ # Run the overwritten coreReady which actually enables the multihoster hook
+ return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/hook/RealdebridCom.py b/pyload/plugins/hook/RealdebridCom.py
new file mode 100644
index 000000000..a6f79a97d
--- /dev/null
+++ b/pyload/plugins/hook/RealdebridCom.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class RealdebridCom(MultiHoster):
+ __name__ = "RealdebridCom"
+ __type__ = "hook"
+ __version__ = "0.43"
+
+ __config__ = [("https", "bool", "Enable HTTPS", False),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to stanard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Real-Debrid.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Devirex Hazzard", "naibaf_11@yahoo.de")]
+
+
+ def getHoster(self):
+ https = "https" if self.getConfig("https") else "http"
+ page = getURL(https + "://real-debrid.com/api/hosters.php").replace("\"", "").strip()
+
+ return [x.strip() for x in page.split(",") if x.strip()]
diff --git a/pyload/plugins/hook/RehostTo.py b/pyload/plugins/hook/RehostTo.py
new file mode 100644
index 000000000..2c8739869
--- /dev/null
+++ b/pyload/plugins/hook/RehostTo.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class RehostTo(MultiHoster):
+ __name__ = "RehostTo"
+ __type__ = "hook"
+ __version__ = "0.43"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to stanard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24)]
+
+ __description__ = """Rehost.to hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ def getHoster(self):
+ page = getURL("http://rehost.to/api.php?cmd=get_supported_och_dl&long_ses=%s" % self.long_ses)
+ return [x.strip() for x in page.replace("\"", "").split(",")]
+
+
+ def coreReady(self):
+ self.account = self.core.accountManager.getAccountPlugin("RehostTo")
+
+ user = self.account.selectAccount()[0]
+
+ if not user:
+ self.logError(_("Please add your rehost.to account first and restart pyLoad"))
+ return
+
+ data = self.account.getAccountInfo(user)
+ self.ses = data['ses']
+ self.long_ses = data['long_ses']
+
+ return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/hook/SimplyPremiumCom.py b/pyload/plugins/hook/SimplyPremiumCom.py
new file mode 100644
index 000000000..9945cce38
--- /dev/null
+++ b/pyload/plugins/hook/SimplyPremiumCom.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+from pyload.utils import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class SimplyPremiumCom(MultiHoster):
+ __name__ = "SimplyPremiumCom"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", "False"),
+ ("interval", "int", "Reload interval in hours (0 to disable)", "24")]
+
+ __description__ = """Simply-Premium.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("EvolutionClip", "evolutionclip@live.de")]
+
+
+ def getHoster(self):
+ json_data = getURL('http://www.simply-premium.com/api/hosts.php?format=json&online=1')
+ json_data = json_loads(json_data)
+
+ host_list = [element['regex'] for element in json_data['result']]
+
+ return host_list
diff --git a/pyload/plugins/hook/SimplydebridCom.py b/pyload/plugins/hook/SimplydebridCom.py
new file mode 100644
index 000000000..4668da45b
--- /dev/null
+++ b/pyload/plugins/hook/SimplydebridCom.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class SimplydebridCom(MultiHoster):
+ __name__ = "SimplydebridCom"
+ __type__ = "hook"
+ __version__ = "0.01"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "")]
+
+ __description__ = """Simply-Debrid.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Kagenoshin", "kagenoshin@gmx.ch")]
+
+
+ def getHoster(self):
+ page = getURL("http://simply-debrid.com/api.php?list=1")
+ return [x.strip() for x in page.rstrip(';').replace("\"", "").split(";")]
diff --git a/pyload/plugins/hook/UnrestrictLi.py b/pyload/plugins/hook/UnrestrictLi.py
new file mode 100644
index 000000000..cfe580048
--- /dev/null
+++ b/pyload/plugins/hook/UnrestrictLi.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+from pyload.utils import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class UnrestrictLi(MultiHoster):
+ __name__ = "UnrestrictLi"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", ""),
+ ("unloadFailing", "bool", "Revert to standard download if download fails", False),
+ ("interval", "int", "Reload interval in hours (0 to disable)", 24),
+ ("history", "bool", "Delete History", False)]
+
+ __description__ = """Unrestrict.li hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def getHoster(self):
+ json_data = getURL('http://unrestrict.li/api/jdownloader/hosts.php?format=json')
+ json_data = json_loads(json_data)
+
+ host_list = [element['host'] for element in json_data['result']]
+
+ return host_list
diff --git a/pyload/plugins/hook/XFileSharingPro.py b/pyload/plugins/hook/XFileSharingPro.py
new file mode 100644
index 000000000..520e30964
--- /dev/null
+++ b/pyload/plugins/hook/XFileSharingPro.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Addon import Hook
+
+
+class XFileSharingPro(Hook):
+ __name__ = "XFileSharingPro"
+ __type__ = "hook"
+ __version__ = "0.22"
+
+ __config__ = [("activated", "bool", "Activated", True),
+ ("use_hoster_list", "bool", "Load listed hosters only", False),
+ ("use_crypter_list", "bool", "Load listed crypters only", False),
+ ("use_builtin_list", "bool", "Load built-in plugin list", True),
+ ("hoster_list", "str", "Hoster list (comma separated)", ""),
+ ("crypter_list", "str", "Crypter list (comma separated)", "")]
+
+ __description__ = """Load XFileSharingPro based hosters and crypter which don't need a own plugin to run"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ # event_list = ["pluginConfigChanged"]
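+ # regexp[type] = (catch-all pattern, template that gets filled with the configured name list)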
+ regexp = {'hoster' : (r'https?://(?:www\.)?([\w^_]+(?:\.[a-zA-Z]{2,})+(?:\:\d+)?)/(?:embed-)?\w{12}',
+ r'https?://(?:[^/]+\.)?(%s)/(?:embed-)?\w{12}\W?'),
+ 'crypter': (r'https?://(?:www\.)?([\w^_]+(?:\.[a-zA-Z]{2,})+(?:\:\d+)?)/(?:user|folder)s?/\w+',
+ r'https?://(?:[^/]+\.)?(%s)/(?:user|folder)s?/\w+')}
+
+ HOSTER_LIST = [#WORKING HOSTERS:
+ "eyesfile.ca", "file4safe.com", "fileband.com", "filedwon.com", "filevice.com", "hostingbulk.com",
+ "linestorage.com", "ravishare.com", "sharesix.com", "thefile.me", "verzend.be", "xvidstage.com",
+ #NOT TESTED:
+ "101shared.com", "4upfiles.com", "filemaze.ws", "filenuke.com", "linkzhost.com", "mightyupload.com",
+ "rockdizfile.com", "sharebeast.com", "sharerepo.com", "shareswift.com", "uploadbaz.com", "uploadc.com",
+ "vidbull.com", "zalaa.com", "zomgupload.com",
+ #NOT WORKING:
+ "amonshare.com", "banicrazy.info", "boosterking.com", "host4desi.com", "laoupload.com", "rd-fs.com"]
+ CRYPTER_LIST = []
+
+
+ # def pluginConfigChanged(self.__name__, plugin, name, value):
+ # self.loadPattern()
+
+
+ def coreReady(self):
+ self.loadPattern()
+
+
+ def loadPattern(self):
+ use_builtin_list = self.getConfig('use_builtin_list')
+
+ for type in ("hoster", "crypter"):
+ every_plugin = not self.getConfig("use_%s_list" % type)
+
+ if every_plugin:
+ self.logInfo(_("Handling any %s I can!") % type)
+ pattern = self.regexp[type][0]
+ else:
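+ # normalize the configured list: drop backslashes, accept "|", ";" or "," as separators and lowercase everything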
+ s = self.getConfig('%s_list' % type).replace('\\', '').replace('|', ',').replace(';', ',').lower()
+ plugin_list = set([x.strip() for x in s.split(',')])
+
+ if use_builtin_list:
+ plugin_list |= set([x.lower() for x in getattr(self, "%s_LIST" % type.upper())])
+
+ plugin_list -= set(('', u''))
+
+ if not plugin_list:
+ self.logInfo(_("No %s to handle") % type)
+ self._unload(type)
+ continue
+
+ match_list = '|'.join(sorted(plugin_list))
+
+ len_match_list = len(plugin_list)
+ self.logInfo(_("Handling %d %s%s: %s") % (len_match_list, type, "" if len_match_list is 1 else "s", match_list.replace('|', ', ')))
+
+ pattern = self.regexp[type][1] % match_list.replace('.', '\.')
+
+ dict = self.core.pluginManager.plugins[type]["XFileSharingPro"]
+ dict['pattern'] = pattern
+ dict['re'] = re.compile(pattern)
+
+ self.logDebug("Loaded %s pattern: %s" % (type, pattern))
+
+
+ def _unload(self, type):
+ dict = self.core.pluginManager.plugins[type]["XFileSharingPro"]
+ dict['pattern'] = r'^unmatchable$'
+ dict['re'] = re.compile(dict['pattern'])
+
+
+ def unload(self):
+ for type in ("hoster", "crypter"):
+ self._unload(type, "XFileSharingPro")
diff --git a/pyload/plugins/hook/ZeveraCom.py b/pyload/plugins/hook/ZeveraCom.py
new file mode 100644
index 000000000..09e3953a2
--- /dev/null
+++ b/pyload/plugins/hook/ZeveraCom.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.MultiHoster import MultiHoster
+
+
+class ZeveraCom(MultiHoster):
+ __name__ = "ZeveraCom"
+ __type__ = "hook"
+ __version__ = "0.02"
+
+ __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "")]
+
+ __description__ = """Real-Debrid.com hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def getHoster(self):
+ page = getURL("http://www.zevera.com/jDownloader.ashx?cmd=gethosters")
+ return [x.strip() for x in page.replace("\"", "").split(",")]
diff --git a/pyload/plugins/hook/__init__.py b/pyload/plugins/hook/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/hook/__init__.py
diff --git a/pyload/plugins/hoster/AlldebridCom.py b/pyload/plugins/hoster/AlldebridCom.py
new file mode 100644
index 000000000..7e5adf8ba
--- /dev/null
+++ b/pyload/plugins/hoster/AlldebridCom.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import randrange
+from urllib import unquote
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import parseFileSize
+
+
+class AlldebridCom(Hoster):
+ __name__ = "AlldebridCom"
+ __type__ = "hoster"
+ __version__ = "0.34"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?alldebrid\..*'
+
+ __description__ = """Alldebrid.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Andy Voigt", "spamsales@online.de")]
+
+
+ def getFilename(self, url):
+ try:
+ name = unquote(url.rsplit("/", 1)[1])
+ except IndexError:
+ name = "Unknown_Filename..."
+ if name.endswith("..."): # incomplete filename, append random stuff
+ name += "%s.tmp" % randrange(100, 999)
+ return name
+
+
+ def setup(self):
+ self.chunkLimit = 16
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "AllDebrid")
+ self.fail(_("No AllDebrid account provided"))
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ password = self.getPassword().splitlines()
+ password = "" if not password else password[0]
+
+ url = "http://www.alldebrid.com/service.php?link=%s&json=true&pw=%s" % (pyfile.url, password)
+ page = self.load(url)
+ data = json_loads(page)
+
+ self.logDebug("Json data", data)
+
+ if data['error']:
+ if data['error'] == "This link isn't available on the hoster website.":
+ self.offline()
+ else:
+ self.logWarning(data['error'])
+ self.tempOffline()
+ else:
+ if pyfile.name and not pyfile.name.endswith('.tmp'):
+ pyfile.name = data['filename']
+ pyfile.size = parseFileSize(data['filesize'])
+ new_url = data['link']
+
+ if self.getConfig("https"):
+ new_url = new_url.replace("http://", "https://")
+ else:
+ new_url = new_url.replace("https://", "http://")
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown"):
+ #only use when name wasn't already set
+ pyfile.name = self.getFilename(new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({'error': "<title>An error occured while processing your request</title>",
+ 'empty': re.compile(r"^$")})
+
+ if check == "error":
+ self.retry(wait_time=60, reason=_("An error occured while generating link"))
+ elif check == "empty":
+ self.retry(wait_time=60, reason=_("Downloaded File was empty"))
diff --git a/pyload/plugins/hoster/BayfilesCom.py b/pyload/plugins/hoster/BayfilesCom.py
new file mode 100644
index 000000000..f3fa87d89
--- /dev/null
+++ b/pyload/plugins/hoster/BayfilesCom.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class BayfilesCom(SimpleHoster):
+ __name__ = "BayfilesCom"
+ __type__ = "hoster"
+ __version__ = "0.08"
+
+ __pattern__ = r'https?://(?:www\.)?bayfiles\.(com|net)/file/(?P<ID>\w+/\w+/[^/]+)'
+
+ __description__ = """Bayfiles.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ INFO_PATTERN = r'<p title="(?P<N>[^"]+)">[^<]*<strong>(?P<S>[\d .,]+)(?P<U>[\w^_]+)</strong></p>'
+ OFFLINE_PATTERN = r'(<p>The requested file could not be found.</p>|<title>404 Not Found</title>)'
+
+ WAIT_PATTERN = r'>Your IP [\d.]* has recently downloaded a file\. Upgrade to premium or wait (\d+) minutes\.<'
+ VARS_PATTERN = r'var vfid = (\d+);\s*var delay = (\d+);'
+ FREE_LINK_PATTERN = r'javascript:window\.location\.href = \'(.+?)\';'
+ PREMIUM_LINK_PATTERN = r'(?:<a class="highlighted-btn" href="|(?=http://s\d+\.baycdn\.com/dl/))(.*?)"'
+
+
+ def handleFree(self):
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ self.retry(wait_time=int(m.group(1)) * 60)
+
+ # Get download token
+ m = re.search(self.VARS_PATTERN, self.html)
+ if m is None:
+ self.error(_("VARS_PATTERN not found"))
+ vfid, delay = m.groups()
+
+ res = json_loads(self.load('http://bayfiles.com/ajax_download',
+ get={"_": time() * 1000,
+ "action": "startTimer",
+ "vfid": vfid}, decode=True))
+
+ if not "token" in res or not res['token']:
+ self.fail(_("No token"))
+
+ self.wait(int(delay))
+
+ self.html = self.load('http://bayfiles.com/ajax_download', get={
+ "token": res['token'],
+ "action": "getLink",
+ "vfid": vfid})
+
+ # Get final link and download
+ m = re.search(self.FREE_LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Free link"))
+ self.startDownload(m.group(1))
+
+
+ def handlePremium(self):
+ m = re.search(self.PREMIUM_LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Premium link"))
+ self.startDownload(m.group(1))
+
+
+ def startDownload(self, url):
+ self.logDebug("%s URL: %s" % ("Premium" if self.premium else "Free", url))
+ self.download(url)
+ # check download
+ check = self.checkDownload({
+ "waitforfreeslots": re.compile(r"<title>BayFiles</title>"),
+ "notfound": re.compile(r"<title>404 Not Found</title>")
+ })
+ if check == "waitforfreeslots":
+ self.retry(30, 5 * 60, "Wait for free slot")
+ elif check == "notfound":
+ self.retry(30, 5 * 60, "404 Not found")
+
+
+getInfo = create_getInfo(BayfilesCom)
diff --git a/pyload/plugins/hoster/BezvadataCz.py b/pyload/plugins/hoster/BezvadataCz.py
new file mode 100644
index 000000000..2f2afc6ac
--- /dev/null
+++ b/pyload/plugins/hoster/BezvadataCz.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class BezvadataCz(SimpleHoster):
+ __name__ = "BezvadataCz"
+ __type__ = "hoster"
+ __version__ = "0.25"
+
+ __pattern__ = r'http://(?:www\.)?bezvadata\.cz/stahnout/.*'
+
+ __description__ = """BezvaData.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<p><b>Soubor: (?P<N>[^<]+)</b></p>'
+ SIZE_PATTERN = r'<li><strong>Velikost:</strong> (?P<S>[^<]+)</li>'
+ OFFLINE_PATTERN = r'<title>BezvaData \| Soubor nenalezen</title>'
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+
+
+ def handleFree(self):
+ #download button
+ m = re.search(r'<a class="stahnoutSoubor".*?href="(.*?)"', self.html)
+ if m is None:
+ self.error(_("Page 1 URL not found"))
+ url = "http://bezvadata.cz%s" % m.group(1)
+
+ #captcha form
+ self.html = self.load(url)
+ self.checkErrors()
+ for _i in xrange(5):
+ action, inputs = self.parseHtmlForm('frm-stahnoutFreeForm')
+ if not inputs:
+ self.error(_("FreeForm"))
+
+ m = re.search(r'<img src="data:image/png;base64,(.*?)"', self.html)
+ if m is None:
+ self.error(_("Wrong captcha image"))
+
+ #captcha image is contained in the html page as base64-encoded data but decryptCaptcha() expects an image url
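+ #so self.load is temporarily swapped for loadcaptcha(), which just base64-decodes the data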
+ self.load, proper_load = self.loadcaptcha, self.load
+ try:
+ inputs['captcha'] = self.decryptCaptcha(m.group(1), imgtype='png')
+ finally:
+ self.load = proper_load
+
+ if '<img src="data:image/png;base64' in self.html:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail(_("No valid captcha code entered"))
+
+ #download url
+ self.html = self.load("http://bezvadata.cz%s" % action, post=inputs)
+ self.checkErrors()
+ m = re.search(r'<a class="stahnoutSoubor2" href="(.*?)">', self.html)
+ if m is None:
+ self.error(_("Page 2 URL not found"))
+ url = "http://bezvadata.cz%s" % m.group(1)
+ self.logDebug("DL URL %s" % url)
+
+ #countdown
+ m = re.search(r'id="countdown">(\d\d):(\d\d)<', self.html)
+ wait_time = (int(m.group(1)) * 60 + int(m.group(2))) if m else 120
+ self.wait(wait_time, False)
+
+ self.download(url)
+
+
+ def checkErrors(self):
+ if 'images/button-download-disable.png' in self.html:
+ self.longWait(5 * 60, 24) #: parallel dl limit
+ elif '<div class="infobox' in self.html:
+ self.tempOffline()
+
+
+ def loadcaptcha(self, data, *args, **kwargs):
+ return data.decode("base64")
+
+
+getInfo = create_getInfo(BezvadataCz)
diff --git a/pyload/plugins/hoster/BillionuploadsCom.py b/pyload/plugins/hoster/BillionuploadsCom.py
new file mode 100644
index 000000000..d5d8c6c0f
--- /dev/null
+++ b/pyload/plugins/hoster/BillionuploadsCom.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class BillionuploadsCom(XFSHoster):
+ __name__ = "BillionuploadsCom"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?billionuploads\.com/\w{12}'
+
+ __description__ = """Billionuploads.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "billionuploads.com"
+
+ NAME_PATTERN = r'<td class="dofir" title="(?P<N>.+?)"'
+ SIZE_PATTERN = r'<td class="dofir">(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+
+getInfo = create_getInfo(BillionuploadsCom)
diff --git a/pyload/plugins/hoster/BitshareCom.py b/pyload/plugins/hoster/BitshareCom.py
new file mode 100644
index 000000000..a557a43b0
--- /dev/null
+++ b/pyload/plugins/hoster/BitshareCom.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class BitshareCom(SimpleHoster):
+ __name__ = "BitshareCom"
+ __type__ = "hoster"
+ __version__ = "0.51"
+
+ __pattern__ = r'http://(?:www\.)?bitshare\.com/(files/(?P<id1>\w+)(/(?P<name>.*?)\.html)?|\?f=(?P<id2>\w+))'
+
+ __description__ = """Bitshare.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Paul King", None),
+ ("fragonib", "fragonib[AT]yahoo[DOT]es")]
+
+
+ INFO_PATTERN = r'Downloading (?P<N>.+) - (?P<S>[\d.,]+) (?P<U>[\w^_]+)</h1>'
+ OFFLINE_PATTERN = r'(>We are sorry, but the requested file was not found in our database|>Error - File not available<|The file was deleted either by the uploader, inactivity or due to copyright claim)'
+
+ COOKIES = [("bitshare.com", "language_selection", "EN")]
+
+ AJAXID_PATTERN = r'var ajaxdl = "(.*?)";'
+ TRAFFIC_USED_UP = r'Your Traffic is used up for today. Upgrade to premium to continue!'
+
+
+ def setup(self):
+ self.multiDL = self.premium
+ self.chunkLimit = 1
+
+
+ def process(self, pyfile):
+ if self.premium:
+ self.account.relogin(self.user)
+
+ self.pyfile = pyfile
+
+ # File id
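+ # (the URL carries the id either as /files/<id1> or as ?f=<id2>; max() returns the group that matched)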
+ m = re.match(self.__pattern__, pyfile.url)
+ self.file_id = max(m.group('id1'), m.group('id2'))
+ self.logDebug("File id is [%s]" % self.file_id)
+
+ # Load main page
+ self.html = self.load(pyfile.url, ref=False, decode=True)
+
+ # Check offline
+ if re.search(self.OFFLINE_PATTERN, self.html):
+ self.offline()
+
+ # Check Traffic used up
+ if re.search(self.TRAFFIC_USED_UP, self.html):
+ self.logInfo(_("Your Traffic is used up for today"))
+ self.wait(30 * 60, True)
+ self.retry()
+
+ # File name
+ m = re.match(self.__pattern__, pyfile.url)
+ name1 = m.group('name') if m else None
+ m = re.search(self.INFO_PATTERN, self.html)
+ name2 = m.group('N') if m else None
+ pyfile.name = max(name1, name2)
+
+ # Ajax file id
+ self.ajaxid = re.search(self.AJAXID_PATTERN, self.html).group(1)
+ self.logDebug("File ajax id is [%s]" % self.ajaxid)
+
+ # This may either download our file or forward us to an error page
+ url = self.getDownloadUrl()
+ self.download(url)
+
+ check = self.checkDownload({"404": ">404 Not Found<", "Error": ">Error occured<"})
+ if check == "404":
+ self.retry(3, 60, 'Error 404')
+ elif check == "error":
+ self.retry(5, 5 * 60, "Bitshare host : Error occured")
+
+
+ def getDownloadUrl(self):
+ # Return location if direct download is active
+ if self.premium:
+ header = self.load(self.pyfile.url, cookies=True, just_header=True)
+ if 'location' in header:
+ return header['location']
+
+ # Get download info
+ self.logDebug("Getting download info")
+ res = self.load("http://bitshare.com/files-ajax/" + self.file_id + "/request.html",
+ post={"request": "generateID", "ajaxid": self.ajaxid})
+ self.handleErrors(res, ':')
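+ # response format: "<filetype>:<wait seconds>:<captcha flag>"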
+ parts = res.split(":")
+ filetype = parts[0]
+ wait = int(parts[1])
+ captcha = int(parts[2])
+ self.logDebug("Download info [type: '%s', waiting: %d, captcha: %d]" % (filetype, wait, captcha))
+
+ # Waiting
+ if wait > 0:
+ self.logDebug("Waiting %d seconds." % wait)
+ if wait < 120:
+ self.wait(wait, False)
+ else:
+ self.wait(wait - 55, True)
+ self.retry()
+
+ # Resolve captcha
+ if captcha == 1:
+ self.logDebug("File is captcha protected")
+ recaptcha = ReCaptcha(self)
+
+ # Try up to 3 times
+ for i in xrange(3):
+ challenge, code = recaptcha.challenge()
+ res = self.load("http://bitshare.com/files-ajax/" + self.file_id + "/request.html",
+ post={"request": "validateCaptcha", "ajaxid": self.ajaxid,
+ "recaptcha_challenge_field": challenge, "recaptcha_response_field": code})
+ if self.handleCaptchaErrors(res):
+ break
+
+ # Get download URL
+ self.logDebug("Getting download url")
+ res = self.load("http://bitshare.com/files-ajax/" + self.file_id + "/request.html",
+ post={"request": "getDownloadURL", "ajaxid": self.ajaxid})
+ self.handleErrors(res, '#')
+ url = res.split("#")[-1]
+
+ return url
+
+
+ def handleErrors(self, res, separator):
+ self.logDebug("Checking response [%s]" % res)
+ if "ERROR:Session timed out" in res:
+ self.retry()
+ elif "ERROR" in res:
+ msg = res.split(separator)[-1]
+ self.fail(msg)
+
+
+ def handleCaptchaErrors(self, res):
+ self.logDebug("Result of captcha resolving [%s]" % res)
+ if "SUCCESS" in res:
+ self.correctCaptcha()
+ return True
+ elif "ERROR:SESSION ERROR" in res:
+ self.retry()
+
+ self.invalidCaptcha()
+
+
+getInfo = create_getInfo(BitshareCom)
diff --git a/pyload/plugins/hoster/BoltsharingCom.py b/pyload/plugins/hoster/BoltsharingCom.py
new file mode 100644
index 000000000..4726fc769
--- /dev/null
+++ b/pyload/plugins/hoster/BoltsharingCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class BoltsharingCom(DeadHoster):
+ __name__ = "BoltsharingCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?boltsharing\.com/\w{12}'
+
+ __description__ = """Boltsharing.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(BoltsharingCom)
diff --git a/pyload/plugins/hoster/CatShareNet.py b/pyload/plugins/hoster/CatShareNet.py
new file mode 100644
index 000000000..c6600f4b4
--- /dev/null
+++ b/pyload/plugins/hoster/CatShareNet.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class CatShareNet(SimpleHoster):
+ __name__ = "CatShareNet"
+ __type__ = "hoster"
+ __version__ = "0.08"
+
+ __pattern__ = r'http://(?:www\.)?catshare\.net/\w{16}'
+
+ __description__ = """CatShare.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("z00nx", "z00nx0@gmail.com"),
+ ("prOq", None),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ TEXT_ENCODING = True
+
+ INFO_PATTERN = r'<title>(?P<N>.+) \((?P<S>[\d.,]+) (?P<U>[\w^_]+)\)<'
+ OFFLINE_PATTERN = ur'Podany plik został usunięty\s*</div>'
+
+ IP_BLOCKED_PATTERN = ur'>Nasz serwis wykrył że Twój adres IP nie pochodzi z Polski.<'
+ SECONDS_PATTERN = r'var\scount\s=\s(\d+);'
+ LINK_PATTERN = r'<form action="(.+?)" method="GET">'
+
+
+ def setup(self):
+ self.multiDL = self.premium
+ self.resumeDownload = True
+
+
+ def getFileInfo(self):
+ m = re.search(self.IP_BLOCKED_PATTERN, self.html)
+ if m:
+ self.fail(_("Only connections from Polish IP address are allowed"))
+ return super(CatShareNet, self).getFileInfo()
+
+
+ def handleFree(self):
+ m = re.search(self.SECONDS_PATTERN, self.html)
+ if m:
+ wait_time = int(m.group(1))
+ self.wait(wait_time, True)
+
+ recaptcha = ReCaptcha(self)
+
+ challenge, code = recaptcha.challenge()
+ self.html = self.load(self.pyfile.url,
+ post={'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': code})
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.invalidCaptcha()
+ self.retry(reason=_("Wrong captcha entered"))
+
+ dl_link = m.group(1)
+ self.download(dl_link, disposition=True)
+
+
+getInfo = create_getInfo(CatShareNet)
diff --git a/pyload/plugins/hoster/CloudzerNet.py b/pyload/plugins/hoster/CloudzerNet.py
new file mode 100644
index 000000000..ce16f6fce
--- /dev/null
+++ b/pyload/plugins/hoster/CloudzerNet.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class CloudzerNet(DeadHoster):
+ __name__ = "CloudzerNet"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'https?://(?:www\.)?(cloudzer\.net/file/|clz\.to/(file/)?)\w+'
+
+ __description__ = """Cloudzer.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("gs", "I-_-I-_-I@web.de"),
+ ("z00nx", "z00nx0@gmail.com"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(CloudzerNet)
diff --git a/pyload/plugins/hoster/CramitIn.py b/pyload/plugins/hoster/CramitIn.py
new file mode 100644
index 000000000..2d2c0cbbc
--- /dev/null
+++ b/pyload/plugins/hoster/CramitIn.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class CramitIn(XFSHoster):
+ __name__ = "CramitIn"
+ __type__ = "hoster"
+ __version__ = "0.07"
+
+ __pattern__ = r'http://(?:www\.)?cramit\.in/\w{12}'
+
+ __description__ = """Cramit.in hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "cramit.in"
+
+ INFO_PATTERN = r'<span class=t2>\s*(?P<N>.*?)</span>.*?<small>\s*\((?P<S>.*?)\)'
+ LINK_PATTERN = r'href="(http://cramit\.in/file_download/.*?)"'
+
+
+getInfo = create_getInfo(CramitIn)
diff --git a/pyload/plugins/hoster/CrockoCom.py b/pyload/plugins/hoster/CrockoCom.py
new file mode 100644
index 000000000..012fb7f0b
--- /dev/null
+++ b/pyload/plugins/hoster/CrockoCom.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class CrockoCom(SimpleHoster):
+ __name__ = "CrockoCom"
+ __type__ = "hoster"
+ __version__ = "0.17"
+
+ __pattern__ = r'http://(?:www\.)?(crocko|easy-share)\.com/\w+'
+
+ __description__ = """Crocko hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<span class="fz24">Download:\s*<strong>(?P<N>.*)'
+ SIZE_PATTERN = r'<span class="tip1"><span class="inner">(?P<S>[^<]+)</span></span>'
+ OFFLINE_PATTERN = r'<h1>Sorry,<br />the page you\'re looking for <br />isn\'t here.</h1>|File not found'
+
+ CAPTCHA_PATTERN = re.compile(r"u='(/file_contents/captcha/\w+)';\s*w='(\d+)';")
+
+ FORM_PATTERN = r'<form method="post" action="([^"]+)">(.*?)</form>'
+ FORM_INPUT_PATTERN = r'<input[^>]* name="?([^" ]+)"? value="?([^" ]+)"?[^>]*>'
+
+ NAME_REPLACEMENTS = [(r'<[^>]*>', '')]
+
+
+ def handleFree(self):
+ if "You need Premium membership to download this file." in self.html:
+ self.fail(_("You need Premium membership to download this file"))
+
+ for _i in xrange(5):
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ url, wait_time = 'http://crocko.com' + m.group(1), m.group(2)
+ self.wait(wait_time)
+ self.html = self.load(url)
+ else:
+ break
+
+ m = re.search(self.FORM_PATTERN, self.html, re.S)
+ if m is None:
+ self.error(_("FORM_PATTERN not found"))
+
+ action, form = m.groups()
+ inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+ recaptcha = ReCaptcha(self)
+
+ for _i in xrange(5):
+ inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge()
+ self.download(action, post=inputs)
+
+ check = self.checkDownload({
+ "captcha_err": recaptcha.KEY_AJAX_PATTERN
+ })
+
+ if check == "captcha_err":
+ self.invalidCaptcha()
+ else:
+ break
+ else:
+ self.fail(_("No valid captcha solution received"))
+
+
+getInfo = create_getInfo(CrockoCom)
diff --git a/pyload/plugins/hoster/CyberlockerCh.py b/pyload/plugins/hoster/CyberlockerCh.py
new file mode 100644
index 000000000..48109101d
--- /dev/null
+++ b/pyload/plugins/hoster/CyberlockerCh.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class CyberlockerCh(DeadHoster):
+ __name__ = "CyberlockerCh"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?cyberlocker\.ch/\w+'
+
+ __description__ = """Cyberlocker.ch hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(CyberlockerCh)
diff --git a/pyload/plugins/hoster/CzshareCom.py b/pyload/plugins/hoster/CzshareCom.py
new file mode 100644
index 000000000..92a38f49d
--- /dev/null
+++ b/pyload/plugins/hoster/CzshareCom.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://czshare.com/5278880/random.bin
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from pyload.utils import parseFileSize
+
+
+class CzshareCom(SimpleHoster):
+ __name__ = "CzshareCom"
+ __type__ = "hoster"
+ __version__ = "0.95"
+
+ __pattern__ = r'http://(?:www\.)?(czshare|sdilej)\.(com|cz)/(\d+/|download\.php\?).*'
+
+ __description__ = """CZshare.com hoster plugin, now Sdilej.cz"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<div class="tab" id="parameters">\s*<p>\s*Cel. n.zev: <a href=[^>]*>(?P<N>[^<]+)</a>'
+ SIZE_PATTERN = r'<div class="tab" id="category">(?:\s*<p>[^\n]*</p>)*\s*Velikost:\s*(?P<S>[\d .,]+)(?P<U>[\w^_]+)\s*</div>'
+ OFFLINE_PATTERN = r'<div class="header clearfix">\s*<h2 class="red">'
+
+ SIZE_REPLACEMENTS = [(' ', '')]
+ URL_REPLACEMENTS = [(r'http://[^/]*/download.php\?.*?id=(\w+).*', r'http://sdilej.cz/\1/x/')]
+
+ FORCE_CHECK_TRAFFIC = True
+
+ FREE_URL_PATTERN = r'<a href="([^"]+)" class="page-download">[^>]*alt="([^"]+)" /></a>'
+ FREE_FORM_PATTERN = r'<form action="download\.php" method="post">\s*<img src="captcha\.php" id="captcha" />(.*?)</form>'
+ PREMIUM_FORM_PATTERN = r'<form action="/profi_down\.php" method="post">(.*?)</form>'
+ FORM_INPUT_PATTERN = r'<input[^>]* name="([^"]+)" value="([^"]+)"[^>]*/>'
+ MULTIDL_PATTERN = r'<p><font color=\'red\'>Z[^<]*PROFI.</font></p>'
+ USER_CREDIT_PATTERN = r'<div class="credit">\s*kredit: <strong>([\d .,]+)(\w+)</strong>\s*</div><!-- .credit -->'
+
+
+ def checkTrafficLeft(self):
+ # check if user logged in
+ m = re.search(self.USER_CREDIT_PATTERN, self.html)
+ if m is None:
+ self.account.relogin(self.user)
+ self.html = self.load(self.pyfile.url, cookies=True, decode=True)
+ m = re.search(self.USER_CREDIT_PATTERN, self.html)
+ if m is None:
+ return False
+
+ # check user credit
+ try:
+ credit = parseFileSize(m.group(1).replace(' ', ''), m.group(2))
+ self.logInfo(_("Premium download for %i KiB of Credit") % (self.pyfile.size / 1024))
+ self.logInfo(_("User %s has %i KiB left") % (self.user, credit / 1024))
+ if credit < self.pyfile.size:
+ self.logInfo(_("Not enough credit to download file: %s") % self.pyfile.name)
+ return False
+ except Exception, e:
+ # let's continue and see what happens...
+ self.logError(e)
+
+ return True
+
+
+ def handlePremium(self):
+ # parse download link
+ try:
+ form = re.search(self.PREMIUM_FORM_PATTERN, self.html, re.S).group(1)
+ inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+ except Exception, e:
+ self.logError(e)
+ self.resetAccount()
+
+ # download the file, destination is determined by pyLoad
+ self.download("http://sdilej.cz/profi_down.php", post=inputs, disposition=True)
+ self.checkDownloadedFile()
+
+
+ def handleFree(self):
+ # get free url
+ m = re.search(self.FREE_URL_PATTERN, self.html)
+ if m is None:
+ self.error(_("FREE_URL_PATTERN not found"))
+ parsed_url = "http://sdilej.cz" + m.group(1)
+ self.logDebug("PARSED_URL:" + parsed_url)
+
+ # get download ticket and parse html
+ self.html = self.load(parsed_url, cookies=True, decode=True)
+ if re.search(self.MULTIDL_PATTERN, self.html):
+ self.longWait(5 * 60, 12)
+
+ try:
+ form = re.search(self.FREE_FORM_PATTERN, self.html, re.S).group(1)
+ inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+ self.pyfile.size = int(inputs['size'])
+ except Exception, e:
+ self.logError(e)
+ self.error(_("Form"))
+
+ # get and decrypt captcha
+ captcha_url = 'http://sdilej.cz/captcha.php'
+ for _i in xrange(5):
+ inputs['captchastring2'] = self.decryptCaptcha(captcha_url)
+ self.html = self.load(parsed_url, cookies=True, post=inputs, decode=True)
+ if u"<li>ZadanÃœ ověřovací kód nesouhlasí!</li>" in self.html:
+ self.invalidCaptcha()
+ elif re.search(self.MULTIDL_PATTERN, self.html):
+ self.longWait(5 * 60, 12)
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail(_("No valid captcha code entered"))
+
+ m = re.search("countdown_number = (\d+);", self.html)
+ self.setWait(int(m.group(1)) if m else 50)
+
+ # download the file, destination is determined by pyLoad
+ self.logDebug("WAIT URL", self.req.lastEffectiveURL)
+ m = re.search("free_wait.php\?server=(.*?)&(.*)", self.req.lastEffectiveURL)
+ if m is None:
+ self.error(_("Download URL not found"))
+
+ url = "http://%s/download.php?%s" % (m.group(1), m.group(2))
+
+ self.wait()
+ self.download(url)
+ self.checkDownloadedFile()
+
+
+ def checkDownloadedFile(self):
+ # check download
+ check = self.checkDownload({
+ "temp_offline": re.compile(r"^Soubor je do.*asn.* nedostupn.*$"),
+ "credit": re.compile(r"^Nem.*te dostate.*n.* kredit.$"),
+ "multi_dl": re.compile(self.MULTIDL_PATTERN),
+ "captcha_err": "<li>ZadanÃœ ověřovací kód nesouhlasí!</li>"
+ })
+
+ if check == "temp_offline":
+ self.fail(_("File not available - try later"))
+ if check == "credit":
+ self.resetAccount()
+ elif check == "multi_dl":
+ self.longWait(5 * 60, 12)
+ elif check == "captcha_err":
+ self.invalidCaptcha()
+ self.retry()
+
+
+getInfo = create_getInfo(CzshareCom)
diff --git a/pyload/plugins/hoster/DailymotionCom.py b/pyload/plugins/hoster/DailymotionCom.py
new file mode 100644
index 000000000..cd66e6e21
--- /dev/null
+++ b/pyload/plugins/hoster/DailymotionCom.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.datatype.PyFile import statusMap
+from pyload.utils import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+
+
+def getInfo(urls):
+ result = [] #: [ .. (name, size, status, url) .. ]
+ regex = re.compile(DailymotionCom.__pattern__)
+ apiurl = "https://api.dailymotion.com/video/"
+ request = {"fields": "access_error,status,title"}
+ for url in urls:
+ id = regex.search(url).group("ID")
+ page = getURL(apiurl + id, get=request)
+ info = json_loads(page)
+
+ if "title" in info:
+ name = info['title'] + ".mp4"
+ else:
+ name = url
+
+ if "error" in info or info['access_error']:
+ status = "offline"
+ else:
+ status = info['status']
+ if status in ("ready", "published"):
+ status = "online"
+ elif status in ("waiting", "processing"):
+ status = "temp. offline"
+ else:
+ status = "offline"
+
+ result.append((name, 0, statusMap[status], url))
+ return result
+
+
+class DailymotionCom(Hoster):
+ __name__ = "DailymotionCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'https?://(?:www\.)?dailymotion\.com/.*?video/(?P<ID>[\w^_]+)'
+ __config__ = [("quality", "Lowest;LD 144p;LD 240p;SD 384p;HQ 480p;HD 720p;HD 1080p;Highest", "Quality", "Highest")]
+
+ __description__ = """Dailymotion.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+
+ def getStreams(self):
+ streams = []
+ for result in re.finditer(r"\"(?P<URL>http:\\/\\/www.dailymotion.com\\/cdn\\/H264-(?P<QF>.*?)\\.*?)\"",
+ self.html):
+ url = result.group("URL")
+ qf = result.group("QF")
+ link = url.replace("\\", "")
+ quality = tuple(int(x) for x in qf.split("x"))
+ streams.append((quality, link))
+ return sorted(streams, key=lambda x: x[0][::-1])
+
+
+ def getQuality(self):
+ q = self.getConfig("quality")
+ if q == "Lowest":
+ quality = 0
+ elif q == "Highest":
+ quality = -1
+ else:
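+ # e.g. "HD 720p" -> 720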
+ quality = int(q.rsplit(" ")[1][:-1])
+ return quality
+
+
+ def getLink(self, streams, quality):
+ if quality > 0:
+ for x, s in reversed([item for item in enumerate(streams)]):
+ qf = s[0][1]
+ if qf <= quality:
+ idx = x
+ break
+ else:
+ idx = 0
+ else:
+ idx = quality
+
+ s = streams[idx]
+ self.logInfo(_("Download video quality %sx%s") % s[0])
+ return s[1]
+
+
+ def checkInfo(self, pyfile):
+ pyfile.name, pyfile.size, pyfile.status, pyfile.url = getInfo([pyfile.url])[0]
+ if pyfile.status == 1:
+ self.offline()
+ elif pyfile.status == 6:
+ self.tempOffline()
+
+
+ def process(self, pyfile):
+ self.checkInfo(pyfile)
+
+ id = re.match(self.__pattern__, pyfile.url).group("ID")
+ self.html = self.load("http://www.dailymotion.com/embed/video/" + id, decode=True)
+
+ streams = self.getStreams()
+ quality = self.getQuality()
+ link = self.getLink(streams, quality)
+
+ self.download(link)
diff --git a/pyload/plugins/hoster/DataHu.py b/pyload/plugins/hoster/DataHu.py
new file mode 100644
index 000000000..adadbfe5d
--- /dev/null
+++ b/pyload/plugins/hoster/DataHu.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://data.hu/get/6381232/random.bin
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DataHu(SimpleHoster):
+ __name__ = "DataHu"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?data\.hu/get/\w+'
+
+ __description__ = """Data.hu hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("crash", None),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ INFO_PATTERN = ur'<title>(?P<N>.*) \((?P<S>[^)]+)\) let\xf6lt\xe9se</title>'
+ OFFLINE_PATTERN = ur'Az adott f\xe1jl nem l\xe9tezik'
+ LINK_PATTERN = r'<div class="download_box_button"><a href="([^"]+)">'
+
+
+ def setup(self):
+ self.resumeDownload = True
+ self.multiDL = self.premium
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ url = m.group(1)
+ self.logDebug("Direct link: " + url)
+ else:
+ self.error(_("LINK_PATTERN not found"))
+
+ self.download(url, disposition=True)
+
+
+getInfo = create_getInfo(DataHu)
diff --git a/pyload/plugins/hoster/DataportCz.py b/pyload/plugins/hoster/DataportCz.py
new file mode 100644
index 000000000..174bd04af
--- /dev/null
+++ b/pyload/plugins/hoster/DataportCz.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DataportCz(SimpleHoster):
+ __name__ = "DataportCz"
+ __type__ = "hoster"
+ __version__ = "0.39"
+
+ __pattern__ = r'http://(?:www\.)?dataport\.cz/file/(.*)'
+
+ __description__ = """Dataport.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<span itemprop="name">(?P<N>[^<]+)</span>'
+ SIZE_PATTERN = r'<td class="fil">Velikost</td>\s*<td>(?P<S>[^<]+)</td>'
+ OFFLINE_PATTERN = r'<h2>Soubor nebyl nalezen</h2>'
+
+ URL_REPLACEMENTS = [(__pattern__, r'http://www.dataport.cz/file/\1')]
+
+ CAPTCHA_PATTERN = r'<section id="captcha_bg">\s*<img src="(.*?)"'
+ FREE_SLOTS_PATTERN = ur'Počet volných slotů: <span class="darkblue">(\d+)</span><br />'
+
+
+ def handleFree(self):
+ captchas = {"1": "jkeG", "2": "hMJQ", "3": "vmEK", "4": "ePQM", "5": "blBd"}
+
+ for _i in xrange(60):
+ action, inputs = self.parseHtmlForm('free_download_form')
+ self.logDebug(action, inputs)
+ if not action or not inputs:
+ self.error(_("free_download_form"))
+
+ if "captchaId" in inputs and inputs['captchaId'] in captchas:
+ inputs['captchaCode'] = captchas[inputs['captchaId']]
+ else:
+ self.error(_("captcha"))
+
+ self.html = self.download("http://www.dataport.cz%s" % action, post=inputs)
+
+ check = self.checkDownload({"captcha": 'alert("\u0160patn\u011b opsan\u00fd k\u00f3d z obr\u00e1zu");',
+ "slot": 'alert("Je n\u00e1m l\u00edto, ale moment\u00e1ln\u011b nejsou'})
+ if check == "captcha":
+ self.error(_("invalid captcha"))
+ elif check == "slot":
+ self.logDebug("No free slots - wait 60s and retry")
+ self.wait(60, False)
+ self.html = self.load(self.pyfile.url, decode=True)
+ continue
+ else:
+ break
+
+
+getInfo = create_getInfo(DataportCz)
diff --git a/pyload/plugins/hoster/DateiTo.py b/pyload/plugins/hoster/DateiTo.py
new file mode 100644
index 000000000..2f83960e6
--- /dev/null
+++ b/pyload/plugins/hoster/DateiTo.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DateiTo(SimpleHoster):
+ __name__ = "DateiTo"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?datei\.to/datei/(?P<ID>\w+)\.html'
+
+ __description__ = """Datei.to hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'Dateiname:</td>\s*<td colspan="2"><strong>(?P<N>.*?)</'
+ SIZE_PATTERN = r'Dateigr&ouml;&szlig;e:</td>\s*<td colspan="2">(?P<S>.*?)</'
+ OFFLINE_PATTERN = r'>Datei wurde nicht gefunden<|>Bitte wähle deine Datei aus... <'
+ PARALELL_PATTERN = r'>Du lädst bereits eine Datei herunter<'
+
+ WAIT_PATTERN = r'countdown\({seconds: (\d+)'
+ DATA_PATTERN = r'url: "(.*?)", data: "(.*?)",'
+
+
+ def handleFree(self):
+ url = 'http://datei.to/ajax/download.php'
+ data = {'P': 'I', 'ID': self.info['ID']}
+ recaptcha = ReCaptcha(self)
+
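+ # The download is negotiated in several ajax steps; the "P" value tracks the current step ("I" = initial request with wait, "IV" presumably the final step whose response contains the link)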
+ for _i in xrange(10):
+ self.logDebug("URL", url, "POST", data)
+ self.html = self.load(url, post=data)
+ self.checkErrors()
+
+ if url.endswith('download.php') and 'P' in data:
+ if data['P'] == 'I':
+ self.doWait()
+
+ elif data['P'] == 'IV':
+ break
+
+ m = re.search(self.DATA_PATTERN, self.html)
+ if m is None:
+ self.error(_("data"))
+ url = 'http://datei.to/' + m.group(1)
+ data = dict(x.split('=') for x in m.group(2).split('&'))
+
+ if url.endswith('recaptcha.php'):
+ data['recaptcha_challenge_field'], data['recaptcha_response_field'] = recaptcha.challenge()
+ else:
+ self.fail(_("Too bad..."))
+
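+ # After the final step the response body itself is the direct download link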
+ download_url = self.html
+ self.download(download_url)
+
+
+ def checkErrors(self):
+ m = re.search(self.PARALELL_PATTERN, self.html)
+ if m:
+ m = re.search(self.WAIT_PATTERN, self.html)
+ wait_time = int(m.group(1)) if m else 30
+ self.retry(wait_time=wait_time)
+
+
+ def doWait(self):
+ m = re.search(self.WAIT_PATTERN, self.html)
+ wait_time = int(m.group(1)) if m else 30
+
+ self.load('http://datei.to/ajax/download.php', post={'P': 'Ads'})
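+ # The "Ads" request presumably mimics the ad view the site expects before the countdown starts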
+ self.wait(wait_time, False)
+
+
+getInfo = create_getInfo(DateiTo)
diff --git a/pyload/plugins/hoster/DdlstorageCom.py b/pyload/plugins/hoster/DdlstorageCom.py
new file mode 100644
index 000000000..4a46c4541
--- /dev/null
+++ b/pyload/plugins/hoster/DdlstorageCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class DdlstorageCom(DeadHoster):
+ __name__ = "DdlstorageCom"
+ __type__ = "hoster"
+ __version__ = "1.02"
+
+ __pattern__ = r'https?://(?:www\.)?ddlstorage\.com/\w+'
+
+ __description__ = """DDLStorage.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(DdlstorageCom)
diff --git a/pyload/plugins/hoster/DebridItaliaCom.py b/pyload/plugins/hoster/DebridItaliaCom.py
new file mode 100644
index 000000000..81cf0b830
--- /dev/null
+++ b/pyload/plugins/hoster/DebridItaliaCom.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class DebridItaliaCom(Hoster):
+ __name__ = "DebridItaliaCom"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?debriditalia\.com'
+
+ __description__ = """Debriditalia.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "DebridItalia")
+ self.fail(_("No DebridItalia account provided"))
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ url = "http://debriditalia.com/linkgen2.php?xjxfun=convertiLink&xjxargs[]=S<![CDATA[%s]]>" % pyfile.url
+ page = self.load(url)
+ self.logDebug("XML data: %s" % page)
+
+ if 'File not available' in page:
+ self.fail(_("File not available"))
+ else:
+ new_url = re.search(r'<a href="(?:[^"]+)">(?P<direct>[^<]+)</a>', page).group('direct')
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({"empty": re.compile(r"^$")})
+
+ if check == "empty":
+ self.retry(5, 2 * 60, "Empty file downloaded")
diff --git a/pyload/plugins/hoster/DepositfilesCom.py b/pyload/plugins/hoster/DepositfilesCom.py
new file mode 100644
index 000000000..ee84195b6
--- /dev/null
+++ b/pyload/plugins/hoster/DepositfilesCom.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DepositfilesCom(SimpleHoster):
+ __name__ = "DepositfilesCom"
+ __type__ = "hoster"
+ __version__ = "0.50"
+
+ __pattern__ = r'https?://(?:www\.)?(depositfiles\.com|dfiles\.(eu|ru))(/\w{1,3})?/files/(?P<ID>\w+)'
+
+ __description__ = """Depositfiles.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'<script type="text/javascript">eval\( unescape\(\'(?P<N>.*?)\''
+ SIZE_PATTERN = r': <b>(?P<S>[\d.,]+)&nbsp;(?P<U>[\w^_]+)</b>'
+ OFFLINE_PATTERN = r'<span class="html_download_api-not_exists"></span>'
+
+ NAME_REPLACEMENTS = [(r'\%u([0-9A-Fa-f]{4})', lambda m: unichr(int(m.group(1), 16))),
+ (r'.*<b title="(?P<N>[^"]+).*', "\g<N>")]
+ URL_REPLACEMENTS = [(__pattern__, "https://dfiles.eu/files/\g<ID>")]
+
+ COOKIES = [("dfiles.eu", "lang_current", "en")]
+
+ FREE_LINK_PATTERN = r'<form id="downloader_file_form" action="(http://.+?\.(dfiles\.eu|depositfiles\.com)/.+?)" method="post"'
+ PREMIUM_LINK_PATTERN = r'class="repeat"><a href="(.+?)"'
+ PREMIUM_MIRROR_PATTERN = r'class="repeat_mirror"><a href="(.+?)"'
+
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, post={"gateway_result": "1"}, cookies=True)
+
+ if re.search(r'File is checked, please try again in a minute.', self.html) is not None:
+ self.logInfo(_("The file is being checked. Waiting 1 minute"))
+ self.retry(wait_time=60)
+
+ wait = re.search(r'html_download_api-limit_interval\">(\d+)</span>', self.html)
+ if wait:
+ wait_time = int(wait.group(1))
+ self.logInfo(_("Traffic used up. Waiting %d seconds") % wait_time)
+ self.wait(wait_time, True)
+ self.retry()
+
+ wait = re.search(r'>Try in (\d+) minutes or use GOLD account', self.html)
+ if wait:
+ wait_time = int(wait.group(1))
+ self.logInfo(_("All free slots occupied. Waiting %d minutes") % wait_time)
+ self.setWait(wait_time * 60, False)
+
+ wait = re.search(r'Please wait (\d+) sec', self.html)
+ if wait:
+ self.setWait(int(wait.group(1)))
+
+ m = re.search(r"var fid = '(\w+)';", self.html)
+ if m is None:
+ self.retry(wait_time=5)
+ params = {'fid': m.group(1)}
+ self.logDebug("FID: %s" % params['fid'])
+
+ self.wait()
+ recaptcha = ReCaptcha(self)
+ captcha_key = recaptcha.detect_key()
+ if captcha_key is None:
+ self.error(_("ReCaptcha key not found"))
+
+ for _i in xrange(5):
+ self.html = self.load("https://dfiles.eu/get_file.php", get=params)
+
+ if '<input type=button value="Continue" onclick="check_recaptcha' in self.html:
+ if 'response' in params:
+ self.invalidCaptcha()
+ params['challenge'], params['response'] = recaptcha.challenge(captcha_key)
+ self.logDebug(params)
+ continue
+
+ m = re.search(self.FREE_LINK_PATTERN, self.html)
+ if m:
+ if 'response' in params:
+ self.correctCaptcha()
+ link = unquote(m.group(1))
+ self.logDebug("LINK: %s" % link)
+ break
+ else:
+ self.error(_("Download link"))
+ else:
+ self.fail(_("No valid captcha response received"))
+
+ try:
+ self.download(link, disposition=True)
+ except:
+ self.retry(wait_time=60)
+
+
+ def handlePremium(self):
+ if '<span class="html_download_api-gold_traffic_limit">' in self.html:
+ self.logWarning(_("Download limit reached"))
+ self.retry(25, 60 * 60, "Download limit reached")
+ elif 'onClick="show_gold_offer' in self.html:
+ self.account.relogin(self.user)
+ self.retry()
+ else:
+ link = re.search(self.PREMIUM_LINK_PATTERN, self.html)
+ mirror = re.search(self.PREMIUM_MIRROR_PATTERN, self.html)
+ if link:
+ dlink = link.group(1)
+ elif mirror:
+ dlink = mirror.group(1)
+ else:
+ self.error(_("No direct download link or mirror found"))
+ self.download(dlink, disposition=True)
+
+
+getInfo = create_getInfo(DepositfilesCom)
diff --git a/pyload/plugins/hoster/DlFreeFr.py b/pyload/plugins/hoster/DlFreeFr.py
new file mode 100644
index 000000000..b06609298
--- /dev/null
+++ b/pyload/plugins/hoster/DlFreeFr.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+
+import pycurl
+import re
+
+from pyload.utils import json_loads
+from pyload.network.Browser import Browser
+from pyload.network.CookieJar import CookieJar
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns
+
+
+class CustomBrowser(Browser):
+
+ def __init__(self, bucket=None, options={}):
+ Browser.__init__(self, bucket, options)
+
+
+ def load(self, *args, **kwargs):
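+ # For POST requests redirects are not followed, so the caller can inspect the
+ # raw 302 response (Location / Set-Cookie headers) itself; GET requests follow
+ # redirects as usual.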
+ post = kwargs.get("post")
+
+ if post is None and len(args) > 2:
+ post = args[2]
+
+ if post:
+ self.http.c.setopt(pycurl.FOLLOWLOCATION, 0)
+ self.http.c.setopt(pycurl.POST, 1)
+ self.http.c.setopt(pycurl.CUSTOMREQUEST, "POST")
+ else:
+ self.http.c.setopt(pycurl.FOLLOWLOCATION, 1)
+ self.http.c.setopt(pycurl.POST, 0)
+ self.http.c.setopt(pycurl.CUSTOMREQUEST, "GET")
+
+ return Browser.load(self, *args, **kwargs)
+
+
+class AdYouLike:
+ """
+ Class to support adyoulike captcha service
+ """
+ ADYOULIKE_INPUT_PATTERN = r'Adyoulike\.create\((.*?)\);'
+ ADYOULIKE_CALLBACK = r'Adyoulike\.g\._jsonp_5579316662423138'
+ ADYOULIKE_CHALLENGE_PATTERN = ADYOULIKE_CALLBACK + r'\((.*?)\)'
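+ # The challenge API replies with JSONP: the fixed callback name above is passed as the "callback" parameter and then used to extract the JSON payload from the response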
+
+
+ def __init__(self, plugin, engine="adyoulike"):
+ self.plugin = plugin
+ self.engine = engine
+
+
+ def challenge(self, html):
+ adyoulike_data_string = None
+ m = re.search(self.ADYOULIKE_INPUT_PATTERN, html)
+ if m:
+ adyoulike_data_string = m.group(1)
+ else:
+ self.plugin.fail("Can't read AdYouLike input data")
+
+ # {"adyoulike":{"key":"P~zQ~O0zV0WTiAzC-iw0navWQpCLoYEP"},
+ # "all":{"element_id":"ayl_private_cap_92300","lang":"fr","env":"prod"}}
+ ayl_data = json_loads(adyoulike_data_string)
+
+ res = self.plugin.load(
+ r'http://api-ayl.appspot.com/challenge?key=%(ayl_key)s&env=%(ayl_env)s&callback=%(callback)s' % {
+ "ayl_key": ayl_data[self.engine]['key'], "ayl_env": ayl_data['all']['env'],
+ "callback": self.ADYOULIKE_CALLBACK})
+
+ m = re.search(self.ADYOULIKE_CHALLENGE_PATTERN, res)
+ challenge_string = None
+ if m:
+ challenge_string = m.group(1)
+ else:
+ self.plugin.fail("Invalid AdYouLike challenge")
+ challenge_data = json_loads(challenge_string)
+
+ return ayl_data, challenge_data
+
+
+ def result(self, ayl, challenge):
+ """
+ Adyoulike.g._jsonp_5579316662423138
+ ({"translations":{"fr":{"instructions_visual":"Recopiez « Soonnight » ci-dessous :"}},
+ "site_under":true,"clickable":true,"pixels":{"VIDEO_050":[],"DISPLAY":[],"VIDEO_000":[],"VIDEO_100":[],
+ "VIDEO_025":[],"VIDEO_075":[]},"medium_type":"image/adyoulike",
+ "iframes":{"big":"<iframe src=\"http://www.soonnight.com/campagn.html\" scrolling=\"no\"
+ height=\"250\" width=\"300\" frameborder=\"0\"></iframe>"},"shares":{},"id":256,
+ "token":"e6QuI4aRSnbIZJg02IsV6cp4JQ9~MjA1","formats":{"small":{"y":300,"x":0,"w":300,"h":60},
+ "big":{"y":0,"x":0,"w":300,"h":250},"hover":{"y":440,"x":0,"w":300,"h":60}},
+ "tid":"SqwuAdxT1EZoi4B5q0T63LN2AkiCJBg5"})
+ """
+ response = None
+ try:
+ instructions_visual = challenge['translations'][ayl['all']['lang']]['instructions_visual']
+ m = re.search(u".*«(.*)».*", instructions_visual)
+ if m:
+ response = m.group(1).strip()
+ else:
+ self.plugin.fail("Can't parse instructions visual")
+ except KeyError:
+ self.plugin.fail("No instructions visual")
+
+ #TODO: Support captcha
+
+ if not response:
+ self.plugin.fail("AdYouLike result failed")
+
+ return {"_ayl_captcha_engine": self.engine,
+ "_ayl_env": ayl['all']['env'],
+ "_ayl_tid": challenge['tid'],
+ "_ayl_token_challenge": challenge['token'],
+ "_ayl_response": response}
+
+
+class DlFreeFr(SimpleHoster):
+ __name__ = "DlFreeFr"
+ __type__ = "hoster"
+ __version__ = "0.25"
+
+ __pattern__ = r'http://(?:www\.)?dl\.free\.fr/(\w+|getfile\.pl\?file=/\w+)'
+
+ __description__ = """Dl.free.fr hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("the-razer", "daniel_ AT gmx DOT net"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("Toilal", "toilal.dev@gmail.com")]
+
+
+ NAME_PATTERN = r'Fichier:</td>\s*<td[^>]*>(?P<N>[^>]*)</td>'
+ SIZE_PATTERN = r'Taille:</td>\s*<td[^>]*>(?P<S>[\d.,]+\w)o'
+ OFFLINE_PATTERN = r'Erreur 404 - Document non trouv|Fichier inexistant|Le fichier demand&eacute; n\'a pas &eacute;t&eacute; trouv&eacute;'
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+ self.limitDL = 5
+ self.chunkLimit = 1
+
+
+ def init(self):
+ factory = self.core.requestFactory
+ self.req = CustomBrowser(factory.bucket, factory.getOptions())
+
+
+ def process(self, pyfile):
+ pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
+ valid_url = pyfile.url
+ headers = self.load(valid_url, just_header=True)
+
+ if headers.get('code') == 302:
+ valid_url = headers.get('location')
+ headers = self.load(valid_url, just_header=True)
+
+ if headers.get('code') == 200:
+ content_type = headers.get('content-type')
+ if content_type and content_type.startswith("text/html"):
+ # Indirect access to the requested file: a web page (with a captcha) provides it
+ self.html = self.load(valid_url)
+ self.handleFree()
+ else:
+ # Direct access to the requested file for users whose Internet Service Provider is free.fr
+ self.download(valid_url, disposition=True)
+ elif headers.get('code') == 404:
+ self.offline()
+ else:
+ self.fail(_("Invalid return code: ") + str(headers.get('code')))
+
+
+ def handleFree(self):
+ action, inputs = self.parseHtmlForm('action="getfile.pl"')
+
+ adyoulike = AdYouLike(self)
+ ayl, challenge = adyoulike.challenge(self.html)
+ result = adyoulike.result(ayl, challenge)
+ inputs.update(result)
+
+ self.load("http://dl.free.fr/getfile.pl", post=inputs)
+ headers = self.getLastHeaders()
+ if headers.get("code") == 302 and "set-cookie" in headers and "location" in headers:
+ m = re.search("(.*?)=(.*?); path=(.*?); domain=(.*?)", headers.get("set-cookie"))
+ cj = CookieJar(__name__)
+ if m:
+ cj.setCookie(m.group(4), m.group(1), m.group(2), m.group(3))
+ else:
+ self.fail(_("Cookie error"))
+ location = headers.get("location")
+ self.req.setCookieJar(cj)
+ self.download(location, disposition=True)
+ else:
+ self.fail(_("Invalid response"))
+
+
+ def getLastHeaders(self):
+ # Parse the raw response headers into a dict (repeated header names become lists)
+ header = {"code": self.req.code}
+ for line in self.req.http.header.splitlines():
+ line = line.strip()
+ if not line or ":" not in line:
+ continue
+
+ key, none, value = line.partition(":")
+ key = key.lower().strip()
+ value = value.strip()
+
+ if key in header:
+ if type(header[key]) == list:
+ header[key].append(value)
+ else:
+ header[key] = [header[key], value]
+ else:
+ header[key] = value
+ return header
+
+
+getInfo = create_getInfo(DlFreeFr)
diff --git a/pyload/plugins/hoster/DuploadOrg.py b/pyload/plugins/hoster/DuploadOrg.py
new file mode 100644
index 000000000..dae4c847e
--- /dev/null
+++ b/pyload/plugins/hoster/DuploadOrg.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class DuploadOrg(DeadHoster):
+ __name__ = "DuploadOrg"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?dupload\.org/\w{12}'
+
+ __description__ = """Dupload.grg hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(DuploadOrg)
diff --git a/pyload/plugins/hoster/EasybytezCom.py b/pyload/plugins/hoster/EasybytezCom.py
new file mode 100644
index 000000000..d02cb371b
--- /dev/null
+++ b/pyload/plugins/hoster/EasybytezCom.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class EasybytezCom(XFSHoster):
+ __name__ = "EasybytezCom"
+ __type__ = "hoster"
+ __version__ = "0.23"
+
+ __pattern__ = r'http://(?:www\.)?easybytez\.com/\w{12}'
+
+ __description__ = """Easybytez.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ HOSTER_DOMAIN = "easybytez.com"
+
+ OFFLINE_PATTERN = r'>File not available'
+
+ LINK_PATTERN = r'(http://(\w+\.(easybytez|easyload|ezbytez|zingload)\.(com|to)|\d+\.\d+\.\d+\.\d+)/files/\d+/\w+/.+?)["\'<]'
+
+
+getInfo = create_getInfo(EasybytezCom)
diff --git a/pyload/plugins/hoster/EdiskCz.py b/pyload/plugins/hoster/EdiskCz.py
new file mode 100644
index 000000000..3b256a89a
--- /dev/null
+++ b/pyload/plugins/hoster/EdiskCz.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class EdiskCz(SimpleHoster):
+ __name__ = "EdiskCz"
+ __type__ = "hoster"
+ __version__ = "0.22"
+
+ __pattern__ = r'http://(?:www\.)?edisk\.(cz|sk|eu)/(stahni|sk/stahni|en/download)/.*'
+
+ __description__ = """Edisk.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ INFO_PATTERN = r'<span class="fl" title="(?P<N>[^"]+)">\s*.*?\((?P<S>[\d.,]+) (?P<U>[\w^_]+)\)</h1></span>'
+ OFFLINE_PATTERN = r'<h3>This file does not exist due to one of the following:</h3><ul><li>'
+
+ ACTION_PATTERN = r'/en/download/(\d+/.*\.html)'
+ LINK_PATTERN = r'http://.*edisk\.cz.*\.html'
+
+
+ def setup(self):
+ self.multiDL = False
+
+
+ def process(self, pyfile):
+ url = re.sub("/(stahni|sk/stahni)/", "/en/download/", pyfile.url)
+
+ self.logDebug("URL:" + url)
+
+ m = re.search(self.ACTION_PATTERN, url)
+ if m is None:
+ self.error(_("ACTION_PATTERN not found"))
+ action = m.group(1)
+
+ self.html = self.load(url, decode=True)
+ self.getFileInfo()
+
+ self.html = self.load(re.sub("/en/download/", "/en/download-slow/", url))
+
+ url = self.load(re.sub("/en/download/", "/x-download/", url), post={
+ "action": action
+ })
+
+ if not re.match(self.LINK_PATTERN, url):
+ self.fail(_("Unexpected server response"))
+
+ self.download(url)
+
+
+getInfo = create_getInfo(EdiskCz)
diff --git a/pyload/plugins/hoster/EgoFilesCom.py b/pyload/plugins/hoster/EgoFilesCom.py
new file mode 100644
index 000000000..20176751a
--- /dev/null
+++ b/pyload/plugins/hoster/EgoFilesCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class EgoFilesCom(DeadHoster):
+ __name__ = "EgoFilesCom"
+ __type__ = "hoster"
+ __version__ = "0.16"
+
+ __pattern__ = r'https?://(?:www\.)?egofiles\.com/\w+'
+
+ __description__ = """Egofiles.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(EgoFilesCom)
diff --git a/pyload/plugins/hoster/EnteruploadCom.py b/pyload/plugins/hoster/EnteruploadCom.py
new file mode 100644
index 000000000..c535d6f48
--- /dev/null
+++ b/pyload/plugins/hoster/EnteruploadCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class EnteruploadCom(DeadHoster):
+ __name__ = "EnteruploadCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?enterupload\.com/\w+'
+
+ __description__ = """EnterUpload.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(EnteruploadCom)
diff --git a/pyload/plugins/hoster/EpicShareNet.py b/pyload/plugins/hoster/EpicShareNet.py
new file mode 100644
index 000000000..a06b4f1dc
--- /dev/null
+++ b/pyload/plugins/hoster/EpicShareNet.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class EpicShareNet(DeadHoster):
+ __name__ = "EpicShareNet"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?epicshare\.net/\w{12}'
+
+ __description__ = """EpicShare.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("t4skforce", "t4skforce1337[AT]gmail[DOT]com")]
+
+
+getInfo = create_getInfo(EpicShareNet)
diff --git a/pyload/plugins/hoster/EuroshareEu.py b/pyload/plugins/hoster/EuroshareEu.py
new file mode 100644
index 000000000..e170facca
--- /dev/null
+++ b/pyload/plugins/hoster/EuroshareEu.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class EuroshareEu(SimpleHoster):
+ __name__ = "EuroshareEu"
+ __type__ = "hoster"
+ __version__ = "0.26"
+
+ __pattern__ = r'http://(?:www\.)?euroshare\.(eu|sk|cz|hu|pl)/file/.*'
+
+ __description__ = """Euroshare.eu hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ INFO_PATTERN = r'<span style="float: left;"><strong>(?P<N>.+?)</strong> \((?P<S>.+?)\)</span>'
+ OFFLINE_PATTERN = ur'<h2>S.bor sa nena.iel</h2>|Požadovaná stránka neexistuje!'
+
+ FREE_URL_PATTERN = r'<a href="(/file/\d+/[^/]*/download/)"><div class="downloadButton"'
+ ERR_PARDL_PATTERN = r'<h2>Prebieha s.ahovanie</h2>|<p>Naraz je z jednej IP adresy mo.n. s.ahova. iba jeden s.bor'
+ ERR_NOT_LOGGED_IN_PATTERN = r'href="/customer-zone/login/"'
+
+ URL_REPLACEMENTS = [(r"(http://[^/]*\.)(sk|cz|hu|pl)/", r"\1eu/")]
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = self.premium
+ self.req.setOption("timeout", 120)
+
+
+ def handlePremium(self):
+ if self.ERR_NOT_LOGGED_IN_PATTERN in self.html:
+ self.account.relogin(self.user)
+ self.retry(reason=_("User not logged in"))
+
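+ # The premium direct link is simply the file URL with "/download/" appended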
+ self.download(self.pyfile.url.rstrip('/') + "/download/")
+
+ check = self.checkDownload({"login": re.compile(self.ERR_NOT_LOGGED_IN_PATTERN),
+ "json": re.compile(r'\{"status":"error".*?"message":"(.*?)"')})
+ if check == "login" or (check == "json" and self.lastCheck.group(1) == "Access token expired"):
+ self.account.relogin(self.user)
+ self.retry(reason=_("Access token expired"))
+ elif check == "json":
+ self.fail(self.lastCheck.group(1))
+
+
+ def handleFree(self):
+ if re.search(self.ERR_PARDL_PATTERN, self.html) is not None:
+ self.longWait(5 * 60, 12)
+
+ m = re.search(self.FREE_URL_PATTERN, self.html)
+ if m is None:
+ self.error(_("FREE_URL_PATTERN not found"))
+ parsed_url = "http://euroshare.eu%s" % m.group(1)
+ self.logDebug("URL", parsed_url)
+ self.download(parsed_url, disposition=True)
+
+ check = self.checkDownload({"multi_dl": re.compile(self.ERR_PARDL_PATTERN)})
+ if check == "multi_dl":
+ self.longWait(5 * 60, 12)
+
+
+getInfo = create_getInfo(EuroshareEu)
diff --git a/pyload/plugins/hoster/ExtabitCom.py b/pyload/plugins/hoster/ExtabitCom.py
new file mode 100644
index 000000000..6c0dbd185
--- /dev/null
+++ b/pyload/plugins/hoster/ExtabitCom.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.utils import json_loads
+
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class ExtabitCom(SimpleHoster):
+ __name__ = "ExtabitCom"
+ __type__ = "hoster"
+ __version__ = "0.62"
+
+ __pattern__ = r'http://(?:www\.)?extabit\.com/(file|go|fid)/(?P<ID>\w+)'
+
+ __description__ = """Extabit.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<th>File:</th>\s*<td class="col-fileinfo">\s*<div title="(?P<N>[^"]+)">'
+ SIZE_PATTERN = r'<th>Size:</th>\s*<td class="col-fileinfo">(?P<S>[^<]+)</td>'
+ OFFLINE_PATTERN = r'>File not found<'
+ TEMP_OFFLINE_PATTERN = r'>(File is temporary unavailable|No download mirror)<'
+
+ LINK_PATTERN = r'[\'"](http://guest\d+\.extabit\.com/\w+/.*?)[\'"]'
+
+
+ def handleFree(self):
+ if r">Only premium users can download this file" in self.html:
+ self.fail(_("Only premium users can download this file"))
+
+ m = re.search(r"Next free download from your ip will be available in <b>(\d+)\s*minutes", self.html)
+ if m:
+ self.wait(int(m.group(1)) * 60, True)
+ elif "The daily downloads limit from your IP is exceeded" in self.html:
+ self.logWarning(_("You have reached your daily downloads limit for today"))
+ self.wait(secondsToMidnight(gmt=2), True)
+
+ self.logDebug("URL: " + self.req.http.lastEffectiveURL)
+ m = re.match(self.__pattern__, self.req.http.lastEffectiveURL)
+ fileID = m.group('ID') if m else self.info['ID']
+
+ m = re.search(r'recaptcha/api/challenge\?k=(\w+)', self.html)
+ if m:
+ recaptcha = ReCaptcha(self)
+ captcha_key = m.group(1)
+
+ for _i in xrange(5):
+ get_data = {"type": "recaptcha"}
+ get_data['challenge'], get_data['capture'] = recaptcha.challenge(captcha_key)
+ res = json_loads(self.load("http://extabit.com/file/%s/" % fileID, get=get_data))
+ if "ok" in res:
+ self.correctCaptcha()
+ break
+ else:
+ self.invalidCaptcha()
+ else:
+ self.fail(_("Invalid captcha"))
+ else:
+ self.error(_("Captcha"))
+
+ if not "href" in res:
+ self.error(_("Bad JSON response"))
+
+ self.html = self.load("http://extabit.com/file/%s%s" % (fileID, res['href']))
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+
+ url = m.group(1)
+ self.download(url)
+
+
+getInfo = create_getInfo(ExtabitCom)
diff --git a/pyload/plugins/hoster/FastixRu.py b/pyload/plugins/hoster/FastixRu.py
new file mode 100644
index 000000000..0e353f362
--- /dev/null
+++ b/pyload/plugins/hoster/FastixRu.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import randrange
+from urllib import unquote
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class FastixRu(Hoster):
+ __name__ = "FastixRu"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?fastix\.(ru|it)/file/(?P<ID>\w{24})'
+
+ __description__ = """Fastix hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Massimo Rosamilia", "max@spiritix.eu")]
+
+
+ def getFilename(self, url):
+ try:
+ name = unquote(url.rsplit("/", 1)[1])
+ except IndexError:
+ name = "Unknown_Filename..."
+ if name.endswith("..."): # incomplete filename, append random stuff
+ name += "%s.tmp" % randrange(100, 999)
+ return name
+
+
+ def setup(self):
+ self.chunkLimit = 3
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Fastix")
+ self.fail(_("No Fastix account provided"))
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ api_key = self.account.getAccountData(self.user)
+ api_key = api_key['api']
+ url = "http://fastix.ru/api_v2/?apikey=%s&sub=getdirectlink&link=%s" % (api_key, pyfile.url)
+ page = self.load(url)
+ data = json_loads(page)
+ self.logDebug("Json data", data)
+ if "error\":true" in page:
+ self.offline()
+ else:
+ new_url = data['downloadlink']
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown"):
+ # Only use when the name wasn't already set
+ pyfile.name = self.getFilename(new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({"error": "<title>An error occurred while processing your request</title>",
+ "empty": re.compile(r"^$")})
+
+ if check == "error":
+ self.retry(wait_time=60, reason=_("An error occurred while generating link"))
+ elif check == "empty":
+ self.retry(wait_time=60, reason=_("Downloaded File was empty"))
diff --git a/pyload/plugins/hoster/FastshareCz.py b/pyload/plugins/hoster/FastshareCz.py
new file mode 100644
index 000000000..17ba2add4
--- /dev/null
+++ b/pyload/plugins/hoster/FastshareCz.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://www.fastshare.cz/2141189/random.bin
+
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FastshareCz(SimpleHoster):
+ __name__ = "FastshareCz"
+ __type__ = "hoster"
+ __version__ = "0.23"
+
+ __pattern__ = r'http://(?:www\.)?fastshare\.cz/\d+/.+'
+
+ __description__ = """FastShare.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ INFO_PATTERN = r'<h1 class="dwp">(?P<N>[^<]+)</h1>\s*<div class="fileinfo">\s*Size\s*: (?P<S>\d+) (?P<U>[\w^_]+),'
+ OFFLINE_PATTERN = r'>(The file has been deleted|Requested page not found)'
+
+ URL_REPLACEMENTS = [("#.*", "")]
+
+ COOKIES = [("fastshare.cz", "lang", "en")]
+
+ FREE_URL_PATTERN = r'action=(/free/.*?)>\s*<img src="([^"]*)"><br'
+ PREMIUM_URL_PATTERN = r'(http://data\d+\.fastshare\.cz/download\.php\?id=\d+&)'
+ CREDIT_PATTERN = r' credit for '
+
+
+ def handleFree(self):
+ if "> 100% of FREE slots are full" in self.html:
+ self.retry(12, 60, _("No free slots"))
+
+ m = re.search(self.FREE_URL_PATTERN, self.html)
+ if m:
+ action, captcha_src = m.groups()
+ else:
+ self.error(_("FREE_URL_PATTERN not found"))
+
+ baseurl = "http://www.fastshare.cz"
+ captcha = self.decryptCaptcha(urljoin(baseurl, captcha_src))
+ self.download(urljoin(baseurl, action), post={"code": captcha, "btn.x": 77, "btn.y": 18})
+
+ check = self.checkDownload({
+ 'parallel_dl': "<title>FastShare.cz</title>|<script>alert\('Pres FREE muzete stahovat jen jeden soubor najednou.'\)",
+ 'wrong_captcha': "Download for FREE"
+ })
+
+ if check == "paralell_dl":
+ self.retry(6, 10 * 60, _("Paralell download"))
+ elif check == "wrong_captcha":
+ self.retry(max_tries=5, reason=_("Wrong captcha"))
+
+
+ def handlePremium(self):
+ header = self.load(self.pyfile.url, just_header=True)
+ if "location" in header:
+ url = header['location']
+ elif self.CREDIT_PATTERN in self.html:
+ self.logWarning(_("Not enough traffic left"))
+ self.resetAccount()
+ else:
+ m = re.search(self.PREMIUM_URL_PATTERN, self.html)
+ if m:
+ url = m.group(1)
+ else:
+ self.error(_("PREMIUM_URL_PATTERN not found"))
+
+ self.logDebug("PREMIUM URL: " + url)
+ self.download(url, disposition=True)
+
+ check = self.checkDownload({"credit": re.compile(self.CREDIT_PATTERN)})
+ if check == "credit":
+ self.resetAccount()
+
+
+getInfo = create_getInfo(FastshareCz)
diff --git a/pyload/plugins/hoster/FileApeCom.py b/pyload/plugins/hoster/FileApeCom.py
new file mode 100644
index 000000000..307e0df8d
--- /dev/null
+++ b/pyload/plugins/hoster/FileApeCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FileApeCom(DeadHoster):
+ __name__ = "FileApeCom"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'http://(?:www\.)?fileape\.com/(index\.php\?act=download\&id=|dl/)\w+'
+
+ __description__ = """FileApe.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("espes", None)]
+
+
+getInfo = create_getInfo(FileApeCom)
diff --git a/pyload/plugins/hoster/FileParadoxIn.py b/pyload/plugins/hoster/FileParadoxIn.py
new file mode 100644
index 000000000..174aa7211
--- /dev/null
+++ b/pyload/plugins/hoster/FileParadoxIn.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class FileParadoxIn(XFSHoster):
+ __name__ = "FileParadoxIn"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'https?://(?:www\.)?fileparadox\.in/\w{12}'
+
+ __description__ = """FileParadox.in hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RazorWing", "muppetuk1@hotmail.com")]
+
+
+ HOSTER_DOMAIN = "fileparadox.in"
+
+ SIZE_PATTERN = r'</font>\s*\(\s*(?P<S>[^)]+)\s*\)</font>'
+
+
+getInfo = create_getInfo(FileParadoxIn)
diff --git a/pyload/plugins/hoster/FileSharkPl.py b/pyload/plugins/hoster/FileSharkPl.py
new file mode 100644
index 000000000..99cb4b51b
--- /dev/null
+++ b/pyload/plugins/hoster/FileSharkPl.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FileSharkPl(SimpleHoster):
+ __name__ = "FileSharkPl"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?fileshark\.pl/pobierz/\d{6}/\w{5}'
+
+ __description__ = """FileShark.pl hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("prOq", None),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'<h2 class="name-file">(?P<N>.+)</h2>'
+ SIZE_PATTERN = r'<p class="size-file">(.*?)<strong>(?P<S>\d+\.?\d*)\s(?P<U>\w+)</strong></p>'
+
+ OFFLINE_PATTERN = '(P|p)lik zosta. (usuni.ty|przeniesiony)'
+
+ DOWNLOAD_ALERT = r'<p class="lead text-center alert alert-warning">(.*?)</p>'
+ IP_BLOCKED_PATTERN = 'Strona jest dost.pna wy..cznie dla u.ytkownik.w znajduj.cych si. na terenie Polski'
+ DOWNLOAD_SLOTS_ERROR_PATTERN = r'Osi.gni.to maksymaln. liczb. .ci.ganych jednocze.nie plik.w\.'
+
+ DOWNLOAD_URL_FREE = r'<a href="(.*?)" class="btn-upload-free">'
+ DOWNLOAD_URL_PREMIUM = r'<a href="(.*?)" class="btn-upload-premium">'
+
+ SECONDS_PATTERN = r'var timeToDownload = (\d+);'
+
+ CAPTCHA_IMG_PATTERN = '<img src="data:image/jpeg;base64,(.*?)" title="captcha"'
+ CAPTCHA_TOKEN_PATTERN = r'name="form\[_token\]" value="(.*?)" />'
+
+
+ def setup(self):
+ self.resumeDownload = True
+ if self.premium:
+ self.multiDL = True
+ self.limitDL = 20
+ else:
+ self.multiDL = False
+
+
+ def prepare(self):
+ super(FileSharkPl, self).prepare()
+
+ m = re.search(self.DOWNLOAD_ALERT, self.html)
+ if m is None:
+ return
+
+ alert = m.group(1)
+
+ if re.match(self.IP_BLOCKED_PATTERN, alert):
+ self.fail(_("Only connections from Polish IP are allowed"))
+ elif re.match(self.DOWNLOAD_SLOTS_ERROR_PATTERN, alert):
+ self.logInfo(_("No free download slots available"))
+ self.retry(10, 30 * 60, _("Still no free download slots available"))
+ else:
+ self.logInfo(alert)
+ self.retry(10, 10 * 60, _("Try again later"))
+
+
+ #@NOTE: handlePremium method has never been tested
+ def handlePremium(self):
+ self.logDebug("Premium accounts support in experimental modus!")
+ m = re.search(self.DOWNLOAD_URL_PREMIUM, self.html)
+ file_url = urljoin("http://fileshark.pl", m.group(1))
+
+ self.download(file_url, disposition=True)
+ self.checkDownload()
+
+
+ def handleFree(self):
+ m = re.search(self.DOWNLOAD_URL_FREE, self.html)
+ if m is None:
+ self.error(_("Download url not found"))
+
+ file_url = urljoin("http://fileshark.pl", m.group(1))
+
+ m = re.search(self.SECONDS_PATTERN, self.html)
+ if m:
+ seconds = int(m.group(1))
+ self.logDebug("Wait %s seconds" % seconds)
+ self.wait(seconds + 2)
+
+ action, inputs = self.parseHtmlForm('action=""')
+ m = re.search(self.CAPTCHA_TOKEN_PATTERN, self.html)
+ if m is None:
+ self.retry(reason=_("Captcha form not found"))
+
+ inputs['form[_token]'] = m.group(1)
+
+ m = re.search(self.CAPTCHA_IMG_PATTERN, self.html)
+ if m is None:
+ self.retry(reason=_("Captcha image not found"))
+
+ tmp_load = self.load
+ self.load = self.decode64 #: injects decode64 inside decryptCaptcha
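+ # decryptCaptcha() normally fetches the captcha via self.load(); since the image is embedded as base64 data here, self.load is temporarily replaced with decode64 and restored afterwards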
+
+ inputs['form[captcha]'] = self.decryptCaptcha(m.group(1), imgtype='jpeg')
+ inputs['form[start]'] = ""
+
+ self.load = tmp_load
+
+ self.download(file_url, post=inputs, cookies=True, disposition=True)
+ self.checkDownload()
+
+
+ def checkDownload(self):
+ check = super(FileSharkPl, self).checkDownload({
+ 'wrong_captcha': re.compile(r'<label for="form_captcha" generated="true" class="error">(.*?)</label>'),
+ 'wait_pattern': re.compile(self.SECONDS_PATTERN),
+ 'DL-found': re.compile('<a href="(.*)">')
+ })
+
+ if check == "DL-found":
+ self.correctCaptcha()
+
+ elif check == "wrong_captcha":
+ self.invalidCaptcha()
+ self.retry(10, 1, _("Wrong captcha solution"))
+
+ elif check == "wait_pattern":
+ self.retry()
+
+
+ def decode64(self, data, *args, **kwargs):
+ return data.decode("base64")
+
+
+getInfo = create_getInfo(FileSharkPl)
diff --git a/pyload/plugins/hoster/FileStoreTo.py b/pyload/plugins/hoster/FileStoreTo.py
new file mode 100644
index 000000000..f1425d3d6
--- /dev/null
+++ b/pyload/plugins/hoster/FileStoreTo.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FileStoreTo(SimpleHoster):
+ __name__ = "FileStoreTo"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?filestore\.to/\?d=(?P<ID>\w+)'
+
+ __description__ = """FileStore.to hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ INFO_PATTERN = r'File: <span[^>]*>(?P<N>.+)</span><br />Size: (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+ OFFLINE_PATTERN = r'>Download-Datei wurde nicht gefunden<'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+
+ def handleFree(self):
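+ # Wait, read the hidden "wert" token, then ask the ajax endpoint for the direct download link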
+ self.wait(10)
+ ldc = re.search(r'wert="(\w+)"', self.html).group(1)
+ link = self.load("http://filestore.to/ajax/download.php", get={"LDC": ldc})
+ self.download(link)
+
+
+getInfo = create_getInfo(FileStoreTo)
diff --git a/pyload/plugins/hoster/FilebeerInfo.py b/pyload/plugins/hoster/FilebeerInfo.py
new file mode 100644
index 000000000..dcc6d1513
--- /dev/null
+++ b/pyload/plugins/hoster/FilebeerInfo.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FilebeerInfo(DeadHoster):
+ __name__ = "FilebeerInfo"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?filebeer\.info/(?!\d*~f)(?P<ID>\w+).*'
+
+ __description__ = """Filebeer.info plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(FilebeerInfo)
diff --git a/pyload/plugins/hoster/FilecloudIo.py b/pyload/plugins/hoster/FilecloudIo.py
new file mode 100644
index 000000000..7dc9a3a16
--- /dev/null
+++ b/pyload/plugins/hoster/FilecloudIo.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FilecloudIo(SimpleHoster):
+ __name__ = "FilecloudIo"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?(?:filecloud\.io|ifile\.it|mihd\.net)/(?P<ID>\w+).*'
+
+ __description__ = """Filecloud.io hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ SIZE_PATTERN = r'{var __ab1 = (?P<S>\d+);}'
+ NAME_PATTERN = r'id="aliasSpan">(?P<N>.*?)&nbsp;&nbsp;<'
+ OFFLINE_PATTERN = r'l10n\.(FILES__DOESNT_EXIST|REMOVED)'
+ TEMP_OFFLINE_PATTERN = r'l10n\.FILES__WARNING'
+
+ UKEY_PATTERN = r'\'ukey\'\s*:\'(\w+)'
+ AB1_PATTERN = r'if\( __ab1 == \'(\w+)\' \)'
+ ERROR_MSG_PATTERN = r'var __error_msg\s*=\s*l10n\.(.*?);'
+ RECAPTCHA_PATTERN = r'var __recaptcha_public\s*=\s*\'(.+?)\';'
+
+ LINK_PATTERN = r'"(http://s\d+\.filecloud\.io/%s/\d+/.*?)"'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = 1
+
+
+ def handleFree(self):
+ data = {"ukey": self.info['ID']}
+
+ m = re.search(self.AB1_PATTERN, self.html)
+ if m is None:
+ self.error(_("__AB1"))
+ data['__ab1'] = m.group(1)
+
+ recaptcha = ReCaptcha(self)
+
+ m = re.search(self.RECAPTCHA_PATTERN, self.html)
+ captcha_key = m.group(1) if m else recaptcha.detect_key()
+
+ if captcha_key is None:
+ self.error(_("ReCaptcha key not found"))
+
+ if not self.account:
+ self.fail(_("User not logged in"))
+ elif not self.account.logged_in:
+ captcha_challenge, captcha_response = recaptcha.challenge(captcha_key)
+ self.account.form_data = {"recaptcha_challenge_field": captcha_challenge,
+ "recaptcha_response_field": captcha_response}
+ self.account.relogin(self.user)
+ self.retry(2)
+
+ json_url = "http://filecloud.io/download-request.json"
+ res = self.load(json_url, post=data)
+ self.logDebug(res)
+ res = json_loads(res)
+
+ if "error" in res and res['error']:
+ self.fail(res)
+
+ self.logDebug(res)
+ if res['captcha']:
+ data['ctype'] = "recaptcha"
+
+ for _i in xrange(5):
+ data['recaptcha_challenge'], data['recaptcha_response'] = recaptcha.challenge(captcha_key)
+
+ json_url = "http://filecloud.io/download-request.json"
+ res = self.load(json_url, post=data)
+ self.logDebug(res)
+ res = json_loads(res)
+
+ if "retry" in res and res['retry']:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail(_("Incorrect captcha"))
+
+ if res['dl']:
+ self.html = self.load('http://filecloud.io/download.html')
+
+ m = re.search(self.LINK_PATTERN % self.info['ID'], self.html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+
+ if "size" in self.info and self.info['size']:
+ self.check_data = {"size": int(self.info['size'])}
+
+ download_url = m.group(1)
+ self.download(download_url)
+ else:
+ self.fail(_("Unexpected server response"))
+
+
+ def handlePremium(self):
+ akey = self.account.getAccountData(self.user)['akey']
+ ukey = self.info['ID']
+ self.logDebug("Akey: %s | Ukey: %s" % (akey, ukey))
+ rep = self.load("http://api.filecloud.io/api-fetch_download_url.api",
+ post={"akey": akey, "ukey": ukey})
+ self.logDebug("FetchDownloadUrl: " + rep)
+ rep = json_loads(rep)
+ if rep['status'] == 'ok':
+ self.download(rep['download_url'], disposition=True)
+ else:
+ self.fail(rep['message'])
+
+
+getInfo = create_getInfo(FilecloudIo)
diff --git a/pyload/plugins/hoster/FilefactoryCom.py b/pyload/plugins/hoster/FilefactoryCom.py
new file mode 100644
index 000000000..969802703
--- /dev/null
+++ b/pyload/plugins/hoster/FilefactoryCom.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+
+
+def getInfo(urls):
+ for url in urls:
+ h = getURL(url, just_header=True)
+ m = re.search(r'Location: (.+)\r\n', h)
+ if m and not re.match(FilefactoryCom.__pattern__, m.group(1)): #: It's a direct link! Skipping
+ yield (url, 0, 3, url)
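+ # Tuple format: (name, size, status, url) with status 3 = unknown; the direct link is reused as the name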
+ else: #: It's a standard html page
+ yield parseFileInfo(FilefactoryCom, url, getURL(url))
+
+
+class FilefactoryCom(SimpleHoster):
+ __name__ = "FilefactoryCom"
+ __type__ = "hoster"
+ __version__ = "0.52"
+
+ __pattern__ = r'https?://(?:www\.)?filefactory\.com/(file|trafficshare/\w+)/\w+'
+
+ __description__ = """Filefactory.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ INFO_PATTERN = r'<div id="file_name"[^>]*>\s*<h2>(?P<N>[^<]+)</h2>\s*<div id="file_info">\s*(?P<S>[\d.,]+) (?P<U>[\w^_]+) uploaded'
+ OFFLINE_PATTERN = r'<h2>File Removed</h2>|This file is no longer available'
+
+ LINK_PATTERN = r'"([^"]+filefactory\.com/get.+?)"'
+
+ WAIT_PATTERN = r'<div id="countdown_clock" data-delay="(\d+)">'
+ PREMIUM_ONLY_PATTERN = r'>Premium Account Required'
+
+ COOKIES = [("filefactory.com", "locale", "en_US.utf8")]
+
+
+ def handleFree(self):
+ if "Currently only Premium Members can download files larger than" in self.html:
+ self.fail(_("File too large for free download"))
+ elif "All free download slots on this server are currently in use" in self.html:
+ self.retry(50, 15 * 60, _("All free slots are busy"))
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Free download link not found"))
+
+ dl_link = m.group(1)
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ self.wait(m.group(1))
+
+ self.download(dl_link, disposition=True)
+
+ check = self.checkDownload({'multiple': "You are currently downloading too many files at once.",
+ 'error': '<div id="errorMessage">'})
+
+ if check == "multiple":
+ self.logDebug("Parallel downloads detected; waiting 15 minutes")
+ self.retry(wait_time=15 * 60, reason=_("Parallel downloads"))
+ elif check == "error":
+ self.error(_("Unknown error"))
+
+
+ def handlePremium(self):
+ header = self.load(self.pyfile.url, just_header=True)
+
+ if 'location' in header:
+ url = header['location'].strip()
+ if not url.startswith("http://"):
+ url = urljoin("http://www.filefactory.com", url)
+ elif 'content-disposition' in header:
+ url = self.pyfile.url
+ else:
+ html = self.load(self.pyfile.url)
+ m = re.search(self.LINK_PATTERN, html)
+ if m:
+ url = m.group(1)
+ else:
+ self.error(_("Premium download link not found"))
+
+ self.download(url, disposition=True)
diff --git a/pyload/plugins/hoster/FilejungleCom.py b/pyload/plugins/hoster/FilejungleCom.py
new file mode 100644
index 000000000..1cfa4c29e
--- /dev/null
+++ b/pyload/plugins/hoster/FilejungleCom.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.FileserveCom import FileserveCom, checkFile
+from pyload.plugins.Plugin import chunks
+
+
+class FilejungleCom(FileserveCom):
+ __name__ = "FilejungleCom"
+ __type__ = "hoster"
+ __version__ = "0.51"
+
+ __pattern__ = r'http://(?:www\.)?filejungle\.com/f/(?P<id>[^/]+).*'
+
+ __description__ = """Filejungle.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ URLS = ["http://www.filejungle.com/f/", "http://www.filejungle.com/check_links.php",
+ "http://www.filejungle.com/checkReCaptcha.php"]
+ LINKCHECK_TR = r'<li>\s*(<div class="col1">.*?)</li>'
+ LINKCHECK_TD = r'<div class="(?:col )?col\d">(?:<[^>]*>|&nbsp;)*([^<]*)'
+
+ LONG_WAIT_PATTERN = r'<h1>Please wait for (\d+) (\w+)\s*to download the next file\.</h1>'
+
+
+def getInfo(urls):
+ for chunk in chunks(urls, 100):
+ yield checkFile(FilejungleCom, chunk)
diff --git a/pyload/plugins/hoster/FileomCom.py b/pyload/plugins/hoster/FileomCom.py
new file mode 100644
index 000000000..128039c2e
--- /dev/null
+++ b/pyload/plugins/hoster/FileomCom.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://fileom.com/gycaytyzdw3g/random.bin.html
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class FileomCom(XFSHoster):
+ __name__ = "FileomCom"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'https?://(?:www\.)?fileom\.com/\w{12}'
+
+ __description__ = """Fileom.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "fileom.com"
+
+ NAME_PATTERN = r'Filename: <span>(?P<N>.+?)<'
+ SIZE_PATTERN = r'File Size: <span class="size">(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ LINK_PATTERN = r'var url2 = \'(.+?)\';'
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = 1
+ self.resumeDownload = self.premium
+
+
+getInfo = create_getInfo(FileomCom)
diff --git a/pyload/plugins/hoster/FilepostCom.py b/pyload/plugins/hoster/FilepostCom.py
new file mode 100644
index 000000000..97fdd6c67
--- /dev/null
+++ b/pyload/plugins/hoster/FilepostCom.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FilepostCom(SimpleHoster):
+ __name__ = "FilepostCom"
+ __type__ = "hoster"
+ __version__ = "0.29"
+
+ __pattern__ = r'https?://(?:www\.)?(?:filepost\.com/files|fp\.io)/([^/]+).*'
+
+ __description__ = """Filepost.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ INFO_PATTERN = r'<input type="text" id="url" value=\'<a href[^>]*>(?P<N>[^>]+?) - (?P<S>[\d.,]+) (?P<U>[\w^_]+)</a>\' class="inp_text"/>'
+ OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>|<div class="file_info file_info_deleted">'
+
+ PREMIUM_ONLY_PATTERN = r'members only. Please upgrade to premium|a premium membership is required to download this file'
+ RECAPTCHA_PATTERN = r'Captcha.init\({\s*key:\s*\'(.+?)\''
+ FLP_TOKEN_PATTERN = r'set_store_options\({token: \'(.+?)\''
+
+
+ def handleFree(self):
+ # Find token and captcha key
+ file_id = re.match(self.__pattern__, self.pyfile.url).group(1)
+
+ m = re.search(self.FLP_TOKEN_PATTERN, self.html)
+ if m is None:
+ self.error(_("Token"))
+ flp_token = m.group(1)
+
+ m = re.search(self.RECAPTCHA_PATTERN, self.html)
+ if m is None:
+ self.error(_("Captcha key"))
+ captcha_key = m.group(1)
+
+ # Get wait time
+ get_dict = {'SID': self.req.cj.getCookie('SID'), 'JsHttpRequest': str(int(time() * 10000)) + '-xml'}
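+ # The JsHttpRequest value mimics the site's AJAX request ids: a timestamp-based number suffixed with "-xml" (assumed from the observed format)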
+ post_dict = {'action': 'set_download', 'token': flp_token, 'code': file_id}
+ wait_time = int(self.getJsonResponse(get_dict, post_dict, 'wait_time'))
+
+ if wait_time > 0:
+ self.wait(wait_time)
+
+ post_dict = {"token": flp_token, "code": file_id, "file_pass": ''}
+
+ if 'var is_pass_exists = true;' in self.html:
+ # Solve password
+ for file_pass in self.getPassword().splitlines():
+ get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
+ post_dict['file_pass'] = file_pass
+ self.logInfo(_("Password protected link, trying ") + file_pass)
+
+ download_url = self.getJsonResponse(get_dict, post_dict, 'link')
+ if download_url:
+ break
+
+ else:
+ self.fail(_("No or incorrect password"))
+
+ else:
+ # Solve recaptcha
+ recaptcha = ReCaptcha(self)
+
+ for i in xrange(5):
+ get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
+ if i:
+ post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field'] = recaptcha.challenge(
+ captcha_key)
+ self.logDebug(u"RECAPTCHA: %s : %s : %s" % (
+ captcha_key, post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field']))
+
+ download_url = self.getJsonResponse(get_dict, post_dict, 'link')
+ if download_url:
+ if i:
+ self.correctCaptcha()
+ break
+ elif i:
+ self.invalidCaptcha()
+
+ else:
+ self.fail(_("Invalid captcha"))
+
+ # Download
+ self.download(download_url)
+
+
+ def getJsonResponse(self, get_dict, post_dict, field):
+ json_response = json_loads(self.load('https://filepost.com/files/get/', get=get_dict, post=post_dict))
+ self.logDebug(json_response)
+
+ if not 'js' in json_response:
+ self.error(_("JSON %s 1") % field)
+
+ # Changed js_answer to json_response['js'] since js_answer is never set.
+ # The JSON-HTTP specs are not known in detail, but the previous author
+ # accessed json_response['js']['error'] as well as js_answer['error'];
+ # see the two lines commented out with "# ~?".
+ if 'error' in json_response['js']:
+ if json_response['js']['error'] == 'download_delay':
+ self.retry(wait_time=json_response['js']['params']['next_download'])
+ # ~? self.retry(wait_time=js_answer['params']['next_download'])
+ elif 'Wrong file password' in json_response['js']['error']:
+ return None
+ elif 'You entered a wrong CAPTCHA code' in json_response['js']['error']:
+ return None
+ elif 'CAPTCHA Code nicht korrekt' in json_response['js']['error']:
+ return None
+ elif 'CAPTCHA' in json_response['js']['error']:
+ self.logDebug("Error response is unknown, but mentions CAPTCHA")
+ return None
+ else:
+ self.fail(json_response['js']['error'])
+
+ if not 'answer' in json_response['js'] or not field in json_response['js']['answer']:
+ self.error(_("JSON %s 2") % field)
+
+ return json_response['js']['answer'][field]
+
+
+getInfo = create_getInfo(FilepostCom)
diff --git a/pyload/plugins/hoster/FilepupNet.py b/pyload/plugins/hoster/FilepupNet.py
new file mode 100644
index 000000000..0b1247bf4
--- /dev/null
+++ b/pyload/plugins/hoster/FilepupNet.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://www.filepup.net/files/k5w4ZVoF1410184283.html
+# http://www.filepup.net/files/R4GBq9XH1410186553.html
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FilepupNet(SimpleHoster):
+ __name__ = "FilepupNet"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?filepup\.net/files/\w+'
+
+ __description__ = """Filepup.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'>(?P<N>.+?)</h1>'
+ SIZE_PATTERN = r'class="fa fa-archive"></i> \((?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ OFFLINE_PATTERN = r'>This file has been deleted'
+
+ LINK_PATTERN = r'(http://www\.filepup\.net/get/.+?)\''
+
+
+ def setup(self):
+ self.multiDL = False
+ self.chunkLimit = 1
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Download link not found"))
+
+ dl_link = m.group(1)
+ self.download(dl_link, post={'task': "download"})
+
+ check = self.checkDownload({'html': re.compile("html")})
+ if check == "html":
+ self.error(_("Downloaded file is an html page"))
+
+
+getInfo = create_getInfo(FilepupNet)
diff --git a/pyload/plugins/hoster/FilerNet.py b/pyload/plugins/hoster/FilerNet.py
new file mode 100644
index 000000000..e34a5799e
--- /dev/null
+++ b/pyload/plugins/hoster/FilerNet.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://filer.net/get/ivgf5ztw53et3ogd
+# http://filer.net/get/hgo14gzcng3scbvv
+
+import pycurl
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FilerNet(SimpleHoster):
+ __name__ = "FilerNet"
+ __type__ = "hoster"
+ __version__ = "0.07"
+
+ __pattern__ = r'https?://(?:www\.)?filer\.net/get/\w+'
+
+ __description__ = """Filer.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ INFO_PATTERN = r'<h1 class="page-header">Free Download (?P<N>\S+) <small>(?P<S>[\w.]+) (?P<U>[\w^_]+)</small></h1>'
+ OFFLINE_PATTERN = r'Nicht gefunden'
+
+ LINK_PATTERN = r'href="([^"]+)">Get download</a>'
+
+
+ def handleFree(self):
+ # Wait between downloads
+ m = re.search(r'musst du <span id="time">(\d+)</span> Sekunden warten', self.html)
+ if m:
+ self.retry(wait_time=int(m.group(1)), reason=_("Wait between free downloads"))
+
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ inputs = self.parseHtmlForm(input_names='token')[1]
+ if 'token' not in inputs:
+ self.error(_("Unable to detect token"))
+ token = inputs['token']
+ self.logDebug("Token: " + token)
+
+ self.html = self.load(self.pyfile.url, post={'token': token}, decode=True)
+
+ inputs = self.parseHtmlForm(input_names='hash')[1]
+ if 'hash' not in inputs:
+ self.error(_("Unable to detect hash"))
+ hash_data = inputs['hash']
+ self.logDebug("Hash: " + hash_data)
+
+ downloadURL = r''
+ recaptcha = ReCaptcha(self)
+
+ for _i in xrange(5):
+ challenge, response = recaptcha.challenge()
+ post_data = {'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': response,
+ 'hash': hash_data}
+
+ # Workaround for the 0.4.9 just_header issue; in 0.5 this can be simplified to use just_header
+ self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 0)
+ self.load(self.pyfile.url, post=post_data)
+ self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 1)
+
+ if 'location' in self.req.http.header.lower():
+ location = re.search(r'location: (\S+)', self.req.http.header, re.I).group(1)
+ downloadURL = urljoin('http://filer.net', location)
+ self.correctCaptcha()
+ break
+ else:
+ self.invalidCaptcha()
+
+ if not downloadURL:
+ self.fail(_("No Download url retrieved/all captcha attempts failed"))
+
+ self.download(downloadURL, disposition=True)
+
+
+ def handlePremium(self):
+ header = self.load(self.pyfile.url, just_header=True)
+ if 'location' in header: # Direct Download ON
+ dl = self.pyfile.url
+ else: # Direct Download OFF
+ html = self.load(self.pyfile.url)
+ m = re.search(self.LINK_PATTERN, html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+ dl = 'http://filer.net' + m.group(1)
+
+ self.logDebug("Direct link: " + dl)
+ self.download(dl, disposition=True)
+
+
+getInfo = create_getInfo(FilerNet)
diff --git a/pyload/plugins/hoster/FilerioCom.py b/pyload/plugins/hoster/FilerioCom.py
new file mode 100644
index 000000000..f4582b876
--- /dev/null
+++ b/pyload/plugins/hoster/FilerioCom.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class FilerioCom(XFSHoster):
+ __name__ = "FilerioCom"
+ __type__ = "hoster"
+ __version__ = "0.06"
+
+ __pattern__ = r'http://(?:www\.)?(filerio\.(in|com)|filekeen\.com)/\w{12}'
+
+ __description__ = """FileRio.in hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "filerio.in"
+
+ OFFLINE_PATTERN = r'>&quot;File Not Found|File has been removed'
+ URL_REPLACEMENTS = [(r'/(?:embed-)?(\w{12}).*', r'/\1'), (r'filekeen\.com', "filerio.in")]
+
+
+getInfo = create_getInfo(FilerioCom)
diff --git a/pyload/plugins/hoster/FilesMailRu.py b/pyload/plugins/hoster/FilesMailRu.py
new file mode 100644
index 000000000..1c7c8059e
--- /dev/null
+++ b/pyload/plugins/hoster/FilesMailRu.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+
+
+def getInfo(urls):
+ result = []
+ for chunk in chunks(urls, 10):
+ for url in chunk:
+ html = getURL(url)
+ if r'<div class="errorMessage mb10">' in html:
+ result.append((url, 0, 1, url))
+ elif r'Page cannot be displayed' in html:
+ result.append((url, 0, 1, url))
+ else:
+ try:
+ url_pattern = '<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
+ file_name = re.search(url_pattern, html).group(0).split(', event)">')[1].split('</a>')[0]
+ result.append((file_name, 0, 2, url))
+ except:
+ pass
+
+ # status 1=OFFLINE, 2=OK, 3=UNKNOWN
+ # result.append((#name,#size,#status,#url))
+ yield result
+
+
+class FilesMailRu(Hoster):
+ __name__ = "FilesMailRu"
+ __type__ = "hoster"
+ __version__ = "0.31"
+
+ __pattern__ = r'http://(?:www\.)?files\.mail\.ru/.*'
+
+ __description__ = """Files.mail.ru hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("oZiRiz", "ich@oziriz.de")]
+
+
+ def setup(self):
+ if not self.account:
+ self.multiDL = False
+
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url)
+ self.url_pattern = '<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
+
+        # marks the file as "offline" when the pattern is found on the html page
+ if r'<div class="errorMessage mb10">' in self.html:
+ self.offline()
+
+ elif r'Page cannot be displayed' in self.html:
+ self.offline()
+
+        # the filename that will be shown in the list (e.g. test.part1.rar)
+ pyfile.name = self.getFileName()
+
+        # prepare and download
+ if not self.account:
+ self.prepare()
+ self.download(self.getFileUrl())
+ self.myPostProcess()
+ else:
+ self.download(self.getFileUrl())
+ self.myPostProcess()
+
+
+ def prepare(self):
+        """You have to wait some seconds. Otherwise you will get a 40-byte HTML page instead of the file you expected"""
+ self.setWait(10)
+ self.wait()
+ return True
+
+
+ def getFileUrl(self):
+ """gives you the URL to the file. Extracted from the Files.mail.ru HTML-page stored in self.html"""
+ return re.search(self.url_pattern, self.html).group(0).split('<a href="')[1].split('" onclick="return Act')[0]
+
+
+ def getFileName(self):
+ """gives you the Name for each file. Also extracted from the HTML-Page"""
+ return re.search(self.url_pattern, self.html).group(0).split(', event)">')[1].split('</a>')[0]
+
+
+ def myPostProcess(self):
+        # Searches the file for HTML code. Sometimes the redirect
+        # doesn't work (maybe a curl problem), you get only a small
+        # HTML file and the download is marked as "finished";
+        # in that case the download is restarted. That is only bad for those
+        # who actually want to download an HTML file (one in a million ;-) )
+        #
+        # The maximum upload size allowed on files.mail.ru at the moment is 100MB,
+        # so every download is checked, because some downloads contain
+        # the HTML text followed by 60MB of zeros in a xyzfile.part1.rar file
+        # (loading 100MB into RAM is not an option).
+ check = self.checkDownload({"html": "<meta name="}, read_size=50000)
+ if check == "html":
+            self.logInfo(_("There was HTML code in the downloaded file (%s)... "
+                           "redirect error? The download will be restarted") % self.pyfile.name)
+ self.retry()
diff --git a/pyload/plugins/hoster/FileserveCom.py b/pyload/plugins/hoster/FileserveCom.py
new file mode 100644
index 000000000..f486d9f56
--- /dev/null
+++ b/pyload/plugins/hoster/FileserveCom.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.utils import json_loads
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.utils import parseFileSize
+
+
+def checkFile(plugin, urls):
+ html = getURL(plugin.URLS[1], post={"urls": "\n".join(urls)}, decode=True)
+
+ file_info = []
+ for li in re.finditer(plugin.LINKCHECK_TR, html, re.S):
+ try:
+ cols = re.findall(plugin.LINKCHECK_TD, li.group(1))
+ if cols:
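+                # (name, size, status, url) -- status 1 = offline, 2 = online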
+ file_info.append((
+ cols[1] if cols[1] != '--' else cols[0],
+ parseFileSize(cols[2]) if cols[2] != '--' else 0,
+ 2 if cols[3].startswith('Available') else 1,
+ cols[0]))
+ except Exception, e:
+ continue
+
+ return file_info
+
+
+class FileserveCom(Hoster):
+ __name__ = "FileserveCom"
+ __type__ = "hoster"
+ __version__ = "0.52"
+
+ __pattern__ = r'http://(?:www\.)?fileserve\.com/file/(?P<id>[^/]+).*'
+
+ __description__ = """Fileserve.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de"),
+ ("mkaay", "mkaay@mkaay.de"),
+ ("Paul King", None),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ URLS = ["http://www.fileserve.com/file/", "http://www.fileserve.com/link-checker.php",
+ "http://www.fileserve.com/checkReCaptcha.php"]
+ LINKCHECK_TR = r'<tr>\s*(<td>http://www\.fileserve\.com/file/.*?)</tr>'
+ LINKCHECK_TD = r'<td>(?:<[^>]*>|&nbsp;)*([^<]*)'
+
+ CAPTCHA_KEY_PATTERN = r'var reCAPTCHA_publickey=\'(?P<key>.+?)\''
+ LONG_WAIT_PATTERN = r'<li class="title">You need to wait (\d+) (\w+) to start another download\.</li>'
+ LINK_EXPIRED_PATTERN = r'Your download link has expired'
+ DAILY_LIMIT_PATTERN = r'Your daily download limit has been reached'
+ NOT_LOGGED_IN_PATTERN = r'<form (name="loginDialogBoxForm"|id="login_form")|<li><a href="/login\.php">Login</a></li>'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = self.premium
+
+ self.file_id = re.match(self.__pattern__, self.pyfile.url).group('id')
+ self.url = "%s%s" % (self.URLS[0], self.file_id)
+ self.logDebug("File ID: %s URL: %s" % (self.file_id, self.url))
+
+
+ def process(self, pyfile):
+ pyfile.name, pyfile.size, status, self.url = checkFile(self, [self.url])[0]
+ if status != 2:
+ self.offline()
+ self.logDebug("File Name: %s Size: %d" % (pyfile.name, pyfile.size))
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+
+ def handleFree(self):
+ self.html = self.load(self.url)
+ action = self.load(self.url, post={"checkDownload": "check"}, decode=True)
+ action = json_loads(action)
+ self.logDebug(action)
+
+ if "fail" in action:
+ if action['fail'] == "timeLimit":
+ self.html = self.load(self.url, post={"checkDownload": "showError", "errorType": "timeLimit"},
+ decode=True)
+
+ self.doLongWait(re.search(self.LONG_WAIT_PATTERN, self.html))
+
+ elif action['fail'] == "parallelDownload":
+ self.logWarning(_("Parallel download error, now waiting 60s"))
+ self.retry(wait_time=60, reason=_("parallelDownload"))
+
+ else:
+ self.fail(_("Download check returned: %s") % action['fail'])
+
+ elif "success" in action:
+ if action['success'] == "showCaptcha":
+ self.doCaptcha()
+ self.doTimmer()
+ elif action['success'] == "showTimmer":
+ self.doTimmer()
+
+ else:
+ self.error(_("Unknown server response"))
+
+ # show download link
+ res = self.load(self.url, post={"downloadLink": "show"}, decode=True)
+ self.logDebug("Show downloadLink response: %s" % res)
+ if "fail" in res:
+ self.error(_("Couldn't retrieve download url"))
+
+ # this may either download our file or forward us to an error page
+ self.download(self.url, post={"download": "normal"})
+ self.logDebug(self.req.http.lastEffectiveURL)
+
+ check = self.checkDownload({"expired": self.LINK_EXPIRED_PATTERN,
+ "wait": re.compile(self.LONG_WAIT_PATTERN),
+ "limit": self.DAILY_LIMIT_PATTERN})
+
+ if check == "expired":
+ self.logDebug("Download link was expired")
+ self.retry()
+ elif check == "wait":
+ self.doLongWait(self.lastCheck)
+ elif check == "limit":
+            self.logWarning(_("Download limit reached for today"))
+ self.setWait(secondsToMidnight(gmt=2), True)
+ self.wait()
+ self.retry()
+
+ self.thread.m.reconnecting.wait(3) # Ease issue with later downloads appearing to be in parallel
+
+
+ def doTimmer(self):
+ res = self.load(self.url, post={"downloadLink": "wait"}, decode=True)
+ self.logDebug("Wait response: %s" % res[:80])
+
+ if "fail" in res:
+ self.fail(_("Failed getting wait time"))
+
+ if self.__name__ == "FilejungleCom":
+ m = re.search(r'"waitTime":(\d+)', res)
+ if m is None:
+ self.fail(_("Cannot get wait time"))
+ wait_time = int(m.group(1))
+ else:
+ wait_time = int(res) + 3
+
+ self.setWait(wait_time)
+ self.wait()
+
+
+ def doCaptcha(self):
+ captcha_key = re.search(self.CAPTCHA_KEY_PATTERN, self.html).group("key")
+ recaptcha = ReCaptcha(self)
+
+ for _i in xrange(5):
+ challenge, code = recaptcha.challenge(captcha_key)
+ res = json_loads(self.load(self.URLS[2],
+ post={'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': code,
+ 'recaptcha_shortencode_field': self.file_id}))
+ if not res['success']:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail(_("Invalid captcha"))
+
+
+ def doLongWait(self, m):
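+        # Convert "<n> seconds/minutes/hours" into seconds; fall back to 12 minutes if nothing matched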
+ wait_time = (int(m.group(1)) * {'seconds': 1, 'minutes': 60, 'hours': 3600}[m.group(2)]) if m else 12 * 60
+ self.setWait(wait_time, True)
+ self.wait()
+ self.retry()
+
+
+ def handlePremium(self):
+ premium_url = None
+ if self.__name__ == "FileserveCom":
+ #try api download
+ res = self.load("http://app.fileserve.com/api/download/premium/",
+ post={"username": self.user,
+ "password": self.account.getAccountData(self.user)['password'],
+ "shorten": self.file_id},
+ decode=True)
+ if res:
+ res = json_loads(res)
+ if res['error_code'] == "302":
+ premium_url = res['next']
+ elif res['error_code'] in ["305", "500"]:
+ self.tempOffline()
+ elif res['error_code'] in ["403", "605"]:
+ self.resetAccount()
+ elif res['error_code'] in ["606", "607", "608"]:
+ self.offline()
+ else:
+ self.logError(res['error_code'], res['error_message'])
+
+ self.download(premium_url or self.pyfile.url)
+
+ if not premium_url:
+ check = self.checkDownload({"login": re.compile(self.NOT_LOGGED_IN_PATTERN)})
+
+ if check == "login":
+ self.account.relogin(self.user)
+ self.retry(reason=_("Not logged in"))
+
+
+def getInfo(urls):
+ for chunk in chunks(urls, 100):
+ yield checkFile(FileserveCom, chunk)
diff --git a/pyload/plugins/hoster/FileshareInUa.py b/pyload/plugins/hoster/FileshareInUa.py
new file mode 100644
index 000000000..8bb1a72c5
--- /dev/null
+++ b/pyload/plugins/hoster/FileshareInUa.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FileshareInUa(DeadHoster):
+ __name__ = "FileshareInUa"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?fileshare\.in\.ua/\w{7}'
+
+ __description__ = """Fileshare.in.ua hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("fwannmacher", "felipe@warhammerproject.com")]
+
+
+getInfo = create_getInfo(FileshareInUa)
diff --git a/pyload/plugins/hoster/FilesonicCom.py b/pyload/plugins/hoster/FilesonicCom.py
new file mode 100644
index 000000000..323983169
--- /dev/null
+++ b/pyload/plugins/hoster/FilesonicCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FilesonicCom(DeadHoster):
+ __name__ = "FilesonicCom"
+ __type__ = "hoster"
+ __version__ = "0.35"
+
+ __pattern__ = r'http://(?:www\.)?filesonic\.com/file/\w+'
+
+ __description__ = """Filesonic.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de"),
+ ("paulking", None)]
+
+
+getInfo = create_getInfo(FilesonicCom)
diff --git a/pyload/plugins/hoster/FilezyNet.py b/pyload/plugins/hoster/FilezyNet.py
new file mode 100644
index 000000000..2296cf856
--- /dev/null
+++ b/pyload/plugins/hoster/FilezyNet.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FilezyNet(DeadHoster):
+ __name__ = "FilezyNet"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?filezy\.net/\w{12}'
+
+ __description__ = """Filezy.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = []
+
+
+getInfo = create_getInfo(FilezyNet)
diff --git a/pyload/plugins/hoster/FiredriveCom.py b/pyload/plugins/hoster/FiredriveCom.py
new file mode 100644
index 000000000..3c7ec25f2
--- /dev/null
+++ b/pyload/plugins/hoster/FiredriveCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FiredriveCom(DeadHoster):
+ __name__ = "FiredriveCom"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'https?://(?:www\.)?(firedrive|putlocker)\.com/(mobile/)?(file|embed)/(?P<ID>\w+)'
+
+ __description__ = """Firedrive.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+getInfo = create_getInfo(FiredriveCom)
diff --git a/pyload/plugins/hoster/FlyFilesNet.py b/pyload/plugins/hoster/FlyFilesNet.py
new file mode 100644
index 000000000..05e37e95a
--- /dev/null
+++ b/pyload/plugins/hoster/FlyFilesNet.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote_plus
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.SimpleHoster import SimpleHoster
+
+
+class FlyFilesNet(SimpleHoster):
+ __name__ = "FlyFilesNet"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?flyfiles\.net/.*'
+
+ __description__ = """FlyFiles.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = []
+
+ SESSION_PATTERN = r'flyfiles\.net/(.*)/.*'
+ NAME_PATTERN = r'flyfiles\.net/.*/(.*)'
+
+
+ def process(self, pyfile):
+ name = re.search(self.NAME_PATTERN, pyfile.url).group(1)
+ pyfile.name = unquote_plus(name)
+
+ session = re.search(self.SESSION_PATTERN, pyfile.url).group(1)
+
+ url = "http://flyfiles.net"
+
+ # get download URL
+ parsed_url = getURL(url, post={"getDownLink": session}, cookies=True)
+ self.logDebug("Parsed URL: %s" % parsed_url)
+
+ if parsed_url == '#downlink|' or parsed_url == "#downlink|#":
+ self.logWarning(_("Could not get the download URL. Please wait 10 minutes"))
+ self.wait(10 * 60, True)
+ self.retry()
+
+ download_url = parsed_url.replace('#downlink|', '')
+
+ self.download(download_url)
diff --git a/pyload/plugins/hoster/FourSharedCom.py b/pyload/plugins/hoster/FourSharedCom.py
new file mode 100644
index 000000000..03500654e
--- /dev/null
+++ b/pyload/plugins/hoster/FourSharedCom.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class FourSharedCom(SimpleHoster):
+ __name__ = "FourSharedCom"
+ __type__ = "hoster"
+ __version__ = "0.30"
+
+ __pattern__ = r'https?://(?:www\.)?4shared(\-china)?\.com/(account/)?(download|get|file|document|photo|video|audio|mp3|office|rar|zip|archive|music)/.+?/.*'
+
+ __description__ = """4Shared.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<meta name="title" content="(?P<N>.+?)"'
+ SIZE_PATTERN = r'<span title="Size: (?P<S>[\d.,]+) (?P<U>[\w^_]+)">'
+ OFFLINE_PATTERN = r'The file link that you requested is not valid\.|This file was deleted.'
+
+ NAME_REPLACEMENTS = [(r"&#(\d+).", lambda m: unichr(int(m.group(1))))]
+ SIZE_REPLACEMENTS = [(",", "")]
+
+ DOWNLOAD_URL_PATTERN = r'name="d3link" value="(.*?)"'
+ DOWNLOAD_BUTTON_PATTERN = r'id="btnLink" href="(.*?)"'
+ FID_PATTERN = r'name="d3fid" value="(.*?)"'
+
+
+ def handleFree(self):
+ if not self.account:
+ self.fail(_("User not logged in"))
+
+ m = re.search(self.DOWNLOAD_BUTTON_PATTERN, self.html)
+ if m:
+ link = m.group(1)
+ else:
+ link = re.sub(r'/(download|get|file|document|photo|video|audio)/', r'/get/', self.pyfile.url)
+
+ self.html = self.load(link)
+
+ m = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
+ if m is None:
+ self.error(_("Download link"))
+ link = m.group(1)
+
+ try:
+ m = re.search(self.FID_PATTERN, self.html)
+ res = self.load('http://www.4shared.com/web/d2/getFreeDownloadLimitInfo?fileId=%s' % m.group(1))
+ self.logDebug(res)
+ except:
+ pass
+
+ self.wait(20)
+ self.download(link)
+
+
+getInfo = create_getInfo(FourSharedCom)
diff --git a/pyload/plugins/hoster/FreakshareCom.py b/pyload/plugins/hoster/FreakshareCom.py
new file mode 100644
index 000000000..bd31a5752
--- /dev/null
+++ b/pyload/plugins/hoster/FreakshareCom.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+
+
+class FreakshareCom(Hoster):
+ __name__ = "FreakshareCom"
+ __type__ = "hoster"
+ __version__ = "0.39"
+
+ __pattern__ = r'http://(?:www\.)?freakshare\.(net|com)/files/\S*?/'
+
+ __description__ = """Freakshare.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("sitacuisses", "sitacuisses@yahoo.de"),
+ ("spoob", "spoob@pyload.org"),
+ ("mkaay", "mkaay@mkaay.de"),
+ ("Toilal", "toilal.dev@gmail.com")]
+
+
+ def setup(self):
+ self.multiDL = False
+ self.req_opts = []
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+
+ pyfile.url = pyfile.url.replace("freakshare.net/", "freakshare.com/")
+
+ if self.account:
+ self.html = self.load(pyfile.url, cookies=False)
+ pyfile.name = self.get_file_name()
+ self.download(pyfile.url)
+
+ else:
+ self.prepare()
+ self.get_file_url()
+
+ self.download(pyfile.url, post=self.req_opts)
+
+ check = self.checkDownload({"bad": "bad try",
+ "paralell": "> Sorry, you cant download more then 1 files at time. <",
+ "empty": "Warning: Unknown: Filename cannot be empty",
+ "wrong_captcha": "Wrong Captcha!",
+ "downloadserver": "No Downloadserver. Please try again later!"})
+
+ if check == "bad":
+ self.fail(_("Bad Try"))
+ elif check == "paralell":
+ self.setWait(300, True)
+ self.wait()
+ self.retry()
+ elif check == "empty":
+ self.fail(_("File not downloadable"))
+ elif check == "wrong_captcha":
+ self.invalidCaptcha()
+ self.retry()
+ elif check == "downloadserver":
+ self.retry(5, 15 * 60, _("No Download server"))
+
+
+ def prepare(self):
+ pyfile = self.pyfile
+
+ self.download_html()
+
+ if not self.file_exists():
+ self.offline()
+
+ self.setWait(self.get_waiting_time())
+
+ pyfile.name = self.get_file_name()
+ pyfile.size = self.get_file_size()
+
+ self.wait()
+
+ return True
+
+
+ def download_html(self):
+ self.load("http://freakshare.com/index.php", {"language": "EN"}) # Set english language in server session
+ self.html = self.load(self.pyfile.url)
+
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+ if not self.wantReconnect:
+ self.req_opts = self.get_download_options() # get the Post options for the Request
+ #file_url = self.pyfile.url
+ #return file_url
+ else:
+ self.offline()
+
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+ if not self.wantReconnect:
+ file_name = re.search(r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">([^ ]+)", self.html)
+ if file_name is not None:
+ file_name = file_name.group(1)
+ else:
+ file_name = self.pyfile.url
+ return file_name
+ else:
+ return self.pyfile.url
+
+
+ def get_file_size(self):
+ size = 0
+ if not self.html:
+ self.download_html()
+ if not self.wantReconnect:
+ file_size_check = re.search(
+ r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">[^ ]+ - ([^ ]+) (\w\w)yte", self.html)
+ if file_size_check is not None:
+ units = float(file_size_check.group(1).replace(",", ""))
+                exp = {'KB': 1, 'MB': 2, 'GB': 3}[file_size_check.group(2)]
+                size = int(units * 1024 ** exp)
+
+ return size
+
+
+ def get_waiting_time(self):
+ if not self.html:
+ self.download_html()
+
+ if "Your Traffic is used up for today" in self.html:
+ self.wantReconnect = True
+ return secondsToMidnight(gmt=2)
+
+ timestring = re.search('\s*var\s(?:downloadWait|time)\s=\s(\d*)[\d.]*;', self.html)
+ if timestring:
+ return int(timestring.group(1))
+ else:
+ return 60
+
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+ if re.search(r"This file does not exist!", self.html) is not None:
+ return False
+ else:
+ return True
+
+
+ def get_download_options(self):
+ re_envelope = re.search(r".*?value=\"Free\sDownload\".*?\n*?(.*?<.*?>\n*)*?\n*\s*?</form>",
+ self.html).group(0) # get the whole request
+ to_sort = re.findall(r"<input\stype=\"hidden\"\svalue=\"(.*?)\"\sname=\"(.*?)\"\s\/>", re_envelope)
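+        # re.findall yields (value, name) pairs, so swap them into a name -> value dict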
+ request_options = dict((n, v) for (v, n) in to_sort)
+
+ herewego = self.load(self.pyfile.url, None, request_options) # the actual download-Page
+
+        # comment this in, when it doesn't work
+ # with open("DUMP__FS_.HTML", "w") as fp:
+ # fp.write(herewego)
+
+ to_sort = re.findall(r"<input\stype=\".*?\"\svalue=\"(\S*?)\".*?name=\"(\S*?)\"\s.*?\/>", herewego)
+ request_options = dict((n, v) for (v, n) in to_sort)
+
+ challenge = re.search(r"http://api\.recaptcha\.net/challenge\?k=(\w+)", herewego)
+
+ if challenge:
+ re_captcha = ReCaptcha(self)
+ (request_options['recaptcha_challenge_field'],
+ request_options['recaptcha_response_field']) = re_captcha.challenge(challenge.group(1))
+
+ return request_options
diff --git a/pyload/plugins/hoster/FreeWayMe.py b/pyload/plugins/hoster/FreeWayMe.py
new file mode 100644
index 000000000..128f54958
--- /dev/null
+++ b/pyload/plugins/hoster/FreeWayMe.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class FreeWayMe(Hoster):
+ __name__ = "FreeWayMe"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'https://(?:www\.)?free-way\.me/.*'
+
+ __description__ = """FreeWayMe hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Nicolas Giese", "james@free-way.me")]
+
+
+ def setup(self):
+ self.resumeDownload = False
+ self.chunkLimit = 1
+ self.multiDL = self.premium
+
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "FreeWayMe")
+ self.fail(_("No FreeWay account provided"))
+
+ self.logDebug("Old URL: %s" % pyfile.url)
+
+ (user, data) = self.account.selectAccount()
+
+ self.download(
+ "https://www.free-way.me/load.php",
+ get={"multiget": 7, "url": pyfile.url, "user": user, "pw": self.account.getpw(user), "json": ""},
+ disposition=True)
diff --git a/pyload/plugins/hoster/FreevideoCz.py b/pyload/plugins/hoster/FreevideoCz.py
new file mode 100644
index 000000000..7cc0a6ef7
--- /dev/null
+++ b/pyload/plugins/hoster/FreevideoCz.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class FreevideoCz(DeadHoster):
+ __name__ = "FreevideoCz"
+ __type__ = "hoster"
+ __version__ = "0.3"
+
+ __pattern__ = r'http://(?:www\.)?freevideo\.cz/vase-videa/.+'
+
+ __description__ = """Freevideo.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(FreevideoCz)
\ No newline at end of file
diff --git a/pyload/plugins/hoster/FshareVn.py b/pyload/plugins/hoster/FshareVn.py
new file mode 100644
index 000000000..92f7c659a
--- /dev/null
+++ b/pyload/plugins/hoster/FshareVn.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import strptime, mktime, gmtime
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+
+
+def getInfo(urls):
+ for url in urls:
+ html = getURL("http://www.fshare.vn/check_link.php",
+ post={'action': "check_link", 'arrlinks': url},
+ decode=True)
+
+ yield parseFileInfo(FshareVn, url, html)
+
+
+def doubleDecode(m):
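+    # Filenames arrive with escaped unicode sequences; decode them back to readable characters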
+ return m.group(1).decode('raw_unicode_escape')
+
+
+class FshareVn(SimpleHoster):
+ __name__ = "FshareVn"
+ __type__ = "hoster"
+ __version__ = "0.17"
+
+ __pattern__ = r'http://(?:www\.)?fshare\.vn/file/.*'
+
+ __description__ = """FshareVn hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ INFO_PATTERN = r'<p>(?P<N>[^<]+)<\\/p>[\\trn\s]*<p>(?P<S>[\d.,]+)\s*(?P<U>[\w^_]+)<\\/p>'
+ OFFLINE_PATTERN = r'<div class=\\"f_left file_w\\"|<\\/p>\\t\\t\\t\\t\\r\\n\\t\\t<p><\\/p>\\t\\t\\r\\n\\t\\t<p>0 KB<\\/p>'
+
+ NAME_REPLACEMENTS = [("(.*)", doubleDecode)]
+
+ LINK_PATTERN = r'action="(http://download.*?)[#"]'
+ WAIT_PATTERN = ur'Lượt tải xuống kế tiếp là:\s*(.*?)\s*<'
+
+
+ def process(self, pyfile):
+ self.html = self.load('http://www.fshare.vn/check_link.php', post={
+ "action": "check_link",
+ "arrlinks": pyfile.url
+ }, decode=True)
+ self.getFileInfo()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+ self.checkDownloadedFile()
+
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ self.checkErrors()
+
+ action, inputs = self.parseHtmlForm('frm_download')
+ self.url = self.pyfile.url + action
+
+ if not inputs:
+ self.error(_("No FORM"))
+ elif 'link_file_pwd_dl' in inputs:
+ for password in self.getPassword().splitlines():
+ self.logInfo(_("Password protected link, trying ") + password)
+ inputs['link_file_pwd_dl'] = password
+ self.html = self.load(self.url, post=inputs, decode=True)
+                if 'name="link_file_pwd_dl"' not in self.html:
+ break
+ else:
+ self.fail(_("No or incorrect password"))
+ else:
+ self.html = self.load(self.url, post=inputs, decode=True)
+
+ self.checkErrors()
+
+ m = re.search(r'var count = (\d+)', self.html)
+ self.setWait(int(m.group(1)) if m else 30)
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+ self.url = m.group(1)
+ self.logDebug("FREE DL URL: %s" % self.url)
+
+ self.wait()
+ self.download(self.url)
+
+
+ def handlePremium(self):
+ self.download(self.pyfile.url)
+
+
+ def checkErrors(self):
+        if '/error.php?' in self.req.lastEffectiveURL or u"Liên kết bạn chọn không tồn" in self.html:
+ self.offline()
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ self.logInfo(_("Wait until %s ICT") % m.group(1))
+ wait_until = mktime(strptime(m.group(1), "%d/%m/%Y %H:%M"))
+ self.wait(wait_until - mktime(gmtime()) - 7 * 60 * 60, True)
+ self.retry()
+ elif '<ul class="message-error">' in self.html:
+            msg = "Unknown error occurred or wait time not parsed"
+ self.logError(msg)
+ self.retry(30, 2 * 60, msg)
+
+
+ def checkDownloadedFile(self):
+ # check download
+ check = self.checkDownload({
+ "not_found": "<head><title>404 Not Found</title></head>"
+ })
+
+ if check == "not_found":
+            self.fail(_("File not found on server"))
diff --git a/pyload/plugins/hoster/Ftp.py b/pyload/plugins/hoster/Ftp.py
new file mode 100644
index 000000000..2baec064b
--- /dev/null
+++ b/pyload/plugins/hoster/Ftp.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+import pycurl
+import re
+
+from urllib import quote, unquote
+from urlparse import urlparse
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class Ftp(Hoster):
+ __name__ = "Ftp"
+ __type__ = "hoster"
+ __version__ = "0.43"
+
+ __pattern__ = r'(?:ftps?|sftp)://([\w.-]+(:[\w.-]+)?@)?[\w.-]+(:\d+)?/.+'
+
+ __description__ = """Download from ftp directory"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.com"),
+ ("mkaay", "mkaay@mkaay.de"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ parsed_url = urlparse(pyfile.url)
+ netloc = parsed_url.netloc
+
+ pyfile.name = parsed_url.path.rpartition('/')[2]
+ try:
+ pyfile.name = unquote(str(pyfile.name)).decode('utf8')
+ except:
+ pass
+
+        if "@" not in netloc:
+ servers = [x['login'] for x in self.account.getAllAccounts()] if self.account else []
+
+ if netloc in servers:
+ self.logDebug("Logging on to %s" % netloc)
+ self.req.addAuth(self.account.accounts[netloc]['password'])
+ else:
+ for pwd in self.getPassword().splitlines():
+ if ":" in pwd:
+ self.req.addAuth(pwd.strip())
+ break
+
+ self.req.http.c.setopt(pycurl.NOBODY, 1)
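+        # Header-only request (no body) to read the Content-Length before deciding how to proceed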
+
+ try:
+ res = self.load(pyfile.url)
+ except pycurl.error, e:
+ self.fail(_("Error %d: %s") % e.args)
+
+ self.req.http.c.setopt(pycurl.NOBODY, 0)
+ self.logDebug(self.req.http.header)
+
+ m = re.search(r"Content-Length:\s*(\d+)", res)
+ if m:
+ pyfile.size = int(m.group(1))
+ self.download(pyfile.url)
+ else:
+ #Naive ftp directory listing
+ if re.search(r'^25\d.*?"', self.req.http.header, re.M):
+ pyfile.url = pyfile.url.rstrip('/')
+                pkgname = "/".join([pyfile.package().name, urlparse(pyfile.url).path.rpartition('/')[2]])
+ pyfile.url += '/'
+ self.req.http.c.setopt(48, 1) # CURLOPT_DIRLISTONLY
+ res = self.load(pyfile.url, decode=False)
+ links = [pyfile.url + quote(x) for x in res.splitlines()]
+ self.logDebug("LINKS", links)
+ self.core.api.addPackage(pkgname, links)
+ else:
+ self.fail(_("Unexpected server response"))
diff --git a/pyload/plugins/hoster/GamefrontCom.py b/pyload/plugins/hoster/GamefrontCom.py
new file mode 100644
index 000000000..195c6037e
--- /dev/null
+++ b/pyload/plugins/hoster/GamefrontCom.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import parseFileSize
+
+
+class GamefrontCom(Hoster):
+ __name__ = "GamefrontCom"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?gamefront\.com/files/\w+'
+
+ __description__ = """Gamefront.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("fwannmacher", "felipe@warhammerproject.com")]
+
+
+    PATTERN_FILENAME = r'<title>(.*?) \| Game Front'
+ PATTERN_FILESIZE = r'<dt>File Size:</dt>[\n\s]*<dd>(.*?)</dd>'
+ PATTERN_OFFLINE = r'This file doesn\'t exist, or has been removed.'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = -1
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.html = self.load(pyfile.url, decode=True)
+
+ if not self._checkOnline():
+ self.offline()
+
+ pyfile.name = self._getName()
+
+ link = self._getLink()
+
+ if not link.startswith('http://'):
+ link = "http://www.gamefront.com/" + link
+
+ self.download(link)
+
+
+ def _checkOnline(self):
+ if re.search(self.PATTERN_OFFLINE, self.html):
+ return False
+ else:
+ return True
+
+
+ def _getName(self):
+ name = re.search(self.PATTERN_FILENAME, self.html)
+ if name is None:
+            self.fail(_("Plugin broken"))
+
+ return name.group(1)
+
+
+ def _getLink(self):
+ self.html2 = self.load("http://www.gamefront.com/" + re.search("(files/service/thankyou\\?id=\w+)",
+ self.html).group(1))
+ return re.search("<a href=\"(http://media\d+\.gamefront.com/.*)\">click here</a>", self.html2).group(1).replace("&amp;", "&")
+
+
+def getInfo(urls):
+ result = []
+
+ for url in urls:
+ html = getURL(url)
+
+ if re.search(GamefrontCom.PATTERN_OFFLINE, html):
+ result.append((url, 0, 1, url))
+ else:
+ name = re.search(GamefrontCom.PATTERN_FILENAME, html)
+ if name is None:
+ result.append((url, 0, 1, url))
+ else:
+ name = name.group(1)
+ size = re.search(GamefrontCom.PATTERN_FILESIZE, html)
+ size = parseFileSize(size.group(1))
+
+ result.append((name, size, 3, url))
+
+ yield result
diff --git a/pyload/plugins/hoster/GigapetaCom.py b/pyload/plugins/hoster/GigapetaCom.py
new file mode 100644
index 000000000..491fcad01
--- /dev/null
+++ b/pyload/plugins/hoster/GigapetaCom.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import FOLLOWLOCATION
+from random import randint
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class GigapetaCom(SimpleHoster):
+ __name__ = "GigapetaCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?gigapeta\.com/dl/\w+'
+
+ __description__ = """GigaPeta.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<img src=".*" alt="file" />-->\s*(?P<N>.*?)\s*</td>'
+ SIZE_PATTERN = r'<th>\s*Size\s*</th>\s*<td>\s*(?P<S>.*?)\s*</td>'
+ OFFLINE_PATTERN = r'<div id="page_error">'
+
+ COOKIES = [("gigapeta.com", "lang", "us")]
+
+
+ def handleFree(self):
+ captcha_key = str(randint(1, 100000000))
+ captcha_url = "http://gigapeta.com/img/captcha.gif?x=%s" % captcha_key
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
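+        # Keep redirects disabled so the final download URL can be read from the Location header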
+
+ for _i in xrange(5):
+ self.checkErrors()
+
+ captcha = self.decryptCaptcha(captcha_url)
+ self.html = self.load(self.pyfile.url, post={
+ "captcha_key": captcha_key,
+ "captcha": captcha,
+ "download": "Download"})
+
+ m = re.search(r'Location\s*:\s*(.+)', self.req.http.header, re.I)
+ if m:
+ download_url = m.group(1)
+ break
+ elif "Entered figures don&#96;t coincide with the picture" in self.html:
+ self.invalidCaptcha()
+ else:
+ self.fail(_("No valid captcha code entered"))
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+ self.download(download_url)
+
+
+ def checkErrors(self):
+ if "All threads for IP" in self.html:
+ self.logDebug("Your IP is already downloading a file")
+ self.wait(5 * 60, True)
+ self.retry()
+
+
+getInfo = create_getInfo(GigapetaCom)
diff --git a/pyload/plugins/hoster/GooIm.py b/pyload/plugins/hoster/GooIm.py
new file mode 100644
index 000000000..28f50661b
--- /dev/null
+++ b/pyload/plugins/hoster/GooIm.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# https://goo.im/devs/liquidsmooth/3.x/codina/Nightly/LS-KK-v3.2-2014-08-01-codina.zip
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class GooIm(SimpleHoster):
+ __name__ = "GooIm"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?goo\.im/.+'
+
+ __description__ = """Goo.im hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de")]
+
+
+ NAME_PATTERN = r'You will be redirected to .*(?P<N>[^/ ]+) in'
+ OFFLINE_PATTERN = r'The file you requested was not found'
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+
+
+ def handleFree(self):
+ url = self.pyfile.url
+ self.html = self.load(url, cookies=True)
+ self.wait(10)
+ self.download(url, cookies=True)
+
+
+getInfo = create_getInfo(GooIm)
diff --git a/pyload/plugins/hoster/HellshareCz.py b/pyload/plugins/hoster/HellshareCz.py
new file mode 100644
index 000000000..90dfb4dd6
--- /dev/null
+++ b/pyload/plugins/hoster/HellshareCz.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class HellshareCz(SimpleHoster):
+ __name__ = "HellshareCz"
+ __type__ = "hoster"
+ __version__ = "0.83"
+
+ __pattern__ = r'(http://(?:www\.)?hellshare\.(?:cz|com|sk|hu|pl)/[^?]*/\d+).*'
+
+ __description__ = """Hellshare.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<h1 id="filename"[^>]*>(?P<N>[^<]+)</h1>'
+ SIZE_PATTERN = r'<strong id="FileSize_master">(?P<S>[\d.,]+)&nbsp;(?P<U>[\w^_]+)</strong>'
+ OFFLINE_PATTERN = r'<h1>File not found.</h1>'
+ SHOW_WINDOW_PATTERN = r'<a href="([^?]+/(\d+)/\?do=(fileDownloadButton|relatedFileDownloadButton-\2)-showDownloadWindow)"'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True if self.account else False
+ self.chunkLimit = 1
+
+
+ def process(self, pyfile):
+ if not self.account:
+ self.fail(_("User not logged in"))
+ pyfile.url = re.match(self.__pattern__, pyfile.url).group(1)
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+ if not self.checkTrafficLeft():
+ self.fail(_("Not enough traffic left for user ") + self.user)
+
+ m = re.search(self.SHOW_WINDOW_PATTERN, self.html)
+ if m is None:
+ self.error(_("SHOW_WINDOW_PATTERN not found"))
+
+ self.url = "http://www.hellshare.com" + m.group(1)
+ self.download(self.url)
+
+
+getInfo = create_getInfo(HellshareCz)
diff --git a/pyload/plugins/hoster/HellspyCz.py b/pyload/plugins/hoster/HellspyCz.py
new file mode 100644
index 000000000..f3578c0f3
--- /dev/null
+++ b/pyload/plugins/hoster/HellspyCz.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class HellspyCz(DeadHoster):
+ __name__ = "HellspyCz"
+ __type__ = "hoster"
+ __version__ = "0.28"
+
+ __pattern__ = r'http://(?:www\.)?(?:hellspy\.(?:cz|com|sk|hu|pl)|sciagaj\.pl)(/\S+/\d+)/?.*'
+
+ __description__ = """HellSpy.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(HellspyCz)
diff --git a/pyload/plugins/hoster/HotfileCom.py b/pyload/plugins/hoster/HotfileCom.py
new file mode 100644
index 000000000..bede362e5
--- /dev/null
+++ b/pyload/plugins/hoster/HotfileCom.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class HotfileCom(DeadHoster):
+ __name__ = "HotfileCom"
+ __type__ = "hoster"
+ __version__ = "0.37"
+
+ __pattern__ = r'https?://(?:www\.)?hotfile\.com/dl/\d+/\w+'
+
+ __description__ = """Hotfile.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("sitacuisses", "sitacuisses@yhoo.de"),
+ ("spoob", "spoob@pyload.org"),
+ ("mkaay", "mkaay@mkaay.de"),
+ ("JoKoT3", "jokot3@gmail.com")]
+
+
+getInfo = create_getInfo(HotfileCom)
diff --git a/pyload/plugins/hoster/HugefilesNet.py b/pyload/plugins/hoster/HugefilesNet.py
new file mode 100644
index 000000000..e955d53cc
--- /dev/null
+++ b/pyload/plugins/hoster/HugefilesNet.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://hugefiles.net/prthf9ya4w6s
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class HugefilesNet(XFSHoster):
+ __name__ = "HugefilesNet"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?hugefiles\.net/\w{12}'
+
+ __description__ = """Hugefiles.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ HOSTER_DOMAIN = "hugefiles.net"
+
+ SIZE_PATTERN = r'File Size:</span>\s*<span[^>]*>(?P<S>[^<]+)</span></div>'
+
+
+getInfo = create_getInfo(HugefilesNet)
diff --git a/pyload/plugins/hoster/HundredEightyUploadCom.py b/pyload/plugins/hoster/HundredEightyUploadCom.py
new file mode 100644
index 000000000..717344efa
--- /dev/null
+++ b/pyload/plugins/hoster/HundredEightyUploadCom.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://180upload.com/js9qdm6kjnrs
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class HundredEightyUploadCom(XFSHoster):
+ __name__ = "HundredEightyUploadCom"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?180upload\.com/\w{12}'
+
+ __description__ = """180upload.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ HOSTER_DOMAIN = "180upload.com"
+
+ NAME_PATTERN = r'Filename:</b></td><td nowrap>(?P<N>.+)</td></tr>-->'
+ SIZE_PATTERN = r'Size:</b></td><td>(?P<S>[\d.,]+) (?P<U>[\w^_]+)\s*<small>'
+
+
+getInfo = create_getInfo(HundredEightyUploadCom)
diff --git a/pyload/plugins/hoster/IFileWs.py b/pyload/plugins/hoster/IFileWs.py
new file mode 100644
index 000000000..d92ec087d
--- /dev/null
+++ b/pyload/plugins/hoster/IFileWs.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class IFileWs(DeadHoster):
+ __name__ = "IFileWs"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?ifile\.ws/\w{12}'
+
+ __description__ = """Ifile.ws hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("z00nx", "z00nx0@gmail.com")]
+
+
+getInfo = create_getInfo(IFileWs)
diff --git a/pyload/plugins/hoster/IcyFilesCom.py b/pyload/plugins/hoster/IcyFilesCom.py
new file mode 100644
index 000000000..126b9208d
--- /dev/null
+++ b/pyload/plugins/hoster/IcyFilesCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class IcyFilesCom(DeadHoster):
+ __name__ = "IcyFilesCom"
+ __type__ = "hoster"
+ __version__ = "0.06"
+
+ __pattern__ = r'http://(?:www\.)?icyfiles\.com/(.*)'
+
+ __description__ = """IcyFiles.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("godofdream", "soilfiction@gmail.com")]
+
+
+getInfo = create_getInfo(IcyFilesCom)
diff --git a/pyload/plugins/hoster/IfileIt.py b/pyload/plugins/hoster/IfileIt.py
new file mode 100644
index 000000000..75c2f115c
--- /dev/null
+++ b/pyload/plugins/hoster/IfileIt.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class IfileIt(SimpleHoster):
+ __name__ = "IfileIt"
+ __type__ = "hoster"
+ __version__ = "0.28"
+
+ __pattern__ = r'^unmatchable$'
+
+    __description__ = """Ifile.it hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ LINK_PATTERN = r'</span> If it doesn\'t, <a target="_blank" href="([^"]+)">'
+ RECAPTCHA_PATTERN = r'var __recaptcha_public\s*=\s*\'(.+?)\''
+ INFO_PATTERN = r'<span style="cursor: default;[^>]*>\s*(?P<N>.*?)\s*&nbsp;\s*<strong>\s*(?P<S>[\d.,]+)\s*(?P<U>[\w^_]+)\s*</strong>\s*</span>'
+ OFFLINE_PATTERN = r'<span style="cursor: default;[^>]*>\s*&nbsp;\s*<strong>\s*</strong>\s*</span>'
+ TEMP_OFFLINE_PATTERN = r'<span class="msg_red">Downloading of this file is temporarily disabled</span>'
+
+
+ def handleFree(self):
+ ukey = re.match(self.__pattern__, self.pyfile.url).group(1)
+ json_url = 'http://ifile.it/new_download-request.json'
+ post_data = {"ukey": ukey, "ab": "0"}
+
+ json_response = json_loads(self.load(json_url, post=post_data))
+ self.logDebug(json_response)
+ if json_response['status'] == 3:
+ self.offline()
+
+ if json_response['captcha']:
+ captcha_key = re.search(self.RECAPTCHA_PATTERN, self.html).group(1)
+
+ recaptcha = ReCaptcha(self)
+ post_data['ctype'] = "recaptcha"
+
+ for _i in xrange(5):
+ post_data['recaptcha_challenge'], post_data['recaptcha_response'] = recaptcha.challenge(captcha_key)
+ json_response = json_loads(self.load(json_url, post=post_data))
+ self.logDebug(json_response)
+
+ if json_response['retry']:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail(_("Incorrect captcha"))
+
+        if "ticket_url" not in json_response:
+ self.error(_("No download URL"))
+
+ self.download(json_response['ticket_url'])
+
+
+getInfo = create_getInfo(IfileIt)
diff --git a/pyload/plugins/hoster/IfolderRu.py b/pyload/plugins/hoster/IfolderRu.py
new file mode 100644
index 000000000..2a961f780
--- /dev/null
+++ b/pyload/plugins/hoster/IfolderRu.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class IfolderRu(SimpleHoster):
+ __name__ = "IfolderRu"
+ __type__ = "hoster"
+ __version__ = "0.38"
+
+ __pattern__ = r'http://(?:www\.)?(?:ifolder\.ru|rusfolder\.(?:com|net|ru))/(?:files/)?(?P<ID>\d+).*'
+
+ __description__ = """Ifolder.ru hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ SIZE_REPLACEMENTS = [(u'Кб', 'KB'), (u'Мб', 'MB'), (u'Гб', 'GB')]
+    NAME_PATTERN = ur'(?:<div><span>)?Название:(?:</span>)? <b>(?P<N>[^<]+)</b><(?:/div|br)>'
+    SIZE_PATTERN = ur'(?:<div><span>)?Размер:(?:</span>)? <b>(?P<S>[^<]+)</b><(?:/div|br)>'
+    OFFLINE_PATTERN = ur'<p>Файл номер <b>[^<]*</b> (не найден|удален) !!!</p>'
+
+ SESSION_ID_PATTERN = r'<a href=(http://ints\.(?:rusfolder\.com|ifolder\.ru)/ints/sponsor/\?bi=\d*&session=([^&]+)&u=[^>]+)>'
+ INTS_SESSION_PATTERN = r'\(\'ints_session\'\);\s*if\(tag\)\{tag\.value = "([^"]+)";\}'
+ HIDDEN_INPUT_PATTERN = r'var v = .*?name=\'(.+?)\' value=\'1\''
+ LINK_PATTERN = r'<a id="download_file_href" href="([^"]+)"'
+    WRONG_CAPTCHA_PATTERN = ur'<font color=Red>неверный код,<br>введите еще раз</font><br>'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True if self.account else False
+ self.chunkLimit = 1
+
+
+ def process(self, pyfile):
+ file_id = re.match(self.__pattern__, pyfile.url).group('ID')
+ self.html = self.load("http://rusfolder.com/%s" % file_id, cookies=True, decode=True)
+ self.getFileInfo()
+
+ url = re.search(r"location\.href = '(http://ints\..*?=)'", self.html).group(1)
+ self.html = self.load(url, cookies=True, decode=True)
+
+ url, session_id = re.search(self.SESSION_ID_PATTERN, self.html).groups()
+ self.html = self.load(url, cookies=True, decode=True)
+
+ url = "http://ints.rusfolder.com/ints/frame/?session=%s" % session_id
+ self.html = self.load(url, cookies=True)
+
+ self.wait(31, False)
+
+ captcha_url = "http://ints.rusfolder.com/random/images/?session=%s" % session_id
+ for _i in xrange(5):
+ self.html = self.load(url, cookies=True)
+ action, inputs = self.parseHtmlForm('ID="Form1"')
+ inputs['ints_session'] = re.search(self.INTS_SESSION_PATTERN, self.html).group(1)
+ inputs[re.search(self.HIDDEN_INPUT_PATTERN, self.html).group(1)] = '1'
+ inputs['confirmed_number'] = self.decryptCaptcha(captcha_url, cookies=True)
+ inputs['action'] = '1'
+ self.logDebug(inputs)
+
+ self.html = self.load(url, decode=True, cookies=True, post=inputs)
+ if self.WRONG_CAPTCHA_PATTERN in self.html:
+ self.invalidCaptcha()
+ else:
+ break
+ else:
+ self.fail(_("Invalid captcha"))
+
+ download_url = re.search(self.LINK_PATTERN, self.html).group(1)
+ self.correctCaptcha()
+ self.download(download_url)
+
+
+getInfo = create_getInfo(IfolderRu)
diff --git a/pyload/plugins/hoster/JumbofilesCom.py b/pyload/plugins/hoster/JumbofilesCom.py
new file mode 100644
index 000000000..e39bbcc20
--- /dev/null
+++ b/pyload/plugins/hoster/JumbofilesCom.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class JumbofilesCom(SimpleHoster):
+ __name__ = "JumbofilesCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?jumbofiles\.com/(\w{12}).*'
+
+ __description__ = """JumboFiles.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("godofdream", "soilfiction@gmail.com")]
+
+
+ INFO_PATTERN = r'<TR><TD>(?P<N>[^<]+?)\s*<small>\((?P<S>[\d.,]+)\s*(?P<U>[\w^_]+)'
+ OFFLINE_PATTERN = r'Not Found or Deleted / Disabled due to inactivity or DMCA'
+ LINK_PATTERN = r'<meta http-equiv="refresh" content="10;url=(.+)">'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+
+ def handleFree(self):
+ ukey = re.match(self.__pattern__, self.pyfile.url).group(1)
+ post_data = {"id": ukey, "op": "download3", "rand": ""}
+ html = self.load(self.pyfile.url, post=post_data, decode=True)
+ url = re.search(self.LINK_PATTERN, html).group(1)
+ self.download(url)
+
+
+getInfo = create_getInfo(JumbofilesCom)
diff --git a/pyload/plugins/hoster/JunocloudMe.py b/pyload/plugins/hoster/JunocloudMe.py
new file mode 100644
index 000000000..dc5620e0e
--- /dev/null
+++ b/pyload/plugins/hoster/JunocloudMe.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class JunocloudMe(XFSHoster):
+ __name__ = "JunocloudMe"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:\w+\.)?junocloud\.me/\w{12}'
+
+ __description__ = """Junocloud.me hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "junocloud.me"
+
+ URL_REPLACEMENTS = [(r'/(?:embed-)?(\w{12}).*', r'/\1'), (r'//www\.', "//dl3.")]
+
+ NAME_PATTERN = r'<p class="request_file">http://junocloud.me/w{12}/(?P<N>.+?)</p>'
+ SIZE_PATTERN = r'<p class="request_filesize">Size: (?P<S>[\d.,]+) (?P<U>[\w^_]+)</p>'
+
+ OFFLINE_PATTERN = r'>No such file with this filename<'
+ TEMP_OFFLINE_PATTERN = r'The page may have been renamed, removed or be temporarily unavailable.<'
+
+
+getInfo = create_getInfo(JunocloudMe)
diff --git a/pyload/plugins/hoster/Keep2shareCc.py b/pyload/plugins/hoster/Keep2shareCc.py
new file mode 100644
index 000000000..6fc521107
--- /dev/null
+++ b/pyload/plugins/hoster/Keep2shareCc.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urlparse, urljoin
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class Keep2shareCc(SimpleHoster):
+ __name__ = "Keep2shareCc"
+ __type__ = "hoster"
+ __version__ = "0.15"
+
+ __pattern__ = r'https?://(?:www\.)?(keep2share|k2s|keep2s)\.cc/file/(?P<ID>\w+)'
+
+ __description__ = """Keep2share.cc hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'File: <span>(?P<N>.+)</span>'
+ SIZE_PATTERN = r'Size: (?P<S>[^<]+)</div>'
+ OFFLINE_PATTERN = r'File not found or deleted|Sorry, this file is blocked or deleted|Error 404'
+
+ LINK_PATTERN = r'To download this file with slow speed, use <a href="([^"]+)">this link</a>'
+ CAPTCHA_PATTERN = r'src="(/file/captcha\.html.+?)"'
+ WAIT_PATTERN = r'Please wait ([\d:]+) to download this file'
+ MULTIDL_ERROR = r'Free account does not allow to download more than one file at the same time'
+
+
+ def handleFree(self):
+ self.sanitize_url()
+ self.html = self.load(self.pyfile.url)
+
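+        # The hidden "slow_id" value identifies this free download session and is reused in the requests below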
+ self.fid = re.search(r'<input type="hidden" name="slow_id" value="([^"]+)">', self.html).group(1)
+ self.html = self.load(self.pyfile.url, post={'yt0': '', 'slow_id': self.fid})
+
+ if ">Downloading is not possible" in self.html:
+            self.fail(_("Free user can't download large files"))
+
+ m = re.search(r"function download\(\){.*window\.location\.href = '([^']+)';", self.html, re.S)
+ if m: # Direct mode
+ self.startDownload(m.group(1))
+ else:
+ self.handleCaptcha()
+
+ self.wait(30)
+
+ self.html = self.load(self.pyfile.url, post={'uniqueId': self.fid, 'free': 1})
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ self.logDebug("Hoster told us to wait for %s" % m.group(1))
+ # string to time convert courtesy of https://stackoverflow.com/questions/10663720
+ ftr = [3600, 60, 1]
+ wait_time = sum([a * b for a, b in zip(ftr, map(int, m.group(1).split(':')))])
+ self.wait(wait_time, True)
+ self.retry()
+
+ m = re.search(self.MULTIDL_ERROR, self.html)
+ if m:
+ # if someone is already downloading on our line, wait 30min and retry
+ self.logDebug("Already downloading, waiting for 30 minutes")
+ self.wait(30 * 60, True)
+ self.retry()
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+ self.startDownload(m.group(1))
+
+
+ def handleCaptcha(self):
+ recaptcha = ReCaptcha(self)
+
+ for _i in xrange(5):
+ post_data = {'free': 1,
+ 'freeDownloadRequest': 1,
+ 'uniqueId': self.fid,
+ 'yt0': ''}
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ captcha_url = urljoin(self.base_url, m.group(1))
+ post_data['CaptchaForm[code]'] = self.decryptCaptcha(captcha_url)
+ else:
+ challenge, response = recaptcha.challenge()
+ post_data.update({'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': response})
+
+ self.html = self.load(self.pyfile.url, post=post_data)
+
+ if 'recaptcha' not in self.html:
+ self.correctCaptcha()
+ break
+ else:
+ self.invalidCaptcha()
+ else:
+ self.fail(_("All captcha attempts failed"))
+
+
+ def startDownload(self, url):
+ d = urljoin(self.base_url, url)
+ self.download(d, disposition=True)
+
+
+ def sanitize_url(self):
+ header = self.load(self.pyfile.url, just_header=True)
+ if 'location' in header:
+ self.pyfile.url = header['location']
+ p = urlparse(self.pyfile.url)
+ self.base_url = "%s://%s" % (p.scheme, p.hostname)
+
+
+getInfo = create_getInfo(Keep2shareCc)
diff --git a/pyload/plugins/hoster/KickloadCom.py b/pyload/plugins/hoster/KickloadCom.py
new file mode 100644
index 000000000..88c84390c
--- /dev/null
+++ b/pyload/plugins/hoster/KickloadCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class KickloadCom(DeadHoster):
+ __name__ = "KickloadCom"
+ __type__ = "hoster"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?kickload\.com/get/.+'
+
+ __description__ = """Kickload.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+getInfo = create_getInfo(KickloadCom)
diff --git a/pyload/plugins/hoster/LemUploadsCom.py b/pyload/plugins/hoster/LemUploadsCom.py
new file mode 100644
index 000000000..0ef739a56
--- /dev/null
+++ b/pyload/plugins/hoster/LemUploadsCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class LemUploadsCom(DeadHoster):
+ __name__ = "LemUploadsCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?lemuploads\.com/\w{12}'
+
+ __description__ = """LemUploads.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("t4skforce", "t4skforce1337[AT]gmail[DOT]com")]
+
+
+getInfo = create_getInfo(LemUploadsCom)
diff --git a/pyload/plugins/hoster/LetitbitNet.py b/pyload/plugins/hoster/LetitbitNet.py
new file mode 100644
index 000000000..16f01bf06
--- /dev/null
+++ b/pyload/plugins/hoster/LetitbitNet.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+#
+# API Documentation:
+# http://api.letitbit.net/reg/static/api.pdf
+#
+# Test links:
+# http://letitbit.net/download/07874.0b5709a7d3beee2408bb1f2eefce/random.bin.html
+
+import re
+
+from urllib import urlencode, urlopen
+from urlparse import urljoin
+
+from pyload.utils import json_loads, json_dumps
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster
+
+
+def api_download_info(url):
+ json_data = ["yw7XQy2v9", ["download/info", {"link": url}]]
+ post_data = urlencode({'r': json_dumps(json_data)})
+ api_rep = urlopen("http://api.letitbit.net/json", data=post_data).read()
+ return json_loads(api_rep)
+
+
+def getInfo(urls):
+ for url in urls:
+ api_rep = api_download_info(url)
+ if api_rep['status'] == 'OK':
+ info = api_rep['data'][0]
+ yield (info['name'], info['size'], 2, url)
+ else:
+ yield (url, 0, 1, url)
+
+
+class LetitbitNet(SimpleHoster):
+ __name__ = "LetitbitNet"
+ __type__ = "hoster"
+ __version__ = "0.26"
+
+ __pattern__ = r'https?://(?:www\.)?(letitbit|shareflare)\.net/download/.*'
+
+ __description__ = """Letitbit.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("z00nx", "z00nx0@gmail.com")]
+
+
+ URL_REPLACEMENTS = [(r"(?<=http://)([^/]+)", "letitbit.net")]
+
+ SECONDS_PATTERN = r'seconds\s*=\s*(\d+);'
+ CAPTCHA_CONTROL_FIELD = r'recaptcha_control_field\s=\s\'(?P<value>.+?)\''
+
+
+ def setup(self):
+ self.resumeDownload = True
+
+
+ def getFileInfo(self):
+ api_rep = api_download_info(self.pyfile.url)
+ if api_rep['status'] == 'OK':
+ self.api_data = api_rep['data'][0]
+ self.pyfile.name = self.api_data['name']
+ self.pyfile.size = self.api_data['size']
+ else:
+ self.offline()
+
+
+ def handleFree(self):
+ action, inputs = self.parseHtmlForm('id="ifree_form"')
+ if not action:
+ self.error(_("ifree_form"))
+
+ self.pyfile.size = float(inputs['sssize'])
+ self.logDebug(action, inputs)
+ inputs['desc'] = ""
+
+ self.html = self.load(urljoin("http://letitbit.net/", action), post=inputs, cookies=True)
+
+ m = re.search(self.SECONDS_PATTERN, self.html)
+ seconds = int(m.group(1)) if m else 60
+ self.logDebug("Seconds found", seconds)
+ m = re.search(self.CAPTCHA_CONTROL_FIELD, self.html)
+ recaptcha_control_field = m.group(1)
+ self.logDebug("ReCaptcha control field found", recaptcha_control_field)
+ self.wait(seconds)
+
+ res = self.load("http://letitbit.net/ajax/download3.php", post=" ", cookies=True)
+ if res != '1':
+ self.error(_("Unknown response - ajax_check_url"))
+ self.logDebug(res)
+
+ recaptcha = ReCaptcha(self)
+ challenge, response = recaptcha.challenge()
+
+ post_data = {"recaptcha_challenge_field": challenge,
+ "recaptcha_response_field": response,
+ "recaptcha_control_field": recaptcha_control_field}
+ self.logDebug("Post data to send", post_data)
+ res = self.load("http://letitbit.net/ajax/check_recaptcha.php", post=post_data, cookies=True)
+ self.logDebug(res)
+ if not res:
+ self.invalidCaptcha()
+ if res == "error_free_download_blocked":
+ self.logWarning(_("Daily limit reached"))
+ self.wait(secondsToMidnight(gmt=2), True)
+ if res == "error_wrong_captcha":
+ self.invalidCaptcha()
+ self.retry()
+ elif res.startswith('['):
+ urls = json_loads(res)
+ elif res.startswith('http://'):
+ urls = [res]
+ else:
+ self.error(_("Unknown response - captcha check"))
+
+ self.correctCaptcha()
+
+ for download_url in urls:
+ try:
+ self.download(download_url)
+ break
+ except Exception, e:
+ self.logError(e)
+ else:
+ self.fail(_("Download did not finish correctly"))
+
+
+ def handlePremium(self):
+ api_key = self.user
+ premium_key = self.account.getAccountData(self.user)['password']
+
+ json_data = [api_key, ["download/direct_links", {"pass": premium_key, "link": self.pyfile.url}]]
+ api_rep = self.load('http://api.letitbit.net/json', post={'r': json_dumps(json_data)})
+ self.logDebug("API Data: " + api_rep)
+ api_rep = json_loads(api_rep)
+
+ if api_rep['status'] == 'FAIL':
+ self.fail(api_rep['data'])
+
+ direct_link = api_rep['data'][0][0]
+ self.logDebug("Direct Link: " + direct_link)
+
+ self.download(direct_link, disposition=True)
diff --git a/pyload/plugins/hoster/LinksnappyCom.py b/pyload/plugins/hoster/LinksnappyCom.py
new file mode 100644
index 000000000..d7991ff3c
--- /dev/null
+++ b/pyload/plugins/hoster/LinksnappyCom.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urlsplit
+
+from pyload.utils import json_loads, json_dumps
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class LinksnappyCom(Hoster):
+ __name__ = "LinksnappyCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?linksnappy\.com'
+
+ __description__ = """Linksnappy.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+    SINGLE_CHUNK_HOSTERS = ('easybytez.com', )
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Linksnappy.com")
+ self.fail(_("No Linksnappy.com account provided"))
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ host = self._get_host(pyfile.url)
+ json_params = json_dumps({'link': pyfile.url,
+ 'type': host,
+ 'username': self.user,
+ 'password': self.account.getAccountData(self.user)['password']})
+ r = self.load('http://gen.linksnappy.com/genAPI.php',
+ post={'genLinks': json_params})
+ self.logDebug("JSON data: " + r)
+
+ j = json_loads(r)['links'][0]
+
+ if j['error']:
+ msg = _("Error converting the link")
+ self.logError(msg, j['error'])
+ self.fail(msg)
+
+ pyfile.name = j['filename']
+ new_url = j['generated']
+
+            if host in self.SINGLE_CHUNK_HOSTERS:
+                self.chunkLimit = 1
+            else:
+                self.setup()
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: " + new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({"html302": "<title>302 Found</title>"})
+ if check == "html302":
+ self.retry(wait_time=5, reason=_("Linksnappy returns only HTML data"))
+
+
+ @staticmethod
+ def _get_host(url):
+ host = urlsplit(url).netloc
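+        # keep only the registrable domain, e.g. "www.easybytez.com" -> "easybytez.com"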
+ return re.search(r'[\w-]+\.\w+$', host).group(0)
diff --git a/pyload/plugins/hoster/LoadTo.py b/pyload/plugins/hoster/LoadTo.py
new file mode 100644
index 000000000..974a27d29
--- /dev/null
+++ b/pyload/plugins/hoster/LoadTo.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://www.load.to/JWydcofUY6/random.bin
+# http://www.load.to/oeSmrfkXE/random100.bin
+
+import re
+
+from pyload.plugins.internal.CaptchaService import SolveMedia
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class LoadTo(SimpleHoster):
+ __name__ = "LoadTo"
+ __type__ = "hoster"
+ __version__ = "0.18"
+
+ __pattern__ = r'http://(?:www\.)?load\.to/\w+'
+
+ __description__ = """Load.to hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("halfman", "Pulpan3@gmail.com"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ NAME_PATTERN = r'<h1>(?P<N>.+)</h1>'
+ SIZE_PATTERN = r'Size: (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+ OFFLINE_PATTERN = r'>Can\'t find file'
+
+ LINK_PATTERN = r'<form method="post" action="(.+?)"'
+ WAIT_PATTERN = r'type="submit" value="Download \((\d+)\)"'
+
+ URL_REPLACEMENTS = [(r'(\w)$', r'\1/')]
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = 1
+
+
+ def handleFree(self):
+ # Search for Download URL
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+
+ download_url = m.group(1)
+
+ # Set Timer - may be obsolete
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ self.wait(m.group(1))
+
+ # Load.to is using solvemedia captchas since ~july 2014:
+ solvemedia = SolveMedia(self)
+ captcha_key = solvemedia.detect_key()
+
+ if captcha_key is None:
+ self.download(download_url)
+ else:
+ captcha_challenge, captcha_response = solvemedia.challenge(captcha_key)
+ self.download(download_url, post={"adcopy_challenge": captcha_challenge, "adcopy_response": captcha_response})
+ check = self.checkDownload({'404': re.compile("\A<h1>404 Not Found</h1>"), 'html': re.compile("html")})
+ if check == "404":
+ self.invalidCaptcha()
+ self.retry()
+ elif check == "html":
+ self.logWarning(_("Downloaded file is an html page, will retry"))
+ self.retry()
+
+
+getInfo = create_getInfo(LoadTo)
diff --git a/pyload/plugins/hoster/LomafileCom.py b/pyload/plugins/hoster/LomafileCom.py
new file mode 100644
index 000000000..2c4bd37a2
--- /dev/null
+++ b/pyload/plugins/hoster/LomafileCom.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class LomafileCom(XFSHoster):
+ __name__ = "LomafileCom"
+ __type__ = "hoster"
+ __version__ = "0.51"
+
+ __pattern__ = r'http://lomafile\.com/\w{12}'
+
+ __description__ = """Lomafile.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("nath_schwarz", "nathan.notwhite@gmail.com"),
+ ("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "lomafile.com"
+
+ NAME_PATTERN = r'<a href="http://lomafile\.com/w{12}/(?P<N>.+?)">'
+ SIZE_PATTERN = r'Size:</b></td><td>(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ OFFLINE_PATTERN = r'>(No such file|Software error:<)'
+ TEMP_OFFLINE_PATTERN = r'The page may have been renamed, removed or be temporarily unavailable.<'
+
+ CAPTCHA_PATTERN = r'(http://lomafile\.com/captchas/[^"\']+)'
+
+
+getInfo = create_getInfo(LomafileCom)
diff --git a/pyload/plugins/hoster/LuckyShareNet.py b/pyload/plugins/hoster/LuckyShareNet.py
new file mode 100644
index 000000000..31de417b7
--- /dev/null
+++ b/pyload/plugins/hoster/LuckyShareNet.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.utils import json_loads
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class LuckyShareNet(SimpleHoster):
+ __name__ = "LuckyShareNet"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?luckyshare\.net/(?P<ID>\d{10,})'
+
+ __description__ = """LuckyShare.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ INFO_PATTERN = r'<h1 class=\'file_name\'>(?P<N>\S+)</h1>\s*<span class=\'file_size\'>Filesize: (?P<S>[\d.,]+)(?P<U>[\w^_]+)</span>'
+ OFFLINE_PATTERN = r'There is no such file available'
+
+
+ def parseJson(self, rep):
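+        # the hoster replies with plain error strings instead of JSON when rate-limited or when the hash expired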
+ if 'AJAX Error' in rep:
+ html = self.load(self.pyfile.url, decode=True)
+ m = re.search(r"waitingtime = (\d+);", html)
+ if m:
+ seconds = int(m.group(1))
+ self.logDebug("You have to wait %d seconds between free downloads" % seconds)
+ self.retry(wait_time=seconds)
+ else:
+ self.error(_("Unable to detect wait time between free downloads"))
+ elif 'Hash expired' in rep:
+ self.retry(reason=_("Hash expired"))
+ return json_loads(rep)
+
+
+ # TODO: There should be a filesize limit for free downloads
+ # TODO: Some files could not be downloaded in free mode
+ def handleFree(self):
+ file_id = re.match(self.__pattern__, self.pyfile.url).group('ID')
+ self.logDebug("File ID: " + file_id)
+ rep = self.load(r"http://luckyshare.net/download/request/type/time/file/" + file_id, decode=True)
+ self.logDebug("JSON: " + rep)
+ json = self.parseJson(rep)
+
+ self.wait(int(json['time']))
+
+ recaptcha = ReCaptcha(self)
+
+ for _i in xrange(5):
+ challenge, response = recaptcha.challenge()
+ rep = self.load(r"http://luckyshare.net/download/verify/challenge/%s/response/%s/hash/%s" %
+ (challenge, response, json['hash']), decode=True)
+ self.logDebug("JSON: " + rep)
+ if 'link' in rep:
+ json.update(self.parseJson(rep))
+ self.correctCaptcha()
+ break
+ elif 'Verification failed' in rep:
+ self.invalidCaptcha()
+ else:
+ self.error(_("Unable to get downlaod link"))
+
+ if not json['link']:
+ self.fail(_("No Download url retrieved/all captcha attempts failed"))
+
+ self.logDebug("Direct URL: " + json['link'])
+ self.download(json['link'])
+
+
+getInfo = create_getInfo(LuckyShareNet)
diff --git a/pyload/plugins/hoster/MediafireCom.py b/pyload/plugins/hoster/MediafireCom.py
new file mode 100644
index 000000000..10d25ab9d
--- /dev/null
+++ b/pyload/plugins/hoster/MediafireCom.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import SolveMedia
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+from pyload.network.RequestFactory import getURL
+
+
+def replace_eval(js_expr):
+ return js_expr.replace(r'eval("', '').replace(r"\'", r"'").replace(r'\"', r'"')
+
+
+def checkHTMLHeader(url):
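+    # Follows up to 3 redirects by hand; returns (url, status) where status is
+    # 0 = ok, 1 = dead file (errno=320 redirect), 2 = direct download (content-disposition), 3 = request error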
+ try:
+ for _i in xrange(3):
+ header = getURL(url, just_header=True)
+ for line in header.splitlines():
+ line = line.lower()
+ if 'location' in line:
+ url = line.split(':', 1)[1].strip()
+ if 'error.php?errno=320' in url:
+ return url, 1
+ if not url.startswith('http://'):
+ url = 'http://www.mediafire.com' + url
+ break
+ elif 'content-disposition' in line:
+ return url, 2
+ else:
+ break
+ except:
+ return url, 3
+
+ return url, 0
+
+
+def getInfo(urls):
+ for url in urls:
+ location, status = checkHTMLHeader(url)
+
+ if status:
+ file_info = (url, 0, status, url)
+ else:
+ file_info = parseFileInfo(MediafireCom, url, getURL(url, decode=True))
+
+ yield file_info
+
+
+class MediafireCom(SimpleHoster):
+ __name__ = "MediafireCom"
+ __type__ = "hoster"
+ __version__ = "0.80"
+
+ __pattern__ = r'http://(?:www\.)?mediafire\.com/(file/|(view/?|download\.php)?\?)(\w{11}|\w{15})($|/)'
+
+ __description__ = """Mediafire.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ LINK_PATTERN = r'<div class="download_link"[^>]*(?:z-index:(?P<zindex>\d+))?[^>]*>\s*<a href="(?P<href>http://[^"]+)"'
+ JS_KEY_PATTERN = r'DoShow\(\'mfpromo1\'\);[^{]*{((\w+)=\'\';.*?)eval\(\2\);'
+ JS_ZMODULO_PATTERN = r'\(\'z-index\'\)\) \% (\d+)\)\);'
+ PAGE1_ACTION_PATTERN = r'<link rel="canonical" href="([^"]+)"/>'
+ PASSWORD_PATTERN = r'<form name="form_password"'
+
+ NAME_PATTERN = r'<META NAME="description" CONTENT="(?P<N>[^"]+)"/>'
+ INFO_PATTERN = r'oFileSharePopup\.ald\(\'(?P<ID>[^\']*)\',\'(?P<N>[^\']*)\',\'(?P<S>[^\']*)\',\'\',\'(?P<sha256>[^\']*)\'\)'
+ OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>'
+
+
+ def setup(self):
+ self.multiDL = False
+
+
+ def process(self, pyfile):
+ pyfile.url = re.sub(r'/view/?\?', '/?', pyfile.url)
+
+ self.url, result = checkHTMLHeader(pyfile.url)
+ self.logDebug("Location (%d): %s" % (result, self.url))
+
+ if result == 0:
+ self.html = self.load(self.url, decode=True)
+ self.checkCaptcha()
+ self.multiDL = True
+ self.check_data = self.getFileInfo()
+
+ if self.account:
+ self.handlePremium()
+ else:
+ self.handleFree()
+ elif result == 1:
+ self.offline()
+ else:
+ self.multiDL = True
+ self.download(self.url, disposition=True)
+
+
+ def handleFree(self):
+ passwords = self.getPassword().splitlines()
+ while self.PASSWORD_PATTERN in self.html:
+ if len(passwords):
+ password = passwords.pop(0)
+ self.logInfo(_("Password protected link, trying ") + password)
+ self.html = self.load(self.url, post={"downloadp": password})
+ else:
+ self.fail(_("No or incorrect password"))
+
+ m = re.search(r'kNO = r"(http://.*?)";', self.html)
+ if m is None:
+ self.error(_("No download URL"))
+
+ download_url = m.group(1)
+ self.download(download_url)
+
+
+ def checkCaptcha(self):
+ solvemedia = SolveMedia(self)
+ captcha_challenge, captcha_response = solvemedia.challenge()
+ self.html = self.load(self.url, post={"adcopy_challenge": captcha_challenge,
+ "adcopy_response": captcha_response}, decode=True)
diff --git a/pyload/plugins/hoster/MegaCoNz.py b/pyload/plugins/hoster/MegaCoNz.py
new file mode 100644
index 000000000..f09a5cdd5
--- /dev/null
+++ b/pyload/plugins/hoster/MegaCoNz.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+
+import random
+import re
+
+from array import array
+from base64 import standard_b64decode
+from os import remove
+
+from Crypto.Cipher import AES
+from Crypto.Util import Counter
+from pycurl import SSL_CIPHER_LIST
+
+from pyload.utils import json_loads, json_dumps
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class MegaCoNz(Hoster):
+ __name__ = "MegaCoNz"
+ __type__ = "hoster"
+ __version__ = "0.16"
+
+ __pattern__ = r'https?://(\w+\.)?mega\.co\.nz/#!([\w!-]+)'
+
+ __description__ = """Mega.co.nz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "ranan@pyload.org")]
+
+
+ API_URL = "https://g.api.mega.co.nz/cs?id=%d"
+ FILE_SUFFIX = ".crypted"
+
+
+ def b64_decode(self, data):
+ data = data.replace("-", "+").replace("_", "/")
+ return standard_b64decode(data + '=' * (-len(data) % 4))
+
+
+ def getCipherKey(self, key):
+ """ Construct the cipher key from the given data """
+ a = array("I", key)
+ key_array = array("I", [a[0] ^ a[4], a[1] ^ a[5], a[2] ^ a[6], a[3] ^ a[7]])
+ return key_array
+
+
+ def callApi(self, **kwargs):
+ """ Dispatch a call to the api, see https://mega.co.nz/#developers """
+ # generate a session id, no idea where to obtain elsewhere
+ uid = random.randint(10 << 9, 10 ** 10)
+
+ res = self.load(self.API_URL % uid, post=json_dumps([kwargs]))
+ self.logDebug("Api Response: " + res)
+ return json_loads(res)
+
+
+ def decryptAttr(self, data, key):
+ cbc = AES.new(self.getCipherKey(key), AES.MODE_CBC, "\0" * 16)
+ attr = cbc.decrypt(self.b64_decode(data))
+ self.logDebug("Decrypted Attr: " + attr)
+ if not attr.startswith("MEGA"):
+ self.fail(_("Decryption failed"))
+
+ # Data is padded, 0-bytes must be stripped
+ return json_loads(re.search(r'{.+?}', attr).group(0))
+
+
+ def decryptFile(self, key):
+ """ Decrypts the file at lastDownload` """
+
+ # upper 64 bit of counter start
+ n = key[16:24]
+
+ # convert counter to long and shift bytes
+ ctr = Counter.new(128, initial_value=long(n.encode("hex"), 16) << 64)
+ cipher = AES.new(self.getCipherKey(key), AES.MODE_CTR, counter=ctr)
+
+ self.pyfile.setStatus("decrypting")
+
+ file_crypted = self.lastDownload
+ file_decrypted = file_crypted.rsplit(self.FILE_SUFFIX)[0]
+
+ try:
+ f = open(file_crypted, "rb")
+ df = open(file_decrypted, "wb")
+ except IOError, e:
+ self.fail(str(e))
+
+ # TODO: calculate CBC-MAC for checksum
+
+ size = 2 ** 15 # buffer size, 32k
+ while True:
+ buf = f.read(size)
+ if not buf:
+ break
+
+ df.write(cipher.decrypt(buf))
+
+ f.close()
+ df.close()
+ remove(file_crypted)
+
+ self.lastDownload = file_decrypted
+
+
+ def process(self, pyfile):
+ key = None
+
+ # match is guaranteed because plugin was chosen to handle url
+ node = re.match(self.__pattern__, pyfile.url).group(2)
+ if "!" in node:
+ node, key = node.split("!")
+
+ self.logDebug("File id: %s | Key: %s" % (node, key))
+
+ if not key:
+ self.fail(_("No file key provided in the URL"))
+
+ # g is for requesting a download url
+ # this is similar to the calls in the mega js app, documentation is very bad
+ dl = self.callApi(a="g", g=1, p=node, ssl=1)[0]
+
+ if "e" in dl:
+ e = dl['e']
+ # ETEMPUNAVAIL (-18): Resource temporarily not available, please try again later
+ if e == -18:
+ self.retry()
+ else:
+ self.fail(_("Error code:") + e)
+
+ # TODO: map other error codes, e.g
+ # EACCESS (-11): Access violation (e.g., trying to write to a read-only share)
+
+ key = self.b64_decode(key)
+ attr = self.decryptAttr(dl['at'], key)
+
+ pyfile.name = attr['n'] + self.FILE_SUFFIX
+
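+        # prefer the lightweight RC4-MD5 cipher for the HTTPS transfer of the encrypted file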
+ self.req.http.c.setopt(SSL_CIPHER_LIST, "RC4-MD5:DEFAULT")
+
+ self.download(dl['g'])
+ self.decryptFile(key)
+
+ # Everything is finished and final name can be set
+ pyfile.name = attr['n']
diff --git a/pyload/plugins/hoster/MegaDebridEu.py b/pyload/plugins/hoster/MegaDebridEu.py
new file mode 100644
index 000000000..1354c0e3d
--- /dev/null
+++ b/pyload/plugins/hoster/MegaDebridEu.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote_plus
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class MegaDebridEu(Hoster):
+ __name__ = "MegaDebridEu"
+ __type__ = "hoster"
+ __version__ = "0.4"
+
+ __pattern__ = r'^https?://(?:w{3}\d+\.mega-debrid\.eu|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/download/file/[^/]+/.+$'
+
+ __description__ = """mega-debrid.eu hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("D.Ducatel", "dducatel@je-geek.fr")]
+
+
+ API_URL = "https://www.mega-debrid.eu/api.php"
+
+
+ def getFilename(self, url):
+ try:
+ return unquote_plus(url.rsplit("/", 1)[1])
+ except IndexError:
+ return ""
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.exitOnFail("Please enter your %s account or deactivate this plugin" % "Mega-debrid.eu")
+ else:
+ if not self.connectToApi():
+ self.exitOnFail("Unable to connect to Mega-debrid.eu")
+
+ self.logDebug("Old URL: %s" % pyfile.url)
+ new_url = self.debridLink(pyfile.url)
+ self.logDebug("New URL: " + new_url)
+
+ filename = self.getFilename(new_url)
+ if filename != "":
+ pyfile.name = filename
+ self.download(new_url, disposition=True)
+
+
+ def connectToApi(self):
+ """
+ Connexion to the mega-debrid API
+ Return True if succeed
+ """
+ user, data = self.account.selectAccount()
+ jsonResponse = self.load(self.API_URL,
+ get={'action': 'connectUser', 'login': user, 'password': data['password']})
+ res = json_loads(jsonResponse)
+
+ if res['response_code'] == "ok":
+ self.token = res['token']
+ return True
+ else:
+ return False
+
+
+ def debridLink(self, linkToDebrid):
+ """
+ Debrid a link
+ Return The debrided link if succeed or original link if fail
+ """
+ jsonResponse = self.load(self.API_URL, get={'action': 'getLink', 'token': self.token},
+ post={"link": linkToDebrid})
+ res = json_loads(jsonResponse)
+
+ if res['response_code'] == "ok":
+ debridedLink = res['debridLink'][1:-1]
+ return debridedLink
+ else:
+ self.exitOnFail("Unable to debrid %s" % linkToDebrid)
+
+
+ def exitOnFail(self, msg):
+ """
+ exit the plugin on fail case
+ And display the reason of this failure
+ """
+ if self.getConfig("unloadFailing"):
+ self.logError(_(msg))
+ self.resetAccount()
+ else:
+ self.fail(_(msg))
diff --git a/pyload/plugins/hoster/MegaFilesSe.py b/pyload/plugins/hoster/MegaFilesSe.py
new file mode 100644
index 000000000..4bcaa22fb
--- /dev/null
+++ b/pyload/plugins/hoster/MegaFilesSe.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class MegaFilesSe(DeadHoster):
+ __name__ = "MegaFilesSe"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?megafiles\.se/\w{12}'
+
+ __description__ = """MegaFiles.se hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("t4skforce", "t4skforce1337[AT]gmail[DOT]com")]
+
+
+getInfo = create_getInfo(MegaFilesSe)
diff --git a/pyload/plugins/hoster/MegaRapidCz.py b/pyload/plugins/hoster/MegaRapidCz.py
new file mode 100644
index 000000000..f49d0e1ec
--- /dev/null
+++ b/pyload/plugins/hoster/MegaRapidCz.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import HTTPHEADER
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.network.RequestFactory import getRequest
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+
+
+def getInfo(urls):
+ h = getRequest()
+ h.c.setopt(HTTPHEADER,
+ ["Accept: text/html",
+ "User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0"])
+
+ for url in urls:
+ html = h.load(url, decode=True)
+ yield parseFileInfo(MegaRapidCz, url, html)
+
+
+class MegaRapidCz(SimpleHoster):
+ __name__ = "MegaRapidCz"
+ __type__ = "hoster"
+ __version__ = "0.54"
+
+ __pattern__ = r'http://(?:www\.)?(share|mega)rapid\.cz/soubor/\d+/.+'
+
+ __description__ = """MegaRapid.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("MikyWoW", "mikywow@seznam.cz"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'<h1[^>]*><span[^>]*>(?:<a[^>]*>)?(?P<N>[^<]+)'
+ SIZE_PATTERN = r'<td class="i">Velikost:</td>\s*<td class="h"><strong>\s*(?P<S>[\d.,]+) (?P<U>[\w^_]+)</strong></td>'
+ OFFLINE_PATTERN = ur'Nastala chyba 404|Soubor byl smazán'
+
+ FORCE_CHECK_TRAFFIC = True
+
+ LINK_PATTERN = r'<a href="([^"]+)" title="Stahnout">([^<]+)</a>'
+ ERR_LOGIN_PATTERN = ur'<div class="error_div"><strong>Stahování je přístupné pouze přihlášenÃœm uÅŸivatelům'
+ ERR_CREDIT_PATTERN = ur'<div class="error_div"><strong>Stahování zdarma je moÅŸné jen přes náš'
+
+
+ def setup(self):
+ self.chunkLimit = 1
+
+
+ def handlePremium(self):
+ try:
+ self.html = self.load(self.pyfile.url, decode=True)
+ except BadHeader, e:
+ self.account.relogin(self.user)
+ self.retry(wait_time=60, reason=str(e))
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ link = m.group(1)
+ self.logDebug("Premium link: %s" % link)
+ self.download(link, disposition=True)
+ else:
+ if re.search(self.ERR_LOGIN_PATTERN, self.html):
+                self.account.relogin(self.user)
+ self.retry(wait_time=60, reason=_("User login failed"))
+ elif re.search(self.ERR_CREDIT_PATTERN, self.html):
+ self.fail(_("Not enough credit left"))
+ else:
+ self.fail(_("Download link not found"))
diff --git a/pyload/plugins/hoster/MegacrypterCom.py b/pyload/plugins/hoster/MegacrypterCom.py
new file mode 100644
index 000000000..1ebd167f3
--- /dev/null
+++ b/pyload/plugins/hoster/MegacrypterCom.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.utils import json_loads, json_dumps
+
+from pyload.plugins.hoster.MegaCoNz import MegaCoNz
+
+
+class MegacrypterCom(MegaCoNz):
+ __name__ = "MegacrypterCom"
+ __type__ = "hoster"
+ __version__ = "0.21"
+
+ __pattern__ = r'(https?://\w{0,10}\.?megacrypter\.com/[\w!-]+)'
+
+ __description__ = """Megacrypter.com decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("GonzaloSR", "gonzalo@gonzalosr.com")]
+
+
+ API_URL = "http://megacrypter.com/api"
+ FILE_SUFFIX = ".crypted"
+
+
+ def callApi(self, **kwargs):
+ """ Dispatch a call to the api, see megacrypter.com/api_doc """
+ self.logDebug("JSON request: " + json_dumps(kwargs))
+ res = self.load(self.API_URL, post=json_dumps(kwargs))
+ self.logDebug("API Response: " + res)
+ return json_loads(res)
+
+
+ def process(self, pyfile):
+ # match is guaranteed because plugin was chosen to handle url
+ node = re.match(self.__pattern__, pyfile.url).group(1)
+
+ # get Mega.co.nz link info
+ info = self.callApi(link=node, m="info")
+
+ # get crypted file URL
+ dl = self.callApi(link=node, m="dl")
+
+ # TODO: map error codes, implement password protection
+ # if info['pass'] is True:
+ # crypted_file_key, md5_file_key = info['key'].split("#")
+
+ key = self.b64_decode(info['key'])
+
+ pyfile.name = info['name'] + self.FILE_SUFFIX
+
+ self.download(dl['url'])
+ self.decryptFile(key)
+
+ # Everything is finished and final name can be set
+ pyfile.name = info['name']
diff --git a/pyload/plugins/hoster/MegareleaseOrg.py b/pyload/plugins/hoster/MegareleaseOrg.py
new file mode 100644
index 000000000..7f978c94e
--- /dev/null
+++ b/pyload/plugins/hoster/MegareleaseOrg.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class MegareleaseOrg(DeadHoster):
+ __name__ = "MegareleaseOrg"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?megarelease\.org/\w{12}'
+
+ __description__ = """Megarelease.org hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("derek3x", "derek3x@vmail.me"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(MegareleaseOrg)
diff --git a/pyload/plugins/hoster/MegasharesCom.py b/pyload/plugins/hoster/MegasharesCom.py
new file mode 100644
index 000000000..fcb53a486
--- /dev/null
+++ b/pyload/plugins/hoster/MegasharesCom.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class MegasharesCom(SimpleHoster):
+ __name__ = "MegasharesCom"
+ __type__ = "hoster"
+ __version__ = "0.27"
+
+ __pattern__ = r'http://(?:www\.)?(d\d{2}\.)?megashares\.com/((index\.php)?\?d\d{2}=|dl/)\w+'
+
+ __description__ = """Megashares.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'<h1 class="black xxl"[^>]*title="(?P<N>[^"]+)">'
+ SIZE_PATTERN = r'<strong><span class="black">Filesize:</span></strong> (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+ OFFLINE_PATTERN = r'<dd class="red">(Invalid Link Request|Link has been deleted|Invalid link)'
+
+ LINK_PATTERN = r'<div id="show_download_button_%d"[^>]*>\s*<a href="([^"]+)">'
+
+ PASSPORT_LEFT_PATTERN = r'Your Download Passport is: <[^>]*>(\w+).*?You have.*?<[^>]*>.*?([\d.]+) (\w+)'
+ PASSPORT_RENEW_PATTERN = r'(\d+):<strong>(\d+)</strong>:<strong>(\d+)</strong>'
+ REACTIVATE_NUM_PATTERN = r'<input[^>]*id="random_num" value="(\d+)" />'
+ REACTIVATE_PASSPORT_PATTERN = r'<input[^>]*id="passport_num" value="(\w+)" />'
+ REQUEST_URI_PATTERN = r'var request_uri = "([^"]+)";'
+ NO_SLOTS_PATTERN = r'<dd class="red">All download slots for this link are currently filled'
+
+
+ def setup(self):
+ self.resumeDownload = True
+ self.multiDL = self.premium
+
+
+ def handlePremium(self):
+ self.handleDownload(True)
+
+
+ def handleFree(self):
+ if self.NO_SLOTS_PATTERN in self.html:
+ self.retry(wait_time=5 * 60)
+
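+        # an expired download passport has to be reactivated by solving a small numeric captcha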
+ m = re.search(self.REACTIVATE_PASSPORT_PATTERN, self.html)
+ if m:
+ passport_num = m.group(1)
+ request_uri = re.search(self.REQUEST_URI_PATTERN, self.html).group(1)
+
+ for _i in xrange(5):
+ random_num = re.search(self.REACTIVATE_NUM_PATTERN, self.html).group(1)
+
+ verifyinput = self.decryptCaptcha(
+ "http://d01.megashares.com/index.php?secgfx=gfx&random_num=%s" % random_num)
+ self.logInfo(_("Reactivating passport %s: %s %s") % (passport_num, random_num, verifyinput))
+
+ url = ("http://d01.megashares.com%s&rs=check_passport_renewal" % request_uri +
+ "&rsargs[]=%s&rsargs[]=%s&rsargs[]=%s" % (verifyinput, random_num, passport_num) +
+ "&rsargs[]=replace_sec_pprenewal&rsrnd=%s" % str(int(time() * 1000)))
+ self.logDebug(url)
+ res = self.load(url)
+
+ if 'Thank you for reactivating your passport.' in res:
+ self.correctCaptcha()
+ self.retry()
+ else:
+ self.invalidCaptcha()
+ else:
+ self.fail(_("Failed to reactivate passport"))
+
+ m = re.search(self.PASSPORT_RENEW_PATTERN, self.html)
+ if m:
+            t = [int(x) for x in m.groups()]  # don't shadow the imported time()
+            renew = t[0] + (t[1] * 60) + (t[2] * 60)
+ self.logDebug("Waiting %d seconds for a new passport" % renew)
+ self.retry(wait_time=renew, reason=_("Passport renewal"))
+
+ # Check traffic left on passport
+ m = re.search(self.PASSPORT_LEFT_PATTERN, self.html, re.M | re.S)
+ if m is None:
+ self.fail(_("Passport not found"))
+
+ self.logInfo(_("Download passport: %s") % m.group(1))
+ data_left = float(m.group(2)) * 1024 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[m.group(3)]
+ self.logInfo(_("Data left: %s %s (%d MB needed)") % (m.group(2), m.group(3), self.pyfile.size / 1048576))
+
+ if not data_left:
+ self.retry(wait_time=600, reason=_("Passport renewal"))
+
+ self.handleDownload(False)
+
+
+ def handleDownload(self, premium=False):
+        # Find download link
+        m = re.search(self.LINK_PATTERN % (1 if premium else 2), self.html)
+        msg = _("%s download URL") % ("Premium" if premium else "Free")
+ if m is None:
+ self.error(msg)
+
+ download_url = m.group(1)
+ self.logDebug("%s: %s" % (msg, download_url))
+ self.download(download_url)
+
+
+getInfo = create_getInfo(MegasharesCom)
diff --git a/pyload/plugins/hoster/MegauploadCom.py b/pyload/plugins/hoster/MegauploadCom.py
new file mode 100644
index 000000000..20700a3d0
--- /dev/null
+++ b/pyload/plugins/hoster/MegauploadCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class MegauploadCom(DeadHoster):
+ __name__ = "MegauploadCom"
+ __type__ = "hoster"
+ __version__ = "0.31"
+
+ __pattern__ = r'http://(?:www\.)?megaupload\.com/\?.*&?(d|v)=\w+'
+
+ __description__ = """Megaupload.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org")]
+
+
+getInfo = create_getInfo(MegauploadCom)
diff --git a/pyload/plugins/hoster/MegavideoCom.py b/pyload/plugins/hoster/MegavideoCom.py
new file mode 100644
index 000000000..aa458fa2c
--- /dev/null
+++ b/pyload/plugins/hoster/MegavideoCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class MegavideoCom(DeadHoster):
+ __name__ = "MegavideoCom"
+ __type__ = "hoster"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?megavideo\.com/\?.*&?(d|v)=\w+'
+
+ __description__ = """Megavideo.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de"),
+ ("mkaay", "mkaay@mkaay.de")]
+
+
+getInfo = create_getInfo(MegavideoCom)
diff --git a/pyload/plugins/hoster/MovReelCom.py b/pyload/plugins/hoster/MovReelCom.py
new file mode 100644
index 000000000..39216a295
--- /dev/null
+++ b/pyload/plugins/hoster/MovReelCom.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class MovReelCom(XFSHoster):
+ __name__ = "MovReelCom"
+ __type__ = "hoster"
+ __version__ = "1.24"
+
+ __pattern__ = r'http://(?:www\.)?movreel\.com/\w{12}'
+
+ __description__ = """MovReel.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("JorisV83", "jorisv83-pyload@yahoo.com")]
+
+
+ HOSTER_DOMAIN = "movreel.com"
+
+ NAME_PATTERN = r'Filename: <b>(?P<N>.+?)<'
+ SIZE_PATTERN = r'Size: (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ LINK_PATTERN = r'<a href="([^"]+)">Download Link'
+
+
+getInfo = create_getInfo(MovReelCom)
diff --git a/pyload/plugins/hoster/MultishareCz.py b/pyload/plugins/hoster/MultishareCz.py
new file mode 100644
index 000000000..60d02b6e0
--- /dev/null
+++ b/pyload/plugins/hoster/MultishareCz.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import random
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class MultishareCz(SimpleHoster):
+ __name__ = "MultishareCz"
+ __type__ = "hoster"
+ __version__ = "0.34"
+
+ __pattern__ = r'http://(?:www\.)?multishare\.cz/stahnout/(?P<ID>\d+).*'
+
+ __description__ = """MultiShare.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ INFO_PATTERN = ur'(?:<li>Název|Soubor): <strong>(?P<N>[^<]+)</strong><(?:/li><li|br)>Velikost: <strong>(?P<S>[^<]+)</strong>'
+    OFFLINE_PATTERN = ur'<h1>Stáhnout soubor</h1><p><strong>Požadovaný soubor neexistuje.</strong></p>'
+ SIZE_REPLACEMENTS = [('&nbsp;', '')]
+
+
+ def process(self, pyfile):
+ msurl = re.match(self.__pattern__, pyfile.url)
+ if msurl:
+ self.fileID = msurl.group('ID')
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+ else:
+ self.handleOverriden()
+
+
+ def handleFree(self):
+ self.download("http://www.multishare.cz/html/download_free.php?ID=%s" % self.fileID)
+
+
+ def handlePremium(self):
+ if not self.checkCredit():
+ self.logWarning(_("Not enough credit left to download file"))
+ self.resetAccount()
+
+ self.download("http://www.multishare.cz/html/download_premium.php?ID=%s" % self.fileID)
+
+
+ def handleOverriden(self):
+ if not self.premium:
+ self.fail(_("Only premium users can download from other hosters"))
+
+ self.html = self.load('http://www.multishare.cz/html/mms_ajax.php', post={"link": self.pyfile.url}, decode=True)
+ self.getFileInfo()
+
+ if not self.checkCredit():
+ self.fail(_("Not enough credit left to download file"))
+
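+        # pick a pseudo-random mirror number for the download server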
+ url = "http://dl%d.mms.multishare.cz/html/mms_process.php" % round(random() * 10000 * random())
+ params = {"u_ID": self.acc_info['u_ID'], "u_hash": self.acc_info['u_hash'], "link": self.pyfile.url}
+ self.logDebug(url, params)
+ self.download(url, get=params)
+
+
+ def checkCredit(self):
+ self.acc_info = self.account.getAccountInfo(self.user, True)
+ self.logInfo(_("User %s has %i MB left") % (self.user, self.acc_info['trafficleft'] / 1024))
+
+ return self.pyfile.size / 1024 <= self.acc_info['trafficleft']
+
+
+getInfo = create_getInfo(MultishareCz)
diff --git a/pyload/plugins/hoster/MyfastfileCom.py b/pyload/plugins/hoster/MyfastfileCom.py
new file mode 100644
index 000000000..cb1148389
--- /dev/null
+++ b/pyload/plugins/hoster/MyfastfileCom.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import json_loads
+
+
+class MyfastfileCom(Hoster):
+ __name__ = "MyfastfileCom"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/dl/'
+
+ __description__ = """Myfastfile.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Myfastfile.com")
+ self.fail(_("No Myfastfile.com account provided"))
+ else:
+ self.logDebug("Original URL: %s" % pyfile.url)
+ page = self.load('http://myfastfile.com/api.php',
+ get={'user': self.user, 'pass': self.account.getAccountData(self.user)['password'],
+ 'link': pyfile.url})
+ self.logDebug("JSON data: " + page)
+ page = json_loads(page)
+ if page['status'] != 'ok':
+ self.fail(_("Unable to unrestrict link"))
+ new_url = page['link']
+
+ if new_url != pyfile.url:
+ self.logDebug("Unrestricted URL: " + new_url)
+
+ self.download(new_url, disposition=True)
diff --git a/pyload/plugins/hoster/MyvideoDe.py b/pyload/plugins/hoster/MyvideoDe.py
new file mode 100644
index 000000000..8fbd3a772
--- /dev/null
+++ b/pyload/plugins/hoster/MyvideoDe.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import html_unescape
+
+
+class MyvideoDe(Hoster):
+ __name__ = "MyvideoDe"
+ __type__ = "hoster"
+ __version__ = "0.9"
+
+ __pattern__ = r'http://(?:www\.)?myvideo\.de/watch/'
+
+ __description__ = """Myvideo.de hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org")]
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.download_html()
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+
+ def download_html(self):
+ self.html = self.load(self.pyfile.url)
+
+
+ def get_file_url(self):
+ videoId = re.search(r"addVariable\('_videoid','(.*)'\);p.addParam\('quality'", self.html).group(1)
+ videoServer = re.search("rel='image_src' href='(.*)thumbs/.*' />", self.html).group(1)
+ file_url = videoServer + videoId + ".flv"
+ return file_url
+
+
+ def get_file_name(self):
+ file_name_pattern = r"<h1 class='globalHd'>(.*)</h1>"
+ return html_unescape(re.search(file_name_pattern, self.html).group(1).replace("/", "") + '.flv')
+
+
+ def file_exists(self):
+ self.download_html()
+ self.load(str(self.pyfile.url), cookies=False, just_header=True)
+ if self.req.lastEffectiveURL == "http://www.myvideo.de/":
+ return False
+ return True
diff --git a/pyload/plugins/hoster/NahrajCz.py b/pyload/plugins/hoster/NahrajCz.py
new file mode 100644
index 000000000..188e2b1e4
--- /dev/null
+++ b/pyload/plugins/hoster/NahrajCz.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class NahrajCz(DeadHoster):
+ __name__ = "NahrajCz"
+ __type__ = "hoster"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?nahraj\.cz/content/download/.+'
+
+ __description__ = """Nahraj.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(NahrajCz)
diff --git a/pyload/plugins/hoster/NarodRu.py b/pyload/plugins/hoster/NarodRu.py
new file mode 100644
index 000000000..67d940519
--- /dev/null
+++ b/pyload/plugins/hoster/NarodRu.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import random
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class NarodRu(SimpleHoster):
+ __name__ = "NarodRu"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?narod(\.yandex)?\.ru/(disk|start/\d+\.\w+-narod\.yandex\.ru)/(?P<ID>\d+)/.+'
+
+ __description__ = """Narod.ru hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<dt class="name">(?:<[^<]*>)*(?P<N>[^<]+)</dt>'
+ SIZE_PATTERN = r'<dd class="size">(?P<S>\d[^<]*)</dd>'
+    OFFLINE_PATTERN = r'<title>404</title>|Файл удален с сервиса|Закончился срок хранения файла\.'
+
+ SIZE_REPLACEMENTS = [(u'КБ', 'KB'), (u'МБ', 'MB'), (u'ГБ', 'GB')]
+ URL_REPLACEMENTS = [("narod.yandex.ru/", "narod.ru/"),
+ (r"/start/\d+\.\w+-narod\.yandex\.ru/(\d{6,15})/\w+/(\w+)", r"/disk/\1/\2")]
+
+ CAPTCHA_PATTERN = r'<number url="(.*?)">(\w+)</number>'
+ LINK_PATTERN = r'<a class="h-link" rel="yandex_bar" href="(.+?)">'
+
+
+ def handleFree(self):
+ for _i in xrange(5):
+ self.html = self.load('http://narod.ru/disk/getcapchaxml/?rnd=%d' % int(random() * 777))
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m is None:
+ self.error(_("Captcha"))
+ post_data = {"action": "sendcapcha"}
+ captcha_url, post_data['key'] = m.groups()
+ post_data['rep'] = self.decryptCaptcha(captcha_url)
+
+ self.html = self.load(self.pyfile.url, post=post_data, decode=True)
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ url = 'http://narod.ru' + m.group(1)
+ self.correctCaptcha()
+ break
+ elif u'<b class="error-msg"><strong>ОшОблОсь?</strong>' in self.html:
+ self.invalidCaptcha()
+ else:
+ self.error(_("Download link"))
+ else:
+ self.fail(_("No valid captcha code entered"))
+
+ self.download(url)
+
+
+getInfo = create_getInfo(NarodRu)
diff --git a/pyload/plugins/hoster/NetloadIn.py b/pyload/plugins/hoster/NetloadIn.py
new file mode 100644
index 000000000..b517fe455
--- /dev/null
+++ b/pyload/plugins/hoster/NetloadIn.py
@@ -0,0 +1,267 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import sleep, time
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+
+
+def getInfo(urls):
+    ## returns list of tuples (name, size (in bytes), status (see FileDatabase), url)
+
+ apiurl = "http://api.netload.in/info.php?auth=Zf9SnQh9WiReEsb18akjvQGqT0I830e8&bz=1&md5=1&file_id="
+ id_regex = re.compile(NetloadIn.__pattern__)
+ urls_per_query = 80
+
+ for chunk in chunks(urls, urls_per_query):
+ ids = ""
+ for url in chunk:
+ match = id_regex.search(url)
+ if match:
+ ids = ids + match.group(1) + ";"
+
+ api = getURL(apiurl + ids, decode=True)
+
+        if api is None or len(api) < 10:
+            # prefetch failed, no usable api response
+            return
+        if api.find("unknown_auth") >= 0:
+            # the hardcoded auth code is outdated
+            return
+
+ result = []
+
+ for i, r in enumerate(api.splitlines()):
+ try:
+ tmp = r.split(";")
+ try:
+ size = int(tmp[2])
+ except:
+ size = 0
+ result.append((tmp[1], size, 2 if tmp[3] == "online" else 1, chunk[i]))
+            except Exception:
+                pass  # skip malformed response lines
+
+ yield result
+
+
+class NetloadIn(Hoster):
+ __name__ = "NetloadIn"
+ __type__ = "hoster"
+ __version__ = "0.45"
+
+    __pattern__ = r'https?://(?:[^/]*\.)?netload\.in/(?:datei(.*?)(?:\.htm|/)|index\.php\?id=10&file_id=)'
+
+ __description__ = """Netload.in hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org"),
+ ("RaNaN", "ranan@pyload.org"),
+ ("Gregy", "gregy@gregy.cz")]
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = self.premium
+
+
+ def process(self, pyfile):
+ self.url = pyfile.url
+ self.prepare()
+ pyfile.setStatus("downloading")
+ self.proceed(self.url)
+
+
+ def prepare(self):
+ self.download_api_data()
+
+ if self.api_data and self.api_data['filename']:
+ self.pyfile.name = self.api_data['filename']
+
+ if self.premium:
+ self.logDebug("Use Premium Account")
+ settings = self.load("http://www.netload.in/index.php?id=2&lang=en")
+ if '<option value="2" selected="selected">Direkter Download' in settings:
+ self.logDebug("Using direct download")
+ return True
+ else:
+ self.logDebug("Direct downloads not enabled. Parsing html for a download URL")
+
+ if self.download_html():
+ return True
+ else:
+ self.fail(_("Failed"))
+ return False
+
+
+ def download_api_data(self, n=0):
+ url = self.url
+ id_regex = re.compile(self.__pattern__)
+ match = id_regex.search(url)
+
+ if match:
+ #normalize url
+ self.url = 'http://www.netload.in/datei%s.htm' % match.group(1)
+ self.logDebug("URL: %s" % self.url)
+ else:
+ self.api_data = False
+ return
+
+ apiurl = "http://api.netload.in/info.php"
+ html = self.load(apiurl, cookies=False,
+ get={"file_id": match.group(1), "auth": "Zf9SnQh9WiReEsb18akjvQGqT0I830e8", "bz": "1",
+ "md5": "1"}, decode=True).strip()
+ if not html and n <= 3:
+ sleep(0.2)
+ self.download_api_data(n + 1)
+ return
+
+ self.logDebug("APIDATA: " + html)
+ self.api_data = {}
+ if html and ";" in html and html not in ("unknown file_data", "unknown_server_data", "No input file specified."):
+ lines = html.split(";")
+ self.api_data['exists'] = True
+ self.api_data['fileid'] = lines[0]
+ self.api_data['filename'] = lines[1]
+ self.api_data['size'] = lines[2]
+ self.api_data['status'] = lines[3]
+ if self.api_data['status'] == "online":
+ self.api_data['checksum'] = lines[4].strip()
+ else:
+ self.api_data = False # check manually since api data is useless sometimes
+
+ if lines[0] == lines[1] and lines[2] == "0": # useless api data
+ self.api_data = False
+ else:
+ self.api_data = False
+
+
+ def final_wait(self, page):
+ wait_time = self.get_wait_time(page)
+ self.setWait(wait_time)
+ self.logDebug("Final wait %d seconds" % wait_time)
+ self.wait()
+ self.url = self.get_file_url(page)
+
+
+ def download_html(self):
+ self.logDebug("Entering download_html")
+ page = self.load(self.url, decode=True)
+ t = time() + 30
+
+ if "/share/templates/download_hddcrash.tpl" in page:
+ self.logError(_("Netload HDD Crash"))
+ self.fail(_("File temporarily not available"))
+
+ if not self.api_data:
+ self.logDebug("API Data may be useless, get details from html page")
+
+ if "* The file was deleted" in page:
+ self.offline()
+
+ name = re.search(r'class="dl_first_filename">([^<]+)', page, re.M)
+ # the found filename is not truncated
+ if name:
+ name = name.group(1).strip()
+ if not name.endswith(".."):
+ self.pyfile.name = name
+
+ captchawaited = False
+ for i in xrange(10):
+
+ if not page:
+ page = self.load(self.url)
+ t = time() + 30
+
+ if "/share/templates/download_hddcrash.tpl" in page:
+ self.logError(_("Netload HDD Crash"))
+ self.fail(_("File temporarily not available"))
+
+ self.logDebug("Try number %d " % i)
+
+ if ">Your download is being prepared.<" in page:
+ self.logDebug("We will prepare your download")
+ self.final_wait(page)
+ return True
+ if ">An access request has been made from IP address <" in page:
+ wait = self.get_wait_time(page)
+ if not wait:
+ self.logDebug("Wait was 0 setting 30")
+ wait = 30 * 60
+ self.logInfo(_("Waiting between downloads %d seconds") % wait)
+ self.setWait(wait, True)
+ self.wait()
+
+ return self.download_html()
+
+ self.logDebug("Trying to find captcha")
+
+ try:
+ url_captcha_html = "http://netload.in/" + re.search('(index.php\?id=10&amp;.*&amp;captcha=1)',
+ page).group(1).replace("amp;", "")
+ except:
+ page = None
+ continue
+
+ try:
+ page = self.load(url_captcha_html, cookies=True)
+ captcha_url = "http://netload.in/" + re.search('(share/includes/captcha.php\?t=\d*)', page).group(1)
+ except:
+ self.logDebug("Could not find captcha, try again from beginning")
+ captchawaited = False
+ continue
+
+ file_id = re.search('<input name="file_id" type="hidden" value="(.*)" />', page).group(1)
+ if not captchawaited:
+ wait = self.get_wait_time(page)
+ if i == 0:
+                    self.pyfile.waitUntil = time()  # don't wait, contrary to the time shown on the website
+ else:
+ self.pyfile.waitUntil = t
+ self.logInfo(_("Waiting for captcha %d seconds") % (self.pyfile.waitUntil - time()))
+ #self.setWait(wait)
+ self.wait()
+ captchawaited = True
+
+ captcha = self.decryptCaptcha(captcha_url)
+ page = self.load("http://netload.in/index.php?id=10", post={"file_id": file_id, "captcha_check": captcha},
+ cookies=True)
+
+ return False
+
+
+ def get_file_url(self, page):
+ try:
+ file_url_pattern = r'<a class="Orange_Link" href="(http://.+)".?>Or click here'
+ attempt = re.search(file_url_pattern, page)
+ if attempt is not None:
+ return attempt.group(1)
+ else:
+ self.logDebug("Backup try for final link")
+ file_url_pattern = r'<a href="(.+)" class="Orange_Link">Click here'
+ attempt = re.search(file_url_pattern, page)
+ return "http://netload.in/" + attempt.group(1)
+ except:
+ self.logDebug("Getting final link failed")
+ return None
+
+
+ def get_wait_time(self, page):
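+        # the countdown() argument on the page is expressed in hundredths of a second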
+ wait_seconds = int(re.search(r"countdown\((.+),'change\(\)'\)", page).group(1)) / 100
+ return wait_seconds
+
+
+ def proceed(self, url):
+ self.logDebug("Downloading..")
+
+ self.download(url, disposition=True)
+
+ check = self.checkDownload({"empty": re.compile(r"^$"), "offline": re.compile("The file was deleted")})
+
+ if check == "empty":
+ self.logInfo(_("Downloaded File was empty"))
+ self.retry()
+ elif check == "offline":
+ self.offline()
diff --git a/pyload/plugins/hoster/NosuploadCom.py b/pyload/plugins/hoster/NosuploadCom.py
new file mode 100644
index 000000000..f70d64551
--- /dev/null
+++ b/pyload/plugins/hoster/NosuploadCom.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class NosuploadCom(XFSHoster):
+ __name__ = "NosuploadCom"
+ __type__ = "hoster"
+ __version__ = "0.31"
+
+ __pattern__ = r'http://(?:www\.)?nosupload\.com/\?d=\w{12}'
+
+ __description__ = """Nosupload.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("igel", "igelkun@myopera.com")]
+
+
+ HOSTER_DOMAIN = "nosupload.com"
+
+ SIZE_PATTERN = r'<p><strong>Size:</strong> (?P<S>[\d.,]+) (?P<U>[\w^_]+)</p>'
+ LINK_PATTERN = r'<a class="select" href="(http://.+?)">Download</a>'
+ WAIT_PATTERN = r'Please wait.*?>(\d+)</span>'
+
+
+ def getDownloadLink(self):
+ # stage1: press the "Free Download" button
+ data = self.getPostParameters()
+ self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
+
+ # stage2: wait some time and press the "Download File" button
+ data = self.getPostParameters()
+ wait_time = re.search(self.WAIT_PATTERN, self.html, re.M | re.S).group(1)
+ self.logDebug("Hoster told us to wait %s seconds" % wait_time)
+ self.wait(wait_time)
+ self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
+
+ # stage3: get the download link
+ return re.search(self.LINK_PATTERN, self.html, re.S).group(1)
+
+
+getInfo = create_getInfo(NosuploadCom)
diff --git a/pyload/plugins/hoster/NovafileCom.py b/pyload/plugins/hoster/NovafileCom.py
new file mode 100644
index 000000000..8d0677ec8
--- /dev/null
+++ b/pyload/plugins/hoster/NovafileCom.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://novafile.com/vfun4z6o2cit
+# http://novafile.com/s6zrr5wemuz4
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class NovafileCom(XFSHoster):
+ __name__ = "NovafileCom"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?novafile\.com/\w{12}'
+
+ __description__ = """Novafile.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ HOSTER_DOMAIN = "novafile.com"
+
+ SIZE_PATTERN = r'<div class="size">(?P<S>.+?)</div>'
+ ERROR_PATTERN = r'class="alert[^"]*alert-separate"[^>]*>\s*(?:<p>)?(.*?)\s*</'
+ LINK_PATTERN = r'<a href="(http://s\d+\.novafile\.com/.*?)" class="btn btn-green">Download File</a>'
+ WAIT_PATTERN = r'<p>Please wait <span id="count"[^>]*>(\d+)</span> seconds</p>'
+
+
+getInfo = create_getInfo(NovafileCom)
diff --git a/pyload/plugins/hoster/NowDownloadEu.py b/pyload/plugins/hoster/NowDownloadEu.py
new file mode 100644
index 000000000..2b1b8bc0a
--- /dev/null
+++ b/pyload/plugins/hoster/NowDownloadEu.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from pyload.utils import fixup
+
+
+class NowDownloadEu(SimpleHoster):
+ __name__ = "NowDownloadEu"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?nowdownload\.(at|ch|co|eu|sx)/(dl/|download\.php\?id=)\w+'
+
+ __description__ = """NowDownload.at hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("godofdream", "soilfiction@gmail.com"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ INFO_PATTERN = r'Downloading</span> <br> (?P<N>.*) (?P<S>[\d.,]+) (?P<U>[\w^_]+) </h4>'
+ OFFLINE_PATTERN = r'>This file does not exist'
+
+ TOKEN_PATTERN = r'"(/api/token\.php\?token=\w+)"'
+ CONTINUE_PATTERN = r'"(/dl2/\w+/\w+)"'
+ WAIT_PATTERN = r'\.countdown\(\{until: \+(\d+),'
+ LINK_PATTERN = r'"(http://f\d+\.nowdownload\.at/dl/\w+/\w+)'
+
+ NAME_REPLACEMENTS = [("&#?\w+;", fixup), (r'<[^>]*>', '')]
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = True
+ self.chunkLimit = -1
+
+
+ def handleFree(self):
+ tokenlink = re.search(self.TOKEN_PATTERN, self.html)
+ continuelink = re.search(self.CONTINUE_PATTERN, self.html)
+ if tokenlink is None or continuelink is None:
+ self.error()
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ wait = int(m.group(1))
+ else:
+ wait = 60
+
+ baseurl = "http://www.nowdownload.at"
+ self.html = self.load(baseurl + str(tokenlink.group(1)))
+ self.wait(wait)
+
+ self.html = self.load(baseurl + str(continuelink.group(1)))
+
+ url = re.search(self.LINK_PATTERN, self.html)
+ if url is None:
+ self.error(_("Download link not found"))
+
+ self.download(str(url.group(1)))
+
+
+getInfo = create_getInfo(NowDownloadEu)
diff --git a/pyload/plugins/hoster/NowVideoAt.py b/pyload/plugins/hoster/NowVideoAt.py
new file mode 100644
index 000000000..e17e2e256
--- /dev/null
+++ b/pyload/plugins/hoster/NowVideoAt.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class NowVideoAt(SimpleHoster):
+ __name__ = "NowVideoAt"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?nowvideo\.(at|ch|co|eu|sx)/(video|mobile/#/videos)/(?P<ID>\w+)'
+
+ __description__ = """NowVideo.at hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ URL_REPLACEMENTS = [(__pattern__, r'http://www.nowvideo.at/video/\g<ID>')]
+
+ NAME_PATTERN = r'<h4>(?P<N>.+?)<'
+ OFFLINE_PATTERN = r'>This file no longer exists'
+
+ LINK_FREE_PATTERN = r'<source src="(.+?)"'
+ LINK_PREMIUM_PATTERN = r'<div id="content_player" >\s*<a href="(.+?)"'
+
+
+ def setup(self):
+ self.multiDL = True
+ self.resumeDownload = True
+
+
+ def handleFree(self):
+ self.html = self.load("http://www.nowvideo.at/mobile/video.php", get={'id': self.info['ID']})
+
+ m = re.search(self.LINK_FREE_PATTERN, self.html)
+ if m is None:
+ self.error(_("Free download link not found"))
+
+ self.download(m.group(1))
+
+
+getInfo = create_getInfo(NowVideoAt)
diff --git a/pyload/plugins/hoster/OboomCom.py b/pyload/plugins/hoster/OboomCom.py
new file mode 100644
index 000000000..db2c0597b
--- /dev/null
+++ b/pyload/plugins/hoster/OboomCom.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# https://www.oboom.com/B7CYZIEB/10Mio.dat
+
+import re
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+
+
+class OboomCom(Hoster):
+ __name__ = "OboomCom"
+ __type__ = "hoster"
+ __version__ = "0.3"
+
+ __pattern__ = r'https?://(?:www\.)?oboom\.com/(#(id=|/)?)?(?P<ID>\w{8})'
+
+ __description__ = """oboom.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stanley", "stanley.foerster@gmail.com")]
+
+
+ RECAPTCHA_KEY = "6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX"
+
+
+ def setup(self):
+ self.chunkLimit = 1
+ self.multiDL = self.resumeDownload = self.premium
+
+
+ def process(self, pyfile):
+ self.pyfile.url.replace(".com/#id=", ".com/#")
+ self.pyfile.url.replace(".com/#/", ".com/#")
+ self.getFileId(self.pyfile.url)
+ self.getSessionToken()
+ self.getFileInfo(self.sessionToken, self.fileId)
+ self.pyfile.name = self.fileName
+ self.pyfile.size = self.fileSize
+ if not self.premium:
+ self.solveCaptcha()
+ self.getDownloadTicket()
+ self.download("https://%s/1.0/dlh" % self.downloadDomain, get={"ticket": self.downloadTicket, "http_errors": 0})
+
+
+ def loadUrl(self, url, get=None):
+ if get is None:
+ get = dict()
+ return json_loads(self.load(url, get, decode=True))
+
+
+ def getFileId(self, url):
+ self.fileId = re.match(OboomCom.__pattern__, url).group('ID')
+
+
+ def getSessionToken(self):
+ if self.premium:
+ accountInfo = self.account.getAccountInfo(self.user, True)
+ if "session" in accountInfo:
+ self.sessionToken = accountInfo['session']
+ else:
+ self.fail(_("Could not retrieve premium session"))
+ else:
+ apiUrl = "https://www.oboom.com/1.0/guestsession"
+ result = self.loadUrl(apiUrl)
+ if result[0] == 200:
+ self.sessionToken = result[1]
+ else:
+ self.fail(_("Could not retrieve token for guest session. Error code: %s") % result[0])
+
+
+ def solveCaptcha(self):
+ recaptcha = ReCaptcha(self)
+
+ for _i in xrange(5):
+ challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ apiUrl = "https://www.oboom.com/1.0/download/ticket"
+ params = {"recaptcha_challenge_field": challenge,
+ "recaptcha_response_field": response,
+ "download_id": self.fileId,
+ "token": self.sessionToken}
+ result = self.loadUrl(apiUrl, params)
+
+ if result[0] == 200:
+ self.downloadToken = result[1]
+ self.downloadAuth = result[2]
+ self.correctCaptcha()
+ self.setWait(30)
+ self.wait()
+ break
+
+ elif result[0] == 400:
+ if result[1] == "incorrect-captcha-sol":
+ self.invalidCaptcha()
+ elif result[1] == "captcha-timeout":
+ self.invalidCaptcha()
+ elif result[1] == "forbidden":
+ self.retry(5, 15 * 60, _("Service unavailable"))
+
+ elif result[0] == 403:
+ if result[1] == -1: # another download is running
+ self.setWait(15 * 60)
+ else:
+ self.setWait(result[1], True)
+ self.wait()
+ self.retry(5)
+ else:
+ self.invalidCaptcha()
+ self.fail(_("Received invalid captcha 5 times"))
+
+
+ def getFileInfo(self, token, fileId):
+ apiUrl = "https://api.oboom.com/1.0/info"
+ params = {"token": token, "items": fileId, "http_errors": 0}
+
+ result = self.loadUrl(apiUrl, params)
+ if result[0] == 200:
+ item = result[1][0]
+ if item['state'] == "online":
+ self.fileSize = item['size']
+ self.fileName = item['name']
+ else:
+ self.offline()
+ else:
+ self.fail(_("Could not retrieve file info. Error code %s: %s") % (result[0], result[1]))
+
+
+ def getDownloadTicket(self):
+ apiUrl = "https://api.oboom.com/1/dl"
+ params = {"item": self.fileId, "http_errors": 0}
+ if self.premium:
+ params['token'] = self.sessionToken
+ else:
+ params['token'] = self.downloadToken
+ params['auth'] = self.downloadAuth
+
+ result = self.loadUrl(apiUrl, params)
+ if result[0] == 200:
+ self.downloadDomain = result[1]
+ self.downloadTicket = result[2]
+ elif result[0] == 421:
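+ # 421: connection limit reached; result[2] carries the retry delay in seconds (see the retry call below)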
+ self.retry(wait_time=result[2] + 60, reason=_("Connection limit exceeded"))
+ else:
+ self.fail(_("Could not retrieve download ticket. Error code: %s") % result[0])
diff --git a/pyload/plugins/hoster/OneFichierCom.py b/pyload/plugins/hoster/OneFichierCom.py
new file mode 100644
index 000000000..6e04776b5
--- /dev/null
+++ b/pyload/plugins/hoster/OneFichierCom.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class OneFichierCom(SimpleHoster):
+ __name__ = "OneFichierCom"
+ __type__ = "hoster"
+ __version__ = "0.73"
+
+ __pattern__ = r'https?://(?:www\.)?(?:(?P<ID1>\w+)\.)?(?P<HOST>1fichier\.com|alterupload\.com|cjoint\.net|d(es)?fichiers\.com|dl4free\.com|megadl\.fr|mesfichiers\.org|piecejointe\.net|pjointe\.com|tenvoi\.com)(?:/\?(?P<ID2>\w+))?'
+
+ __description__ = """1fichier.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"),
+ ("the-razer", "daniel_ AT gmx DOT net"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("imclem", None),
+ ("stickell", "l.stickell@yahoo.it"),
+ ("Elrick69", "elrick69[AT]rocketmail[DOT]com"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'>FileName :</td>\s*<td.*>(?P<N>.+?)<'
+ SIZE_PATTERN = r'>Size :</td>\s*<td.*>(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ OFFLINE_PATTERN = r'File not found !\s*<'
+
+ COOKIES = [("1fichier.com", "LG", "en")]
+
+ WAIT_PATTERN = r'>You must wait (\d+)'
+
+
+ def setup(self):
+ self.multiDL = self.premium
+ self.resumeDownload = True
+
+
+ def handle(self, reconnect):
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ wait_time = int(m.group(1)) * 60
+
+ self.wait(wait_time, reconnect)
+ self.retry(reason="You have to wait been each free download")
+
+ id = self.info['ID1'] or self.info['ID2']
+ url, inputs = self.parseHtmlForm('action="https://1fichier.com/\?%s' % id)
+
+ if not url:
+ self.fail(_("Download link not found"))
+
+ if "pass" in inputs:
+ inputs['pass'] = self.getPassword()
+
+ inputs['submit'] = "Download"
+
+ self.download(url, post=inputs)
+
+
+ def handleFree(self):
+ return self.handle(True)
+
+
+ def handlePremium(self):
+ return self.handle(False)
+
+
+getInfo = create_getInfo(OneFichierCom)
diff --git a/pyload/plugins/hoster/OronCom.py b/pyload/plugins/hoster/OronCom.py
new file mode 100644
index 000000000..22d6e65b3
--- /dev/null
+++ b/pyload/plugins/hoster/OronCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class OronCom(DeadHoster):
+ __name__ = "OronCom"
+ __type__ = "hoster"
+ __version__ = "0.14"
+
+ __pattern__ = r'https?://(?:www\.)?oron\.com/\w{12}'
+
+ __description__ = """Oron.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("chrox", "chrox@pyload.org"),
+ ("DHMH", "DHMH@pyload.org")]
+
+
+getInfo = create_getInfo(OronCom)
diff --git a/pyload/plugins/hoster/OverLoadMe.py b/pyload/plugins/hoster/OverLoadMe.py
new file mode 100644
index 000000000..3d07db489
--- /dev/null
+++ b/pyload/plugins/hoster/OverLoadMe.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import randrange
+from urllib import unquote
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import parseFileSize
+
+
+class OverLoadMe(Hoster):
+ __name__ = "OverLoadMe"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://.*overload\.me.*'
+
+ __description__ = """Over-Load.me hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("marley", "marley@over-load.me")]
+
+
+ def getFilename(self, url):
+ try:
+ name = unquote(url.rsplit("/", 1)[1])
+ except IndexError:
+ name = "Unknown_Filename..."
+ if name.endswith("..."): #: incomplete filename, append random stuff
+ name += "%s.tmp" % randrange(100, 999)
+ return name
+
+
+ def setup(self):
+ self.chunkLimit = 5
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Over-Load")
+ self.fail(_("No Over-Load account provided"))
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ data = self.account.getAccountData(self.user)
+
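+ # getdownload.php replies with JSON containing error, msg, filename, filesize and downloadlink (all handled below)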
+ page = self.load("https://api.over-load.me/getdownload.php",
+ get={"auth": data['password'], "link": pyfile.url})
+ data = json_loads(page)
+
+ self.logDebug("Returned Data: %s" % data)
+
+ if data['error'] == 1:
+ self.logWarning(data['msg'])
+ self.tempOffline()
+ else:
+ if pyfile.name is not None and pyfile.name.endswith('.tmp') and data['filename']:
+ pyfile.name = data['filename']
+ pyfile.size = parseFileSize(data['filesize'])
+ new_url = data['downloadlink']
+
+ if self.getConfig("https"):
+ new_url = new_url.replace("http://", "https://")
+ else:
+ new_url = new_url.replace("https://", "http://")
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
+ # only use when name wasn't already set
+ pyfile.name = self.getFilename(new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload(
+ {"error": "<title>An error occured while processing your request</title>"})
+
+ if check == "error":
+ # usually this download can safely be retried
+ self.retry(wait_time=60, reason=_("An error occurred while generating link."))
diff --git a/pyload/plugins/hoster/PandaplaNet.py b/pyload/plugins/hoster/PandaplaNet.py
new file mode 100644
index 000000000..f3124aaae
--- /dev/null
+++ b/pyload/plugins/hoster/PandaplaNet.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class PandaplaNet(DeadHoster):
+ __name__ = "PandaplaNet"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?pandapla\.net/\w{12}'
+
+ __description__ = """Pandapla.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("t4skforce", "t4skforce1337[AT]gmail[DOT]com")]
+
+
+getInfo = create_getInfo(PandaplaNet)
diff --git a/pyload/plugins/hoster/PornhostCom.py b/pyload/plugins/hoster/PornhostCom.py
new file mode 100644
index 000000000..7fd337485
--- /dev/null
+++ b/pyload/plugins/hoster/PornhostCom.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class PornhostCom(Hoster):
+ __name__ = "PornhostCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?pornhost\.com/(\d+/\d+\.html|\d+)'
+
+ __description__ = """Pornhost.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de")]
+
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+
+ # Old interface
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ url = re.search(r'download this file</label>.*?<a href="(.*?)"', self.html)
+ if url is None:
+ url = re.search(r'"(http://dl\d+\.pornhost\.com/files/.*?/.*?/.*?/.*?/.*?/.*?\..*?)"', self.html)
+ if url is None:
+ url = re.search(r'width: 894px; height: 675px">.*?<img src="(.*?)"', self.html)
+ if url is None:
+ url = re.search(r'"http://file\d+\.pornhost\.com/\d+/.*?"',
+ self.html) # TODO: fix this one since it doesn't match
+
+ return url.group(1).strip()
+
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ name = re.search(r'<title>pornhost\.com - free file hosting with a twist - gallery(.*?)</title>', self.html)
+ if name is None:
+ name = re.search(r'id="url" value="http://www\.pornhost\.com/(.*?)/"', self.html)
+ if name is None:
+ name = re.search(r'<title>pornhost\.com - free file hosting with a twist -(.*?)</title>', self.html)
+ if name is None:
+ name = re.search(r'"http://file\d+\.pornhost\.com/.*?/(.*?)"', self.html)
+
+ name = name.group(1).strip() + ".flv"
+
+ return name
+
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if (re.search(r'gallery not found', self.html) is not None or
+ re.search(r'You will be redirected to', self.html) is not None):
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/PornhubCom.py b/pyload/plugins/hoster/PornhubCom.py
new file mode 100644
index 000000000..d89d24029
--- /dev/null
+++ b/pyload/plugins/hoster/PornhubCom.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class PornhubCom(Hoster):
+ __name__ = "PornhubCom"
+ __type__ = "hoster"
+ __version__ = "0.5"
+
+ __pattern__ = r'http://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=\w+'
+
+ __description__ = """Pornhub.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de")]
+
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ url = "http://www.pornhub.com//gateway.php"
+ video_id = self.pyfile.url.split('=')[-1]
+ # thanks to jD team for this one v
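+ # raw AMF (Flash remoting) call to /gateway.php asking for the playerConfig of this video id; the trailing hex string looks like a static client token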
+ post_data = "\x00\x03\x00\x00\x00\x01\x00\x0c\x70\x6c\x61\x79\x65\x72\x43\x6f\x6e\x66\x69\x67\x00\x02\x2f\x31\x00\x00\x00\x44\x0a\x00\x00\x00\x03\x02\x00"
+ post_data += chr(len(video_id))
+ post_data += video_id
+ post_data += "\x02\x00\x02\x2d\x31\x02\x00\x20"
+ post_data += "add299463d4410c6d1b1c418868225f7"
+
+ content = self.load(url, post=str(post_data))
+
+ new_content = ""
+ for x in content:
+ if ord(x) < 32 or ord(x) > 176:
+ new_content += '#'
+ else:
+ new_content += x
+
+ content = new_content
+
+ return re.search(r'flv_url.*(http.*?)##post_roll', content).group(1)
+
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ m = re.search(r'<title[^>]+>([^<]+) - ', self.html)
+ if m:
+ name = m.group(1)
+ else:
+ matches = re.findall('<h1>(.*?)</h1>', self.html)
+ if len(matches) > 1:
+ name = matches[1]
+ else:
+ name = matches[0]
+
+ return name + '.flv'
+
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if re.search(r'This video is no longer in our database or is in conversion', self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/PotloadCom.py b/pyload/plugins/hoster/PotloadCom.py
new file mode 100644
index 000000000..19da16b33
--- /dev/null
+++ b/pyload/plugins/hoster/PotloadCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class PotloadCom(DeadHoster):
+ __name__ = "PotloadCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?potload\.com/\w{12}'
+
+ __description__ = """Potload.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(PotloadCom)
diff --git a/pyload/plugins/hoster/PremiumTo.py b/pyload/plugins/hoster/PremiumTo.py
new file mode 100644
index 000000000..305b6d1d1
--- /dev/null
+++ b/pyload/plugins/hoster/PremiumTo.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+from os import remove
+from os.path import exists
+from urllib import quote
+
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import fs_encode
+
+
+class PremiumTo(Hoster):
+ __name__ = "PremiumTo"
+ __type__ = "hoster"
+ __version__ = "0.10"
+
+ __pattern__ = r'https?://(?:www\.)?premium\.to/.+'
+
+ __description__ = """Premium.to hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ def setup(self):
+ self.resumeDownload = True
+ self.chunkLimit = 1
+
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "premium.to")
+ self.fail(_("No premium.to account provided"))
+
+ self.logDebug("Old URL: %s" % pyfile.url)
+
+ tra = self.getTraffic()
+
+ #raise timeout to 2min
+ self.req.setOption("timeout", 120)
+
+ self.download(
+ "http://premium.to/api/getfile.php?username=%s&password=%s&link=%s" % (self.account.username, self.account.password, quote(pyfile.url, "")),
+ disposition=True)
+
+ check = self.checkDownload({"nopremium": "No premium account available"})
+
+ if check == "nopremium":
+ self.retry(60, 5 * 60, "No premium account available")
+
+ err = ''
+ if self.req.http.code == '420':
+ # Custom error code send - fail
+ lastDownload = fs_encode(self.lastDownload)
+
+ if exists(lastDownload):
+ with open(lastDownload, "rb") as f:
+ err = f.read(256).strip()
+ remove(lastDownload)
+ else:
+ err = _('File does not exist')
+
+ trb = self.getTraffic()
+ self.logInfo(_("Filesize: %d, Traffic used %d, traffic left %d") % (pyfile.size, tra - trb, trb))
+
+ if err:
+ self.fail(err)
+
+
+ def getTraffic(self):
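+ # straffic.php returns semicolon-separated byte counts; their sum is treated as the remaining traffic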
+ try:
+ api_r = self.load("http://premium.to/api/straffic.php",
+ get={'username': self.account.username, 'password': self.account.password})
+ traffic = sum(map(int, api_r.split(';')))
+ except:
+ traffic = 0
+ return traffic
diff --git a/pyload/plugins/hoster/PremiumizeMe.py b/pyload/plugins/hoster/PremiumizeMe.py
new file mode 100644
index 000000000..177edb1a0
--- /dev/null
+++ b/pyload/plugins/hoster/PremiumizeMe.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class PremiumizeMe(Hoster):
+ __name__ = "PremiumizeMe"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.coreReady
+
+ __description__ = """Premiumize.me hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Florian Franzen", "FlorianFranzen@gmail.com")]
+
+
+ def process(self, pyfile):
+ # Check account
+ if not self.account or not self.account.canUse():
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "premiumize.me")
+ self.fail(_("No valid premiumize.me account provided"))
+
+ # In some cases hosters do not supply us with a filename at download, so we
+ # are going to set a fallback filename (e.g. for freakshare or xfileshare)
+ pyfile.name = pyfile.name.split('/').pop() # Remove everything before the last slash
+
+ # Correction for automatic assigned filename: Removing html at end if needed
+ suffix_to_remove = ["html", "htm", "php", "php3", "asp", "shtm", "shtml", "cfml", "cfm"]
+ temp = pyfile.name.split('.')
+ if temp.pop() in suffix_to_remove:
+ pyfile.name = ".".join(temp)
+
+ # Get account data
+ (user, data) = self.account.selectAccount()
+
+ # Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api)
+ answer = self.load(
+ "https://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s" % (
+ user, data['password'], pyfile.url))
+ data = json_loads(answer)
+
+ # Check status and decide what to do
+ status = data['status']
+ if status == 200:
+ self.download(data['result']['location'], disposition=True)
+ elif status == 400:
+ self.fail(_("Invalid link"))
+ elif status == 404:
+ self.offline()
+ elif status >= 500:
+ self.tempOffline()
+ else:
+ self.fail(data['statusmessage'])
diff --git a/pyload/plugins/hoster/PromptfileCom.py b/pyload/plugins/hoster/PromptfileCom.py
new file mode 100644
index 000000000..73324e6ab
--- /dev/null
+++ b/pyload/plugins/hoster/PromptfileCom.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class PromptfileCom(SimpleHoster):
+ __name__ = "PromptfileCom"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'https?://(?:www\.)?promptfile\.com/'
+
+ __description__ = """Promptfile.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("igel", "igelkun@myopera.com")]
+
+
+ INFO_PATTERN = r'<span style="[^"]*" title="[^"]*">(?P<N>.*?) \((?P<S>[\d.,]+) (?P<U>[\w^_]+)\)</span>'
+ OFFLINE_PATTERN = r'<span style="[^"]*" title="File Not Found">File Not Found</span>'
+
+ CHASH_PATTERN = r'<input type="hidden" name="chash" value="([^"]*)" />'
+ LINK_PATTERN = r'<a href=\"(.+)\" target=\"_blank\" class=\"view_dl_link\">Download File</a>'
+
+
+ def handleFree(self):
+ # STAGE 1: get link to continue
+ m = re.search(self.CHASH_PATTERN, self.html)
+ if m is None:
+ self.error(_("CHASH_PATTERN not found"))
+ chash = m.group(1)
+ self.logDebug("Read chash %s" % chash)
+ # continue to stage2
+ self.html = self.load(self.pyfile.url, decode=True, post={'chash': chash})
+
+ # STAGE 2: get the direct link
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+ direct = m.group(1)
+ self.logDebug("Found direct link: " + direct)
+ self.download(direct, disposition=True)
+
+
+getInfo = create_getInfo(PromptfileCom)
diff --git a/pyload/plugins/hoster/PrzeklejPl.py b/pyload/plugins/hoster/PrzeklejPl.py
new file mode 100644
index 000000000..5e6056adb
--- /dev/null
+++ b/pyload/plugins/hoster/PrzeklejPl.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class PrzeklejPl(DeadHoster):
+ __name__ = "PrzeklejPl"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'http://(?:www\.)?przeklej\.pl/plik/.+'
+
+ __description__ = """Przeklej.pl hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(PrzeklejPl)
diff --git a/pyload/plugins/hoster/QuickshareCz.py b/pyload/plugins/hoster/QuickshareCz.py
new file mode 100644
index 000000000..5123e5aa5
--- /dev/null
+++ b/pyload/plugins/hoster/QuickshareCz.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import FOLLOWLOCATION
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class QuickshareCz(SimpleHoster):
+ __name__ = "QuickshareCz"
+ __type__ = "hoster"
+ __version__ = "0.55"
+
+ __pattern__ = r'http://(?:[^/]*\.)?quickshare\.cz/stahnout-soubor/.*'
+
+ __description__ = """Quickshare.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<th width="145px">Název:</th>\s*<td style="word-wrap:break-word;">(?P<N>[^<]+)</td>'
+ SIZE_PATTERN = r'<th>Velikost:</th>\s*<td>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</td>'
+ OFFLINE_PATTERN = r'<script type="text/javascript">location\.href=\'/chyba\';</script>'
+
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+
+ # parse js variables
+ self.jsvars = dict((x, y.strip("'")) for x, y in re.findall(r"var (\w+) = ([\d.]+|'[^']*')", self.html))
+ self.logDebug(self.jsvars)
+ pyfile.name = self.jsvars['ID3']
+
+ # determine download type - free or premium
+ if self.premium:
+ if 'UU_prihlasen' in self.jsvars:
+ if self.jsvars['UU_prihlasen'] == '0':
+ self.logWarning(_("User not logged in"))
+ self.relogin(self.user)
+ self.retry()
+ elif float(self.jsvars['UU_kredit']) < float(self.jsvars['kredit_odecet']):
+ self.logWarning(_("Not enough credit left"))
+ self.premium = False
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ check = self.checkDownload({"err": re.compile(r"\AChyba!")}, max_size=100)
+ if check == "err":
+ self.fail(_("File not m or plugin defect"))
+
+
+ def handleFree(self):
+ # get download url
+ download_url = '%s/download.php' % self.jsvars['server']
+ data = dict((x, self.jsvars[x]) for x in self.jsvars if x in ("ID1", "ID2", "ID3", "ID4"))
+ self.logDebug("FREE URL1:" + download_url, data)
+
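+ # disable redirects temporarily so the real download URL can be read from the Location response header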
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+ self.load(download_url, post=data)
+ self.header = self.req.http.header
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+
+ m = re.search(r'Location\s*:\s*(.+)', self.header, re.I)
+ if m is None:
+ self.fail(_("File not found"))
+ download_url = m.group(1)
+ self.logDebug("FREE URL2:" + download_url)
+
+ # check errors
+ m = re.search(r'/chyba/(\d+)', download_url)
+ if m:
+ if m.group(1) == '1':
+ self.retry(60, 2 * 60, "This IP is already downloading")
+ elif m.group(1) == '2':
+ self.retry(60, 60, "No free slots available")
+ else:
+ self.fail(_("Error %d") % m.group(1))
+
+ # download file
+ self.download(download_url)
+
+
+ def handlePremium(self):
+ download_url = '%s/download_premium.php' % self.jsvars['server']
+ data = dict((x, self.jsvars[x]) for x in self.jsvars if x in ("ID1", "ID2", "ID4", "ID5"))
+ self.logDebug("PREMIUM URL:" + download_url, data)
+ self.download(download_url, get=data)
+
+
+getInfo = create_getInfo(QuickshareCz)
diff --git a/pyload/plugins/hoster/RPNetBiz.py b/pyload/plugins/hoster/RPNetBiz.py
new file mode 100644
index 000000000..b2a89f48a
--- /dev/null
+++ b/pyload/plugins/hoster/RPNetBiz.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import json_loads
+
+
+class RPNetBiz(Hoster):
+ __name__ = "RPNetBiz"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __description__ = """RPNet.biz hoster plugin"""
+ __license__ = "GPLv3"
+
+ __pattern__ = r'https?://.*rpnet\.biz'
+ __authors__ = [("Dman", "dmanugm@gmail.com")]
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ link_status = {'generated': pyfile.url}
+ elif not self.account:
+ # Check account
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "rpnet")
+ self.fail(_("No rpnet account provided"))
+ else:
+ (user, data) = self.account.selectAccount()
+
+ self.logDebug("Original URL: %s" % pyfile.url)
+ # Get the download link
+ res = self.load("https://premium.rpnet.biz/client_api.php",
+ get={"username": user,
+ "password": data['password'],
+ "action": "generate",
+ "links": pyfile.url})
+
+ self.logDebug("JSON data: %s" % res)
+ link_status = json_loads(res)['links'][0] # get the first link... since we only queried one
+
+ # Check if we only have an id as a HDD link
+ if 'id' in link_status:
+ self.logDebug("Need to wait at least 30 seconds before requery")
+ self.setWait(30) # wait for 30 seconds
+ self.wait()
+ # Lets query the server again asking for the status on the link,
+ # we need to keep doing this until we reach 100
+ max_tries = 30
+ my_try = 0
+ while (my_try <= max_tries):
+ self.logDebug("Try: %d ; Max Tries: %d" % (my_try, max_tries))
+ res = self.load("https://premium.rpnet.biz/client_api.php",
+ get={"username": user,
+ "password": data['password'],
+ "action": "downloadInformation",
+ "id": link_status['id']})
+ self.logDebug("JSON data hdd query: %s" % res)
+ download_status = json_loads(res)['download']
+
+ if download_status['status'] == '100':
+ link_status['generated'] = download_status['rpnet_link']
+ self.logDebug("Successfully downloaded to rpnet HDD: %s" % link_status['generated'])
+ break
+ else:
+ self.logDebug("At %s%% for the file download" % download_status['status'])
+
+ self.setWait(30)
+ self.wait()
+ my_try += 1
+
+ if my_try > max_tries: # We went over the limit!
+ self.fail(_("Waited for about 15 minutes for download to finish but failed"))
+
+ if 'generated' in link_status:
+ self.download(link_status['generated'], disposition=True)
+ elif 'error' in link_status:
+ self.fail(link_status['error'])
+ else:
+ self.fail(_("Something went wrong, not supposed to enter here"))
diff --git a/pyload/plugins/hoster/RapidfileshareNet.py b/pyload/plugins/hoster/RapidfileshareNet.py
new file mode 100644
index 000000000..849ac2b68
--- /dev/null
+++ b/pyload/plugins/hoster/RapidfileshareNet.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class RapidfileshareNet(XFSHoster):
+ __name__ = "RapidfileshareNet"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?rapidfileshare\.net/\w{12}'
+
+ __description__ = """Rapidfileshare.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "rapidfileshare.net"
+
+ NAME_PATTERN = r'<input type="hidden" name="fname" value="(?P<N>.+?)">'
+ SIZE_PATTERN = r'>http://www.rapidfileshare.net/\w+?</font> \((?P<S>[\d.,]+) (?P<U>[\w^_]+)\)</font>'
+
+ OFFLINE_PATTERN = r'>No such file with this filename'
+ TEMP_OFFLINE_PATTERN = r'The page may have been renamed, removed or be temporarily unavailable.<'
+
+
+ def handlePremium(self):
+ self.fail(_("Premium download not implemented"))
+
+
+getInfo = create_getInfo(RapidfileshareNet)
diff --git a/pyload/plugins/hoster/RapidgatorNet.py b/pyload/plugins/hoster/RapidgatorNet.py
new file mode 100644
index 000000000..99fec9b20
--- /dev/null
+++ b/pyload/plugins/hoster/RapidgatorNet.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import HTTPHEADER
+
+from pyload.utils import json_loads
+from pyload.network.HTTPRequest import BadHeader
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import AdsCaptcha, ReCaptcha, SolveMedia
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class RapidgatorNet(SimpleHoster):
+ __name__ = "RapidgatorNet"
+ __type__ = "hoster"
+ __version__ = "0.26"
+
+ __pattern__ = r'http://(?:www\.)?(rapidgator\.net|rg\.to)/file/\w+'
+
+ __description__ = """Rapidgator.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("chrox", None),
+ ("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ API_URL = "http://rapidgator.net/api/file"
+
+ COOKIES = [("rapidgator.net", "lang", "en")]
+
+ NAME_PATTERN = r'<title>Download file (?P<N>.*)</title>'
+ SIZE_PATTERN = r'File size:\s*<strong>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</strong>'
+ OFFLINE_PATTERN = r'>(File not found|Error 404)'
+
+ JSVARS_PATTERN = r'\s+var\s*(startTimerUrl|getDownloadUrl|captchaUrl|fid|secs)\s*=\s*\'?(.*?)\'?;'
+ PREMIUM_ONLY_ERROR_PATTERN = r'You can download files up to|This file can be downloaded by premium only<'
+ DOWNLOAD_LIMIT_ERROR_PATTERN = r'You have reached your (daily|hourly) downloads limit'
+ WAIT_PATTERN = r'(?:Delay between downloads must be not less than|Try again in)\s*(\d+)\s*(hour|min)'
+ LINK_PATTERN = r'return \'(http://\w+.rapidgator.net/.*)\';'
+
+ RECAPTCHA_PATTERN = r'"http://api\.recaptcha\.net/challenge\?k=(.*?)"'
+ ADSCAPTCHA_PATTERN = r'(http://api\.adscaptcha\.com/Get\.aspx[^"\']*)'
+ SOLVEMEDIA_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.script\?k=(.*?)"'
+
+
+ def setup(self):
+ if self.account:
+ self.sid = self.account.getAccountData(self.user).get('SID', None)
+ else:
+ self.sid = None
+
+ if self.sid:
+ self.premium = True
+
+ self.resumeDownload = self.multiDL = self.premium
+ self.chunkLimit = 1
+
+
+ def api_response(self, cmd):
+ try:
+ json = self.load('%s/%s' % (self.API_URL, cmd),
+ get={'sid': self.sid,
+ 'url': self.pyfile.url}, decode=True)
+ self.logDebug("API:%s" % cmd, json, "SID: %s" % self.sid)
+ json = json_loads(json)
+ status = json['response_status']
+ msg = json['response_details']
+
+ except BadHeader, e:
+ self.logError("API: %s" % cmd, e, "SID: %s" % self.sid)
+ status = e.code
+ msg = e
+
+ if status == 200:
+ return json['response']
+
+ elif status == 423:
+ self.account.empty(self.user)
+ self.retry()
+
+ else:
+ self.account.relogin(self.user)
+ self.retry(wait_time=60)
+
+
+ def handlePremium(self):
+ #self.logDebug("ACCOUNT_DATA", self.account.getAccountData(self.user))
+ self.api_data = self.api_response('info')
+ self.api_data['md5'] = self.api_data['hash']
+ self.pyfile.name = self.api_data['filename']
+ self.pyfile.size = self.api_data['size']
+ url = self.api_response('download')['url']
+ self.download(url)
+
+
+ def handleFree(self):
+ self.checkFree()
+
+ jsvars = dict(re.findall(self.JSVARS_PATTERN, self.html))
+ self.logDebug(jsvars)
+
+ self.req.http.lastURL = self.pyfile.url
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
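+ # the timer and download-URL endpoints are AJAX calls; they expect the X-Requested-With header set above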
+
+ url = "http://rapidgator.net%s?fid=%s" % (
+ jsvars.get('startTimerUrl', '/download/AjaxStartTimer'), jsvars['fid'])
+ jsvars.update(self.getJsonResponse(url))
+
+ self.wait(int(jsvars.get('secs', 45)), False)
+
+ url = "http://rapidgator.net%s?sid=%s" % (
+ jsvars.get('getDownloadUrl', '/download/AjaxGetDownload'), jsvars['sid'])
+ jsvars.update(self.getJsonResponse(url))
+
+ self.req.http.lastURL = self.pyfile.url
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With:"])
+
+ url = "http://rapidgator.net%s" % jsvars.get('captchaUrl', '/download/captcha')
+ self.html = self.load(url)
+
+ for _i in xrange(5):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ link = m.group(1)
+ self.logDebug(link)
+ self.download(link, disposition=True)
+ break
+ else:
+ captcha, captcha_key = self.getCaptcha()
+ captcha_challenge, captcha_response = captcha.challenge(captcha_key)
+
+ self.html = self.load(url, post={
+ "DownloadCaptchaForm[captcha]": "",
+ "adcopy_challenge": captcha_challenge,
+ "adcopy_response": captcha_response
+ })
+
+ if "The verification code is incorrect" in self.html:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ else:
+ self.error(_("Download link"))
+
+
+ def getCaptcha(self):
+ m = re.search(self.ADSCAPTCHA_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ captcha = AdsCaptcha(self)
+ else:
+ m = re.search(self.RECAPTCHA_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ captcha = ReCaptcha(self)
+ else:
+ m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if m:
+ captcha_key = m.group(1)
+ captcha = SolveMedia(self)
+ else:
+ self.error(_("Captcha"))
+
+ return captcha, captcha_key
+
+
+ def checkFree(self):
+ m = re.search(self.PREMIUM_ONLY_ERROR_PATTERN, self.html)
+ if m:
+ self.fail(_("Premium account needed for download"))
+ else:
+ m = re.search(self.WAIT_PATTERN, self.html)
+
+ if m:
+ wait_time = int(m.group(1)) * {"hour": 60, "min": 1}[m.group(2)]
+ else:
+ m = re.search(self.DOWNLOAD_LIMIT_ERROR_PATTERN, self.html)
+ if m is None:
+ return
+ elif m.group(1) == "daily":
+ self.logWarning(_("You have reached your daily downloads limit for today"))
+ wait_time = secondsToMidnight(gmt=2)
+ else:
+ wait_time = 1 * 60 * 60
+
+ self.logDebug("Waiting %d minutes" % wait_time / 60)
+ self.wait(wait_time, True)
+ self.retry()
+
+
+ def getJsonResponse(self, url):
+ res = self.load(url, decode=True)
+ if not res.startswith('{'):
+ self.retry()
+ self.logDebug(url, res)
+ return json_loads(res)
+
+
+getInfo = create_getInfo(RapidgatorNet)
diff --git a/pyload/plugins/hoster/RapidshareCom.py b/pyload/plugins/hoster/RapidshareCom.py
new file mode 100644
index 000000000..97823ba96
--- /dev/null
+++ b/pyload/plugins/hoster/RapidshareCom.py
@@ -0,0 +1,228 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+
+
+def getInfo(urls):
+ ids = ""
+ names = ""
+
+ p = re.compile(RapidshareCom.__pattern__)
+
+ for url in urls:
+ r = p.search(url)
+ if r.group("name"):
+ ids += "," + r.group("id")
+ names += "," + r.group("name")
+ elif r.group("name_new"):
+ ids += "," + r.group("id_new")
+ names += "," + r.group("name_new")
+
+ url = "http://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=checkfiles&files=%s&filenames=%s" % (ids[1:], names[1:])
+
+ api = getURL(url)
+ result = []
+ i = 0
+ for res in api.split():
+ tmp = res.split(",")
+ if tmp[4] in ("0", "4", "5"):
+ status = 1
+ elif tmp[4] == "1":
+ status = 2
+ else:
+ status = 3
+
+ result.append((tmp[1], tmp[2], status, urls[i]))
+ i += 1
+
+ yield result
+
+
+class RapidshareCom(Hoster):
+ __name__ = "RapidshareCom"
+ __type__ = "hoster"
+ __version__ = "1.40"
+
+ __pattern__ = r'https?://(?:www\.)?rapidshare\.com/(?:files/(?P<id>\d+)/(?P<name>[^?]+)|#!download\|(?:\w+)\|(?P<id_new>\d+)\|(?P<name_new>[^|]+))'
+
+ __description__ = """Rapidshare.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org"),
+ ("RaNaN", "ranan@pyload.org"),
+ ("mkaay", "mkaay@mkaay.de")]
+
+
+ def setup(self):
+ self.no_download = True
+ self.api_data = None
+ self.offset = 0
+ self.dl_dict = {}
+
+ self.id = None
+ self.name = None
+
+ self.chunkLimit = -1 if self.premium else 1
+ self.multiDL = self.resumeDownload = self.premium
+
+
+ def process(self, pyfile):
+ self.url = pyfile.url
+ self.prepare()
+
+
+ def prepare(self):
+ m = re.match(self.__pattern__, self.url)
+
+ if m.group("name"):
+ self.id = m.group("id")
+ self.name = m.group("name")
+ else:
+ self.id = m.group("id_new")
+ self.name = m.group("name_new")
+
+ self.download_api_data()
+ if self.api_data['status'] == "1":
+ self.pyfile.name = self.get_file_name()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ elif self.api_data['status'] == "2":
+ self.logInfo(_("Rapidshare: Traffic Share (direct download)"))
+ self.pyfile.name = self.get_file_name()
+
+ self.download(self.pyfile.url, get={"directstart": 1})
+
+ elif self.api_data['status'] in ("0", "4", "5"):
+ self.offline()
+ elif self.api_data['status'] == "3":
+ self.tempOffline()
+ else:
+ self.error(_("Unknown response code"))
+
+
+ def handleFree(self):
+ while self.no_download:
+ self.dl_dict = self.freeWait()
+
+ #tmp = "#!download|%(server)s|%(id)s|%(name)s|%(size)s"
+ download = "http://%(host)s/cgi-bin/rsapi.cgi?sub=download&editparentlocation=0&bin=1&fileid=%(id)s&filename=%(name)s&dlauth=%(auth)s" % self.dl_dict
+
+ self.logDebug("RS API Request: %s" % download)
+ self.download(download, ref=False)
+
+ check = self.checkDownload({"ip": "You need RapidPro to download more files from your IP address",
+ "auth": "Download auth invalid"})
+ if check == "ip":
+ self.setWait(60)
+ self.logInfo(_("Already downloading from this ip address, waiting 60 seconds"))
+ self.wait()
+ self.handleFree()
+ elif check == "auth":
+ self.logInfo(_("Invalid Auth Code, download will be restarted"))
+ self.offset += 5
+ self.handleFree()
+
+
+ def handlePremium(self):
+ info = self.account.getAccountInfo(self.user, True)
+ self.logDebug("Use Premium Account")
+ url = self.api_data['mirror']
+ self.download(url, get={"directstart": 1})
+
+
+ def download_api_data(self, force=False):
+ """
+ http://images.rapidshare.com/apidoc.txt
+ """
+ if self.api_data and not force:
+ return
+ api_url_base = "http://api.rapidshare.com/cgi-bin/rsapi.cgi"
+ api_param_file = {"sub": "checkfiles", "incmd5": "1", "files": self.id, "filenames": self.name}
+ html = self.load(api_url_base, cookies=False, get=api_param_file).strip()
+ self.logDebug("RS INFO API: %s" % html)
+ if html.startswith("ERROR"):
+ return
+ fields = html.split(",")
+
+ # status codes:
+ # 0=File not found
+ # 1=File OK (Anonymous downloading)
+ # 3=Server down
+ # 4=File marked as illegal
+ # 5=Anonymous file locked, because it has more than 10 downloads already
+ # 50+n=File OK (TrafficShare direct download type "n" without any logging.)
+ # 100+n=File OK (TrafficShare direct download type "n" with logging.
+ # Read our privacy policy to see what is logged.)
+
+ self.api_data = {"fileid": fields[0], "filename": fields[1], "size": int(fields[2]), "serverid": fields[3],
+ "status": fields[4], "shorthost": fields[5], "checksum": fields[6].strip().lower()}
+
+ if int(self.api_data['status']) > 100:
+ self.api_data['status'] = str(int(self.api_data['status']) - 100)
+ elif int(self.api_data['status']) > 50:
+ self.api_data['status'] = str(int(self.api_data['status']) - 50)
+
+ self.api_data['mirror'] = "http://rs%(serverid)s%(shorthost)s.rapidshare.com/files/%(fileid)s/%(filename)s" % self.api_data
+
+
+ def freeWait(self):
+ """downloads html with the important information
+ """
+ self.no_download = True
+
+ id = self.id
+ name = self.name
+
+ prepare = "https://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=download&fileid=%(id)s&filename=%(name)s&try=1&cbf=RSAPIDispatcher&cbid=1" % {
+ "name": name, "id": id}
+
+ self.logDebug("RS API Request: %s" % prepare)
+ result = self.load(prepare, ref=False)
+ self.logDebug("RS API Result: %s" % result)
+
+ between_wait = re.search("You need to wait (\d+) seconds", result)
+
+ if "You need RapidPro to download more files from your IP address" in result:
+ self.setWait(60)
+ self.logInfo(_("Already downloading from this ip address, waiting 60 seconds"))
+ self.wait()
+ elif ("Too many users downloading from this server right now" in result or
+ "All free download slots are full" in result):
+ self.setWait(120)
+ self.logInfo(_("RapidShareCom: No free slots"))
+ self.wait()
+ elif "This file is too big to download it for free" in result:
+ self.fail(_("You need a premium account for this file"))
+ elif "Filename invalid." in result:
+ self.fail(_("Filename reported invalid"))
+ elif between_wait:
+ self.setWait(int(between_wait.group(1)), True)
+ self.wait()
+ else:
+ self.no_download = False
+
+ tmp, info = result.split(":")
+ data = info.split(",")
+
+ dl_dict = {"id": id,
+ "name": name,
+ "host": data[0],
+ "auth": data[1],
+ "server": self.api_data['serverid'],
+ "size": self.api_data['size']}
+ self.setWait(int(data[2]) + 2 + self.offset)
+ self.wait()
+
+ return dl_dict
+
+
+ def get_file_name(self):
+ if self.api_data['filename']:
+ return self.api_data['filename']
+ return self.url.split("/")[-1]
diff --git a/pyload/plugins/hoster/RarefileNet.py b/pyload/plugins/hoster/RarefileNet.py
new file mode 100644
index 000000000..ce54b4dc7
--- /dev/null
+++ b/pyload/plugins/hoster/RarefileNet.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class RarefileNet(XFSHoster):
+ __name__ = "RarefileNet"
+ __type__ = "hoster"
+ __version__ = "0.08"
+
+ __pattern__ = r'http://(?:www\.)?rarefile\.net/\w{12}'
+
+ __description__ = """Rarefile.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "rarefile.net"
+
+ NAME_PATTERN = r'<font color="red">(?P<N>.+?)<'
+ SIZE_PATTERN = r'>Size : (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ LINK_PATTERN = r'<a href="(?P<link>[^"]+)">(?P=link)</a>'
+
+
+getInfo = create_getInfo(RarefileNet)
diff --git a/pyload/plugins/hoster/RealdebridCom.py b/pyload/plugins/hoster/RealdebridCom.py
new file mode 100644
index 000000000..2ca9970e0
--- /dev/null
+++ b/pyload/plugins/hoster/RealdebridCom.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import randrange
+from urllib import quote, unquote
+from time import time
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import parseFileSize
+
+
+class RealdebridCom(Hoster):
+ __name__ = "RealdebridCom"
+ __type__ = "hoster"
+ __version__ = "0.53"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?real-debrid\..*'
+
+ __description__ = """Real-Debrid.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Devirex Hazzard", "naibaf_11@yahoo.de")]
+
+
+ def getFilename(self, url):
+ try:
+ name = unquote(url.rsplit("/", 1)[1])
+ except IndexError:
+ name = "Unknown_Filename..."
+ if not name or name.endswith(".."): #: incomplete filename, append random stuff
+ name += "%s.tmp" % randrange(100, 999)
+ return name
+
+
+ def setup(self):
+ self.chunkLimit = 3
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Real-debrid")
+ self.fail(_("No Real-debrid account provided"))
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ password = self.getPassword().splitlines()
+ if not password:
+ password = ""
+ else:
+ password = password[0]
+
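+ # ajax/unrestrict.php answers with JSON carrying error, message, file_name, file_size and generated_links (all used below)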
+ url = "https://real-debrid.com/ajax/unrestrict.php?lang=en&link=%s&password=%s&time=%s" % (
+ quote(pyfile.url, ""), password, int(time() * 1000))
+ page = self.load(url)
+ data = json_loads(page)
+
+ self.logDebug("Returned Data: %s" % data)
+
+ if data['error'] != 0:
+ if data['message'] == "Your file is unavailable on the hoster.":
+ self.offline()
+ else:
+ self.logWarning(data['message'])
+ self.tempOffline()
+ else:
+ if pyfile.name is not None and pyfile.name.endswith('.tmp') and data['file_name']:
+ pyfile.name = data['file_name']
+ pyfile.size = parseFileSize(data['file_size'])
+ new_url = data['generated_links'][0][-1]
+
+ if self.getConfig("https"):
+ new_url = new_url.replace("http://", "https://")
+ else:
+ new_url = new_url.replace("https://", "http://")
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: %s" % new_url)
+
+ if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
+ # only use when name wasn't already set
+ pyfile.name = self.getFilename(new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload(
+ {"error": "<title>An error occured while processing your request</title>"})
+
+ if check == "error":
+ # usually this download can safely be retried
+ self.retry(wait_time=60, reason=_("An error occurred while generating link"))
diff --git a/pyload/plugins/hoster/RedtubeCom.py b/pyload/plugins/hoster/RedtubeCom.py
new file mode 100644
index 000000000..b1272f68d
--- /dev/null
+++ b/pyload/plugins/hoster/RedtubeCom.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import html_unescape
+
+
+class RedtubeCom(Hoster):
+ __name__ = "RedtubeCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?redtube\.com/\d+'
+
+ __description__ = """Redtube.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de")]
+
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ file_url = html_unescape(re.search(r'hashlink=(http.*?)"', self.html).group(1))
+
+ return file_url
+
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ return re.search('<title>(.*?)- RedTube - Free Porn Videos</title>', self.html).group(1).strip() + ".flv"
+
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if re.search(r'This video has been removed.', self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/RehostTo.py b/pyload/plugins/hoster/RehostTo.py
new file mode 100644
index 000000000..d9855c796
--- /dev/null
+++ b/pyload/plugins/hoster/RehostTo.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+from urllib import quote, unquote
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class RehostTo(Hoster):
+ __name__ = "RehostTo"
+ __type__ = "hoster"
+ __version__ = "0.13"
+
+ __pattern__ = r'https?://.*rehost\.to\..*'
+
+ __description__ = """Rehost.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ def getFilename(self, url):
+ return unquote(url.rsplit("/", 1)[1])
+
+
+ def setup(self):
+ self.chunkLimit = 1
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "rehost.to")
+ self.fail(_("No rehost.to account provided"))
+
+ data = self.account.getAccountInfo(self.user)
+ long_ses = data['long_ses']
+
+ self.logDebug("Rehost.to: Old URL: %s" % pyfile.url)
+ new_url = "http://rehost.to/process_download.php?user=cookie&pass=%s&dl=%s" % (long_ses, quote(pyfile.url, ""))
+
+ #raise timeout to 2min
+ self.req.setOption("timeout", 120)
+
+ self.download(new_url, disposition=True)
diff --git a/pyload/plugins/hoster/RemixshareCom.py b/pyload/plugins/hoster/RemixshareCom.py
new file mode 100644
index 000000000..fee898654
--- /dev/null
+++ b/pyload/plugins/hoster/RemixshareCom.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://remixshare.com/download/p946u
+#
+# Note:
+ # The remixshare.com website is very slow, so if your download
+ # does not start because of pycurl timeouts, adjust the timeouts
+ # in /usr/share/pyload/pyload/network/HTTPRequest.py
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class RemixshareCom(SimpleHoster):
+ __name__ = "RemixshareCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://remixshare\.com/(download|dl)/\w+'
+
+ __description__ = """Remixshare.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ INFO_PATTERN = r'title=\'.+?\'>(?P<N>.+?)</span><span class=\'light2\'>&nbsp;\((?P<S>\d+)&nbsp;(?P<U>[\w^_]+)\)<'
+ OFFLINE_PATTERN = r'<h1>Ooops!<'
+
+ LINK_PATTERN = r'(http://remixshare\.com/downloadfinal/.+?)"'
+ TOKEN_PATTERN = r'var acc = (\d+)'
+ WAIT_PATTERN = r'var XYZ = r"(\d+)"'
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = 1
+
+
+ def handleFree(self):
+ b = re.search(self.LINK_PATTERN, self.html)
+ if not b:
+ self.error(_("Cannot parse download url"))
+ c = re.search(self.TOKEN_PATTERN, self.html)
+ if not c:
+ self.error(_("Cannot parse file token"))
+ dl_url = b.group(1) + c.group(1)
+
+ #Check if we have to wait
+ seconds = re.search(self.WAIT_PATTERN, self.html)
+ if seconds:
+ self.logDebug("Wait " + seconds.group(1))
+ self.wait(int(seconds.group(1)))
+
+ # Finally start downloading...
+ self.download(dl_url, disposition=True)
+
+
+getInfo = create_getInfo(RemixshareCom)
diff --git a/pyload/plugins/hoster/RgHostNet.py b/pyload/plugins/hoster/RgHostNet.py
new file mode 100644
index 000000000..82a5b88c5
--- /dev/null
+++ b/pyload/plugins/hoster/RgHostNet.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class RgHostNet(SimpleHoster):
+ __name__ = "RgHostNet"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?rghost\.net/\d+(?:r=\d+)?'
+
+ __description__ = """RgHost.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("z00nx", "z00nx0@gmail.com")]
+
+
+ INFO_PATTERN = r'<h1>\s+(<a[^>]+>)?(?P<N>[^<]+)(</a>)?\s+<small[^>]+>\s+\((?P<S>[^)]+)\)\s+</small>\s+</h1>'
+ OFFLINE_PATTERN = r'File is deleted|this page is not found'
+ LINK_PATTERN = r'''<a\s+href="([^"]+)"\s+class="btn\s+large\s+download"[^>]+>Download</a>'''
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+ download_link = m.group(1)
+ self.download(download_link, disposition=True)
+
+
+getInfo = create_getInfo(RgHostNet)
diff --git a/pyload/plugins/hoster/RyushareCom.py b/pyload/plugins/hoster/RyushareCom.py
new file mode 100644
index 000000000..ab6bf1017
--- /dev/null
+++ b/pyload/plugins/hoster/RyushareCom.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://ryushare.com/cl0jy8ric2js/random.bin
+
+import re
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+from pyload.plugins.internal.CaptchaService import SolveMedia
+
+
+class RyushareCom(XFSHoster):
+ __name__ = "RyushareCom"
+ __type__ = "hoster"
+ __version__ = "0.20"
+
+ __pattern__ = r'http://(?:www\.)?ryushare\.com/\w+'
+
+ __description__ = """Ryushare.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it"),
+ ("quareevo", "quareevo@arcor.de")]
+
+
+ HOSTER_DOMAIN = "ryushare.com"
+
+ SIZE_PATTERN = r'You have requested <font color="red">[^<]+</font> \((?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ WAIT_PATTERN = r'You have to wait ((?P<hour>\d+) hour[s]?, )?((?P<min>\d+) minute[s], )?(?P<sec>\d+) second[s]'
+ LINK_PATTERN = r'<a href="([^"]+)">Click here to download<'
+
+
+ def getDownloadLink(self):
+ retry = False
+ self.html = self.load(self.pyfile.url)
+ action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})
+ if "method_premium" in inputs:
+ del inputs['method_premium']
+
+ self.html = self.load(self.pyfile.url, post=inputs)
+ action, inputs = self.parseHtmlForm('F1')
+
+ self.setWait(65)
+ # Wait 1 hour
+ if "You have reached the download-limit" in self.html:
+ self.setWait(1 * 60 * 60, True)
+ retry = True
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ wait = m.groupdict(0)
+ waittime = int(wait['hour']) * 60 * 60 + int(wait['min']) * 60 + int(wait['sec'])
+ self.setWait(waittime, True)
+ retry = True
+
+ self.wait()
+ if retry:
+ self.retry()
+
+ for _i in xrange(5):
+ solvemedia = SolveMedia(self)
+ challenge, response = solvemedia.challenge()
+
+ inputs['adcopy_challenge'] = challenge
+ inputs['adcopy_response'] = response
+
+ self.html = self.load(self.pyfile.url, post=inputs)
+ if "WRONG CAPTCHA" in self.html:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail(_("You have entered 5 invalid captcha codes"))
+
+ if "Click here to download" in self.html:
+ return re.search(r'<a href="([^"]+)">Click here to download</a>', self.html).group(1)
+
+
+getInfo = create_getInfo(RyushareCom)
diff --git a/pyload/plugins/hoster/SecureUploadEu.py b/pyload/plugins/hoster/SecureUploadEu.py
new file mode 100644
index 000000000..6939e4f6d
--- /dev/null
+++ b/pyload/plugins/hoster/SecureUploadEu.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class SecureUploadEu(XFSHoster):
+ __name__ = "SecureUploadEu"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'https?://(?:www\.)?secureupload\.eu/\w{12}'
+
+ __description__ = """SecureUpload.eu hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("z00nx", "z00nx0@gmail.com")]
+
+
+ HOSTER_DOMAIN = "secureupload.eu"
+
+ INFO_PATTERN = r'<h3>Downloading (?P<N>[^<]+) \((?P<S>[^<]+)\)</h3>'
+
+
+getInfo = create_getInfo(SecureUploadEu)
diff --git a/pyload/plugins/hoster/SendmywayCom.py b/pyload/plugins/hoster/SendmywayCom.py
new file mode 100644
index 000000000..cb82fb19d
--- /dev/null
+++ b/pyload/plugins/hoster/SendmywayCom.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class SendmywayCom(XFSHoster):
+ __name__ = "SendmywayCom"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?sendmyway\.com/\w{12}'
+
+ __description__ = """SendMyWay hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "sendmyway.com"
+
+ NAME_PATTERN = r'<p class="file-name" ><.*?>\s*(?P<N>.+)'
+ SIZE_PATTERN = r'<small>\((?P<S>\d+) bytes\)</small>'
+
+
+getInfo = create_getInfo(SendmywayCom)
diff --git a/pyload/plugins/hoster/SendspaceCom.py b/pyload/plugins/hoster/SendspaceCom.py
new file mode 100644
index 000000000..2915d47c0
--- /dev/null
+++ b/pyload/plugins/hoster/SendspaceCom.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class SendspaceCom(SimpleHoster):
+ __name__ = "SendspaceCom"
+ __type__ = "hoster"
+ __version__ = "0.14"
+
+ __pattern__ = r'http://(?:www\.)?sendspace\.com/file/.*'
+
+ __description__ = """Sendspace.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<h2 class="bgray">\s*<(?:b|strong)>(?P<N>[^<]+)</'
+ SIZE_PATTERN = r'<div class="file_description reverse margin_center">\s*<b>File Size:</b>\s*(?P<S>[\d.,]+)(?P<U>[\w^_]+)\s*</div>'
+ OFFLINE_PATTERN = r'<div class="msg error" style="cursor: default">Sorry, the file you requested is not available.</div>'
+
+ LINK_PATTERN = r'<a id="download_button" href="([^"]+)"'
+ CAPTCHA_PATTERN = r'<td><img src="(/captchas/captcha\.php?captcha=([^"]+))"></td>'
+ USER_CAPTCHA_PATTERN = r'<td><img src="(/captchas/captcha\.php?user=([^"]+))"></td>'
+
+
+ def handleFree(self):
+ params = {}
+ for _i in xrange(3):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ if 'captcha_hash' in params:
+ self.correctCaptcha()
+ download_url = m.group(1)
+ break
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ if 'captcha_hash' in params:
+ self.invalidCaptcha()
+ captcha_url1 = "http://www.sendspace.com/" + m.group(1)
+ m = re.search(self.USER_CAPTCHA_PATTERN, self.html)
+ captcha_url2 = "http://www.sendspace.com/" + m.group(1)
+ params = {'captcha_hash': m.group(2),
+ 'captcha_submit': 'Verify',
+ 'captcha_answer': self.decryptCaptcha(captcha_url1) + " " + self.decryptCaptcha(captcha_url2)}
+ else:
+ params = {'download': "Regular Download"}
+
+ self.logDebug(params)
+ self.html = self.load(self.pyfile.url, post=params)
+ else:
+ self.fail(_("Download link not found"))
+
+ self.download(download_url)
+
+
+getInfo = create_getInfo(SendspaceCom)
diff --git a/pyload/plugins/hoster/Share4webCom.py b/pyload/plugins/hoster/Share4webCom.py
new file mode 100644
index 000000000..9449ec959
--- /dev/null
+++ b/pyload/plugins/hoster/Share4webCom.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.hoster.UnibytesCom import UnibytesCom
+from pyload.plugins.internal.SimpleHoster import create_getInfo
+
+
+class Share4webCom(UnibytesCom):
+ __name__ = "Share4webCom"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'https?://(?:www\.)?share4web\.com/get/\w+'
+
+ __description__ = """Share4web.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "share4web.com"
+
+
+getInfo = create_getInfo(Share4webCom)
diff --git a/pyload/plugins/hoster/Share76Com.py b/pyload/plugins/hoster/Share76Com.py
new file mode 100644
index 000000000..fb6fc041f
--- /dev/null
+++ b/pyload/plugins/hoster/Share76Com.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class Share76Com(DeadHoster):
+ __name__ = "Share76Com"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'http://(?:www\.)?share76\.com/\w{12}'
+
+ __description__ = """Share76.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = []
+
+
+getInfo = create_getInfo(Share76Com)
diff --git a/pyload/plugins/hoster/ShareFilesCo.py b/pyload/plugins/hoster/ShareFilesCo.py
new file mode 100644
index 000000000..db51c2024
--- /dev/null
+++ b/pyload/plugins/hoster/ShareFilesCo.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class ShareFilesCo(DeadHoster):
+ __name__ = "ShareFilesCo"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?sharefiles\.co/\w{12}'
+
+ __description__ = """Sharefiles.co hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(ShareFilesCo)
diff --git a/pyload/plugins/hoster/SharebeesCom.py b/pyload/plugins/hoster/SharebeesCom.py
new file mode 100644
index 000000000..07f0e8bfd
--- /dev/null
+++ b/pyload/plugins/hoster/SharebeesCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class SharebeesCom(DeadHoster):
+ __name__ = "SharebeesCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?sharebees\.com/\w{12}'
+
+ __description__ = """ShareBees hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(SharebeesCom)
diff --git a/pyload/plugins/hoster/ShareonlineBiz.py b/pyload/plugins/hoster/ShareonlineBiz.py
new file mode 100644
index 000000000..0769cfe17
--- /dev/null
+++ b/pyload/plugins/hoster/ShareonlineBiz.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+
+
+def getInfo(urls):
+ api_url_base = "http://api.share-online.biz/linkcheck.php"
+
+ urls = [url.replace("https://", "http://") for url in urls]
+
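+    # query the linkcheck API in batches of 90 links per POST request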
+ for chunk in chunks(urls, 90):
+ api_param_file = {"links": "\n".join(x.replace("http://www.share-online.biz/dl/", "").rstrip("/") for x in
+ chunk)} # api only supports old style links
+ html = getURL(api_url_base, post=api_param_file, decode=True)
+ result = []
+ for i, res in enumerate(html.split("\n")):
+ if not res:
+ continue
+ fields = res.split(";")
+
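+            # map the API status: "OK" -> 2 (online), "DELETED"/"NOT FOUND" -> 1 (offline), anything else -> 3 (unknown)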
+ if fields[1] == "OK":
+ status = 2
+ elif fields[1] in ("DELETED", "NOT FOUND"):
+ status = 1
+ else:
+ status = 3
+
+ result.append((fields[2], int(fields[3]), status, chunk[i]))
+ yield result
+
+
+class ShareonlineBiz(Hoster):
+ __name__ = "ShareonlineBiz"
+ __type__ = "hoster"
+ __version__ = "0.41"
+
+ __pattern__ = r'https?://(?:www\.)?(share-online\.biz|egoshare\.com)/(download\.php\?id=|dl/)(?P<ID>\w+)'
+
+ __description__ = """Shareonline.biz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org"),
+ ("mkaay", "mkaay@mkaay.de"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ ERROR_INFO_PATTERN = r'<p class="b">Information:</p>\s*<div>\s*<strong>(.*?)</strong>'
+
+
+ def setup(self):
+ self.file_id = re.match(self.__pattern__, self.pyfile.url).group("ID")
+ self.pyfile.url = "http://www.share-online.biz/dl/" + self.file_id
+
+ self.resumeDownload = self.premium
+ self.multiDL = False
+
+ self.check_data = None
+
+
+ def process(self, pyfile):
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ if self.api_data:
+ self.check_data = {"size": int(self.api_data['size']), "md5": self.api_data['md5']}
+
+
+ def loadAPIData(self):
+ api_url_base = "http://api.share-online.biz/linkcheck.php?md5=1"
+ api_param_file = {"links": self.file_id} #: api only supports old style links
+ html = self.load(api_url_base, cookies=False, post=api_param_file, decode=True)
+
+ fields = html.split(";")
+ self.api_data = {"fileid": fields[0],
+ "status": fields[1]}
+ if not self.api_data['status'] == "OK":
+ self.offline()
+ else:
+ self.api_data['filename'] = fields[2]
+ self.api_data['size'] = fields[3] #: in bytes
+ self.api_data['md5'] = fields[4].strip().lower().replace("\n\n", "") #: md5
+
+
+ def handleFree(self):
+ self.loadAPIData()
+ self.pyfile.name = self.api_data['filename']
+ self.pyfile.size = int(self.api_data['size'])
+
+ self.html = self.load(self.pyfile.url, cookies=True) #: refer, stuff
+ self.setWait(3)
+ self.wait()
+
+ self.html = self.load("%s/free/" % self.pyfile.url, post={"dl_free": "1", "choice": "free"}, decode=True)
+ self.checkErrors()
+
+ m = re.search(r'var wait=(\d+);', self.html)
+
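+        # up to 5 captcha attempts; the endpoint answers "0" for a wrong captcha,
+        # otherwise the base64-encoded download url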
+ recaptcha = ReCaptcha(self)
+ for _i in xrange(5):
+ challenge, response = recaptcha.challenge("6LdatrsSAAAAAHZrB70txiV5p-8Iv8BtVxlTtjKX")
+ self.setWait(int(m.group(1)) if m else 30)
+ res = self.load("%s/free/captcha/%d" % (self.pyfile.url, int(time() * 1000)),
+ post={'dl_free': '1',
+ 'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': response})
+
+ if not res == '0':
+ self.correctCaptcha()
+ break
+ else:
+ self.invalidCaptcha()
+ else:
+ self.invalidCaptcha()
+ self.fail(_("No valid captcha solution received"))
+
+ download_url = res.decode("base64")
+ if not download_url.startswith("http://"):
+ self.error(_("Wrong download url"))
+
+ self.wait()
+ self.download(download_url)
+ # check download
+ check = self.checkDownload({
+ "cookie": re.compile(r'<div id="dl_failure"'),
+ "fail": re.compile(r"<title>Share-Online")
+ })
+ if check == "cookie":
+ self.invalidCaptcha()
+ self.retry(5, 60, "Cookie failure")
+ elif check == "fail":
+ self.invalidCaptcha()
+ self.retry(5, 5 * 60, "Download failed")
+ else:
+ self.correctCaptcha()
+
+
+    def handlePremium(self): #: would work better if the (account) api were loaded internally
+ self.account.getAccountInfo(self.user, True)
+ html = self.load("http://api.share-online.biz/account.php",
+ {"username": self.user, "password": self.account.accounts[self.user]['password'],
+ "act": "download", "lid": self.file_id})
+
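+        # the account API answers with plain "key: value" lines (status, name, size, url, ...)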
+ self.api_data = dlinfo = {}
+ for line in html.splitlines():
+ key, value = line.split(": ")
+ dlinfo[key.lower()] = value
+
+ self.logDebug(dlinfo)
+ if not dlinfo['status'] == "online":
+ self.offline()
+ else:
+ self.pyfile.name = dlinfo['name']
+ self.pyfile.size = int(dlinfo['size'])
+
+ dlLink = dlinfo['url']
+ if dlLink == "server_under_maintenance":
+ self.tempOffline()
+ else:
+ self.multiDL = True
+ self.download(dlLink)
+
+
+ def checkErrors(self):
+ m = re.search(r"/failure/(.*?)/1", self.req.lastEffectiveURL)
+ if m is None:
+ return
+
+ err = m.group(1)
+ try:
+ self.logError(err, re.search(self.ERROR_INFO_PATTERN, self.html).group(1))
+ except:
+ self.logError(err, "Unknown error occurred")
+
+ if err == "invalid":
+ self.fail(_("File not available"))
+ elif err in ("freelimit", "size", "proxy"):
+ self.fail(_("Premium account needed"))
+ else:
+            if err == 'server':
+                self.setWait(600, False)
+            elif err == 'expired':
+                self.setWait(30, False)
+ else:
+ self.setWait(300, True)
+
+ self.wait()
+ self.retry(max_tries=25, reason=err)
diff --git a/pyload/plugins/hoster/ShareplaceCom.py b/pyload/plugins/hoster/ShareplaceCom.py
new file mode 100644
index 000000000..6e11de05c
--- /dev/null
+++ b/pyload/plugins/hoster/ShareplaceCom.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class ShareplaceCom(Hoster):
+ __name__ = "ShareplaceCom"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'(http://)?(?:www\.)?shareplace\.(com|org)/\?\w+'
+
+ __description__ = """Shareplace.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("ACCakut", None)]
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.prepare()
+ self.download(self.get_file_url())
+
+
+ def prepare(self):
+ if not self.file_exists():
+ self.offline()
+
+ self.pyfile.name = self.get_file_name()
+
+ wait_time = self.get_waiting_time()
+ self.setWait(wait_time)
+ self.wait()
+
+
+ def get_waiting_time(self):
+ if not self.html:
+ self.download_html()
+
+ #var zzipitime = 15;
+ m = re.search(r'var zzipitime = (\d+);', self.html)
+ if m:
+ sec = int(m.group(1))
+ else:
+ sec = 0
+
+ return sec
+
+
+ def download_html(self):
+ url = re.sub("shareplace.com\/\?", "shareplace.com//index1.php/?a=", self.pyfile.url)
+ self.html = self.load(url, decode=True)
+
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ url = re.search(r"var beer = '(.*?)';", self.html)
+ if url:
+ url = url.group(1)
+ url = unquote(
+ url.replace("http://http:/", "").replace("vvvvvvvvv", "").replace("lllllllll", "").replace(
+ "teletubbies", ""))
+ self.logDebug("URL: %s" % url)
+ return url
+ else:
+ self.error(_("Absolute filepath not found"))
+
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ return re.search("<title>\s*(.*?)\s*</title>", self.html).group(1)
+
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if re.search(r"HTTP Status 404", self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/SharingmatrixCom.py b/pyload/plugins/hoster/SharingmatrixCom.py
new file mode 100644
index 000000000..7459c12b6
--- /dev/null
+++ b/pyload/plugins/hoster/SharingmatrixCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class SharingmatrixCom(DeadHoster):
+ __name__ = "SharingmatrixCom"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?sharingmatrix\.com/file/\w+'
+
+ __description__ = """Sharingmatrix.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de"),
+ ("paulking", None)]
+
+
+getInfo = create_getInfo(SharingmatrixCom)
diff --git a/pyload/plugins/hoster/ShragleCom.py b/pyload/plugins/hoster/ShragleCom.py
new file mode 100644
index 000000000..792457bbc
--- /dev/null
+++ b/pyload/plugins/hoster/ShragleCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class ShragleCom(DeadHoster):
+ __name__ = "ShragleCom"
+ __type__ = "hoster"
+ __version__ = "0.22"
+
+ __pattern__ = r'http://(?:www\.)?(cloudnator|shragle)\.com/files/(?P<ID>.*?)/'
+
+ __description__ = """Cloudnator.com (Shragle.com) hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(ShragleCom)
diff --git a/pyload/plugins/hoster/SimplyPremiumCom.py b/pyload/plugins/hoster/SimplyPremiumCom.py
new file mode 100644
index 000000000..bb431a5dd
--- /dev/null
+++ b/pyload/plugins/hoster/SimplyPremiumCom.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from datetime import datetime, timedelta
+
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+
+
+class SimplyPremiumCom(Hoster):
+ __name__ = "SimplyPremiumCom"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://.*(simply-premium)\.com'
+
+ __description__ = """Simply-Premium.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("EvolutionClip", "evolutionclip@live.de")]
+
+
+ def setup(self):
+ self.chunkLimit = 16
+ self.resumeDownload = False
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Simply-Premium.com")
+ self.fail(_("No Simply-Premium.com account provided"))
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ for i in xrange(5):
+ page = self.load('http://www.simply-premium.com/premium.php?info&link=' + pyfile.url)
+ self.logDebug("JSON data: " + page)
+ if page != '':
+ break
+ else:
+ self.logInfo(_("Unable to get API data, waiting 1 minute and retry"))
+ self.retry(5, 60, "Unable to get API data")
+
+ if '<valid>0</valid>' in page or (
+ "You are not allowed to download from this host" in page and self.premium):
+ self.account.relogin(self.user)
+ self.retry()
+ elif "NOTFOUND" in page:
+ self.offline()
+ elif "downloadlimit" in page:
+                self.logWarning(_("Reached maximum connections"))
+                self.retry(5, 60, "Reached maximum connections")
+ elif "trafficlimit" in page:
+ self.logWarning(_("Reached daily limit for this host"))
+                self.retry(5, secondsToMidnight(gmt=2), "Daily limit for this host reached")
+ elif "hostererror" in page:
+ self.logWarning(_("Hoster temporarily unavailable, waiting 1 minute and retry"))
+ self.retry(5, 60, "Hoster is temporarily unavailable")
+ #page = json_loads(page)
+ #new_url = page.keys()[0]
+ #self.api_data = page[new_url]
+
+ try:
+ self.pyfile.name = re.search(r'<name>([^<]+)</name>', page).group(1)
+ except AttributeError:
+ self.pyfile.name = ""
+
+ try:
+            self.pyfile.size = int(re.search(r'<size>(\d+)</size>', page).group(1))
+ except AttributeError:
+ self.pyfile.size = 0
+
+ try:
+ new_url = re.search(r'<download>([^<]+)</download>', page).group(1)
+ except AttributeError:
+ new_url = 'http://www.simply-premium.com/premium.php?link=' + pyfile.url
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: " + new_url)
+
+ self.download(new_url, disposition=True)
diff --git a/pyload/plugins/hoster/SimplydebridCom.py b/pyload/plugins/hoster/SimplydebridCom.py
new file mode 100644
index 000000000..5092be32a
--- /dev/null
+++ b/pyload/plugins/hoster/SimplydebridCom.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class SimplydebridCom(Hoster):
+ __name__ = "SimplydebridCom"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/sd\.php/*'
+
+ __description__ = """Simply-debrid.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Kagenoshin", "kagenoshin@gmx.ch")]
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = 1
+
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "simply-debrid.com")
+ self.fail(_("No simply-debrid.com account provided"))
+
+ self.logDebug("Old URL: %s" % pyfile.url)
+
+ #fix the links for simply-debrid.com!
+ new_url = pyfile.url
+ new_url = new_url.replace("clz.to", "cloudzer.net/file")
+ new_url = new_url.replace("http://share-online", "http://www.share-online")
+ new_url = new_url.replace("ul.to", "uploaded.net/file")
+ new_url = new_url.replace("uploaded.com", "uploaded.net")
+ new_url = new_url.replace("filerio.com", "filerio.in")
+ new_url = new_url.replace("lumfile.com", "lumfile.se")
+ if('fileparadox' in new_url):
+ new_url = new_url.replace("http://", "https://")
+
+ if re.match(self.__pattern__, new_url):
+ new_url = new_url
+
+ self.logDebug("New URL: %s" % new_url)
+
+ if not re.match(self.__pattern__, new_url):
+ page = self.load('http://simply-debrid.com/api.php', get={'dl': new_url}) # +'&u='+self.user+'&p='+self.account.getAccountData(self.user)['password'])
+ if 'tiger Link' in page or 'Invalid Link' in page or ('API' in page and 'ERROR' in page):
+ self.fail(_("Unable to unrestrict link"))
+ new_url = page
+
+ self.setWait(5)
+ self.wait()
+ self.logDebug("Unrestricted URL: " + new_url)
+
+ self.download(new_url, disposition=True)
+
+ check = self.checkDownload({"bad1": "No address associated with hostname", "bad2": "<html"})
+
+ if check == "bad1" or check == "bad2":
+ self.retry(24, 3 * 60, "Bad file downloaded")
diff --git a/pyload/plugins/hoster/SockshareCom.py b/pyload/plugins/hoster/SockshareCom.py
new file mode 100644
index 000000000..d28041893
--- /dev/null
+++ b/pyload/plugins/hoster/SockshareCom.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class SockshareCom(DeadHoster):
+ __name__ = "SockshareCom"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?sockshare\.com/(mobile/)?(file|embed)/(?P<ID>\w+)'
+
+ __description__ = """Sockshare.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de"),
+ ("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+getInfo = create_getInfo(SockshareCom)
diff --git a/pyload/plugins/hoster/SoundcloudCom.py b/pyload/plugins/hoster/SoundcloudCom.py
new file mode 100644
index 000000000..284f6ffbf
--- /dev/null
+++ b/pyload/plugins/hoster/SoundcloudCom.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+import pycurl
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class SoundcloudCom(Hoster):
+ __name__ = "SoundcloudCom"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'https?://(?:www\.)?soundcloud\.com/(?P<UID>.*?)/(?P<SID>.*)'
+
+ __description__ = """SoundCloud.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Peekayy", "peekayy.dev@gmail.com")]
+
+
+ def process(self, pyfile):
+ # default UserAgent of HTTPRequest fails for this hoster so we use this one
+ self.req.http.c.setopt(pycurl.USERAGENT, 'Mozilla/5.0')
+ page = self.load(pyfile.url)
+ m = re.search(r'<div class="haudio.*?large.*?" data-sc-track="(?P<ID>\d*)"', page)
+ songId = clientId = ""
+ if m:
+ songId = m.group("ID")
+ if len(songId) <= 0:
+ self.logError(_("Could not find song id"))
+ self.offline()
+ else:
+ m = re.search(r'"clientID":"(?P<CID>.*?)"', page)
+ if m:
+ clientId = m.group("CID")
+
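+            # fall back to a hard-coded public client id when the page does not embed one (assumed to still work)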
+ if len(clientId) <= 0:
+ clientId = "b45b1aa10f1ac2941910a7f0d10f8e28"
+
+ m = re.search(r'<em itemprop="name">\s(?P<TITLE>.*?)\s</em>', page)
+ if m:
+ pyfile.name = m.group("TITLE") + ".mp3"
+ else:
+ pyfile.name = re.match(self.__pattern__, pyfile.url).group("SID") + ".mp3"
+
+ # url to retrieve the actual song url
+ page = self.load("https://api.sndcdn.com/i1/tracks/%s/streams" % songId, get={"client_id": clientId})
+ # getting streams
+ # for now we choose the first stream found in all cases
+ # it could be improved if relevant for this hoster
+ streams = [
+ (result.group("QUALITY"), result.group("URL"))
+ for result in re.finditer(r'"(?P<QUALITY>.*?)":"(?P<URL>.*?)"', page)
+ ]
+ self.logDebug("Found Streams", streams)
+ self.logDebug("Downloading", streams[0][0], streams[0][1])
+ self.download(streams[0][1])
diff --git a/pyload/plugins/hoster/SpeedLoadOrg.py b/pyload/plugins/hoster/SpeedLoadOrg.py
new file mode 100644
index 000000000..35bd84803
--- /dev/null
+++ b/pyload/plugins/hoster/SpeedLoadOrg.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class SpeedLoadOrg(DeadHoster):
+ __name__ = "SpeedLoadOrg"
+ __type__ = "hoster"
+ __version__ = "1.02"
+
+ __pattern__ = r'http://(?:www\.)?speedload\.org/(?P<ID>\w+)'
+
+ __description__ = """Speedload.org hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+getInfo = create_getInfo(SpeedLoadOrg)
diff --git a/pyload/plugins/hoster/SpeedfileCz.py b/pyload/plugins/hoster/SpeedfileCz.py
new file mode 100644
index 000000000..549a146d4
--- /dev/null
+++ b/pyload/plugins/hoster/SpeedfileCz.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class SpeedfileCz(DeadHoster):
+    __name__ = "SpeedfileCz"
+ __type__ = "hoster"
+ __version__ = "0.32"
+
+ __pattern__ = r'http://(?:www\.)?speedfile\.cz/.*'
+
+ __description__ = """Speedfile.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(SpeedfileCz)
diff --git a/pyload/plugins/hoster/SpeedyshareCom.py b/pyload/plugins/hoster/SpeedyshareCom.py
new file mode 100644
index 000000000..5128e42e7
--- /dev/null
+++ b/pyload/plugins/hoster/SpeedyshareCom.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://speedy.sh/ep2qY/Zapp-Brannigan.jpg
+
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class SpeedyshareCom(SimpleHoster):
+ __name__ = "SpeedyshareCom"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?(speedyshare\.com|speedy\.sh)/\w+'
+
+ __description__ = """Speedyshare.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de")]
+
+
+ NAME_PATTERN = r'class=downloadfilename>(?P<N>.*)</span></td>'
+ SIZE_PATTERN = r'class=sizetagtext>(?P<S>.*) (?P<U>[kKmM]?[iI]?[bB]?)</div>'
+
+ OFFLINE_PATTERN = r'class=downloadfilenamenotfound>.*</span>'
+
+ LINK_PATTERN = r'<a href=\'(.*)\'><img src=/gf/slowdownload\.png alt=\'Slow Download\' border=0'
+
+
+ def setup(self):
+ self.multiDL = False
+ self.chunkLimit = 1
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Download link not found"))
+
+ dl_link = urljoin("http://www.speedyshare.com", m.group(1))
+ self.download(dl_link, disposition=True)
+
+ check = self.checkDownload({'html': re.compile("html")})
+ if check == "html":
+ self.error(_("Downloaded file is an html page"))
+
+
+getInfo = create_getInfo(SpeedyshareCom)
diff --git a/pyload/plugins/hoster/StorageTo.py b/pyload/plugins/hoster/StorageTo.py
new file mode 100644
index 000000000..5ba0c145b
--- /dev/null
+++ b/pyload/plugins/hoster/StorageTo.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class StorageTo(DeadHoster):
+ __name__ = "StorageTo"
+ __type__ = "hoster"
+ __version__ = "0.01"
+
+ __pattern__ = r'http://(?:www\.)?storage\.to/get/.+'
+
+ __description__ = """Storage.to hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+getInfo = create_getInfo(StorageTo)
diff --git a/pyload/plugins/hoster/StreamCz.py b/pyload/plugins/hoster/StreamCz.py
new file mode 100644
index 000000000..c9d00863e
--- /dev/null
+++ b/pyload/plugins/hoster/StreamCz.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+
+
+def getInfo(urls):
+ result = []
+
+ for url in urls:
+
+ html = getURL(url)
+ if re.search(StreamCz.OFFLINE_PATTERN, html):
+ # File offline
+ result.append((url, 0, 1, url))
+ else:
+ result.append((url, 0, 2, url))
+ yield result
+
+
+class StreamCz(Hoster):
+ __name__ = "StreamCz"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'https?://(?:www\.)?stream\.cz/[^/]+/\d+.*'
+
+ __description__ = """Stream.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<link rel="video_src" href="http://www\.stream\.cz/\w+/(\d+)-([^"]+)" />'
+ OFFLINE_PATTERN = r'<h1 class="commonTitle">Str.nku nebylo mo.n. nal.zt \(404\)</h1>'
+
+ CDN_PATTERN = r'<param name="flashvars" value="[^"]*&id=(?P<ID>\d+)(?:&cdnLQ=(?P<cdnLQ>\d*))?(?:&cdnHQ=(?P<cdnHQ>\d*))?(?:&cdnHD=(?P<cdnHD>\d*))?&'
+
+
+ def setup(self):
+ self.multiDL = True
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+
+ if re.search(self.OFFLINE_PATTERN, self.html):
+ self.offline()
+
+ m = re.search(self.CDN_PATTERN, self.html)
+ if m is None:
+ self.error(_("CDN_PATTERN not found"))
+ cdn = m.groupdict()
+ self.logDebug(cdn)
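+        # prefer the highest quality CDN available: HD, then HQ, then LQ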
+ for cdnkey in ("cdnHD", "cdnHQ", "cdnLQ"):
+ if cdnkey in cdn and cdn[cdnkey] > '':
+ cdnid = cdn[cdnkey]
+ break
+ else:
+ self.fail(_("Stream URL not found"))
+
+ m = re.search(self.NAME_PATTERN, self.html)
+ if m is None:
+ self.error(_("NAME_PATTERN not found"))
+ pyfile.name = "%s-%s.%s.mp4" % (m.group(2), m.group(1), cdnkey[-2:])
+
+ download_url = "http://cdn-dispatcher.stream.cz/?id=" + cdnid
+ self.logInfo(_("STREAM: %s") % cdnkey[-2:], download_url)
+ self.download(download_url)
diff --git a/pyload/plugins/hoster/StreamcloudEu.py b/pyload/plugins/hoster/StreamcloudEu.py
new file mode 100644
index 000000000..5ae617095
--- /dev/null
+++ b/pyload/plugins/hoster/StreamcloudEu.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class StreamcloudEu(XFSHoster):
+ __name__ = "StreamcloudEu"
+ __type__ = "hoster"
+ __version__ = "0.09"
+
+ __pattern__ = r'http://(?:www\.)?streamcloud\.eu/\w{12}'
+
+ __description__ = """Streamcloud.eu hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("seoester", "seoester@googlemail.com")]
+
+
+ HOSTER_DOMAIN = "streamcloud.eu"
+
+ LINK_PATTERN = r'file: "(http://(stor|cdn)\d+\.streamcloud\.eu:?\d*/.*/video\.(mp4|flv))",'
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = 1
+ self.resumeDownload = self.premium
+
+
+getInfo = create_getInfo(StreamcloudEu)
diff --git a/pyload/plugins/hoster/TurbobitNet.py b/pyload/plugins/hoster/TurbobitNet.py
new file mode 100644
index 000000000..b069e7a84
--- /dev/null
+++ b/pyload/plugins/hoster/TurbobitNet.py
@@ -0,0 +1,173 @@
+# -*- coding: utf-8 -*-
+
+import random
+import re
+import time
+
+from Crypto.Cipher import ARC4
+from binascii import hexlify, unhexlify
+from pycurl import HTTPHEADER
+from urllib import quote
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, timestamp
+
+
+class TurbobitNet(SimpleHoster):
+ __name__ = "TurbobitNet"
+ __type__ = "hoster"
+ __version__ = "0.14"
+
+ __pattern__ = r'http://(?:www\.)?turbobit\.net/(?:download/free/)?(?P<ID>\w+)'
+
+ __description__ = """Turbobit.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("prOq", None)]
+
+
+ URL_REPLACEMENTS = [(__pattern__, "http://turbobit.net/\g<ID>.html")]
+
+ COOKIES = [("turbobit.net", "user_lang", "en")]
+
+ NAME_PATTERN = r'id="file-title">(?P<N>.+?)<'
+ SIZE_PATTERN = r'class="file-size">(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+ OFFLINE_PATTERN = r'<h2>File Not Found</h2>|html\(\'File (?:was )?not found'
+
+ LINK_PATTERN = r'(?P<url>/download/redirect/[^"\']+)'
+ LIMIT_WAIT_PATTERN = r'<div id=\'timeout\'>(\d+)<'
+
+ CAPTCHA_PATTERN = r'<img alt="Captcha" src="(.+?)"'
+
+
+ def handleFree(self):
+ self.url = "http://turbobit.net/download/free/%s" % self.info['ID']
+ self.html = self.load(self.url, ref=True, decode=True)
+
+ rtUpdate = self.getRtUpdate()
+
+ self.solveCaptcha()
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
+ self.url = self.getDownloadUrl(rtUpdate)
+
+ self.wait()
+ self.html = self.load(self.url)
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With:"])
+ self.downloadFile()
+
+
+ def solveCaptcha(self):
+ for _i in xrange(5):
+ m = re.search(self.LIMIT_WAIT_PATTERN, self.html)
+ if m:
+ wait_time = int(m.group(1))
+ self.wait(wait_time, wait_time > 60)
+ self.retry()
+
+ action, inputs = self.parseHtmlForm("action='#'")
+ if not inputs:
+ self.error(_("Captcha form not found"))
+ self.logDebug(inputs)
+
+ if inputs['captcha_type'] == 'recaptcha':
+ recaptcha = ReCaptcha(self)
+ inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge()
+ else:
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m is None:
+ self.error(_("captcha"))
+ captcha_url = m.group(1)
+ inputs['captcha_response'] = self.decryptCaptcha(captcha_url)
+
+ self.logDebug(inputs)
+ self.html = self.load(self.url, post=inputs)
+
+ if '<div class="captcha-error">Incorrect, try again!<' in self.html:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail(_("Invalid captcha"))
+
+
+ def getRtUpdate(self):
+ rtUpdate = self.getStorage("rtUpdate")
+ if not rtUpdate:
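+            # refresh the cached routine when the plugin version changed or the copy is older than ~24 hours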
+ if self.getStorage("version") != self.__version__ \
+ or int(self.getStorage("timestamp", 0)) + 86400000 < timestamp():
+ # that's right, we are even using jdownloader updates
+ rtUpdate = getURL("http://update0.jdownloader.org/pluginstuff/tbupdate.js")
+ rtUpdate = self.decrypt(rtUpdate.splitlines()[1])
+ # but we still need to fix the syntax to work with other engines than rhino
+ rtUpdate = re.sub(r'for each\(var (\w+) in(\[[^\]]+\])\)\{',
+ r'zza=\2;for(var zzi=0;zzi<zza.length;zzi++){\1=zza[zzi];', rtUpdate)
+ rtUpdate = re.sub(r"for\((\w+)=", r"for(var \1=", rtUpdate)
+
+ self.setStorage("rtUpdate", rtUpdate)
+ self.setStorage("timestamp", timestamp())
+ self.setStorage("version", self.__version__)
+ else:
+ self.logError(_("Unable to download, wait for update..."))
+ self.tempOffline()
+
+ return rtUpdate
+
+
+ def getDownloadUrl(self, rtUpdate):
+ self.req.http.lastURL = self.url
+
+ m = re.search("(/\w+/timeout\.js\?\w+=)([^\"\'<>]+)", self.html)
+ if m:
+ url = "http://turbobit.net%s%s" % m.groups()
+ else:
+ url = "http://turbobit.net/files/timeout.js?ver=%s" % "".join(random.choice('0123456789ABCDEF') for _i in xrange(32))
+
+ fun = self.load(url)
+
+ self.setWait(65, False)
+
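+        # evaluate the obfuscated timeout.js with both candidate 'b' values; a valid run returns a relative /download/ path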
+ for b in [1, 3]:
+ self.jscode = "var id = \'%s\';var b = %d;var inn = \'%s\';%sout" % (
+ self.info['ID'], b, quote(fun), rtUpdate)
+
+ try:
+ out = self.js.eval(self.jscode)
+ self.logDebug("URL", self.js.engine, out)
+ if out.startswith('/download/'):
+ return "http://turbobit.net%s" % out.strip()
+ except Exception, e:
+ self.logError(e)
+ else:
+ if self.retries >= 2:
+ # retry with updated js
+ self.delStorage("rtUpdate")
+ self.retry()
+
+
+ def decrypt(self, data):
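+        # payload is hex-encoded RC4 with a fixed key; RC4 is symmetric, so encrypt() here also decrypts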
+ cipher = ARC4.new(hexlify('E\x15\xa1\x9e\xa3M\xa0\xc6\xa0\x84\xb6H\x83\xa8o\xa0'))
+ return unhexlify(cipher.encrypt(unhexlify(data)))
+
+
+ def getLocalTimeString(self):
+ lt = time.localtime()
+ tz = time.altzone if lt.tm_isdst else time.timezone
+ return "%s GMT%+03d%02d" % (time.strftime("%a %b %d %Y %H:%M:%S", lt), -tz // 3600, tz % 3600)
+
+
+ def handlePremium(self):
+ self.logDebug("Premium download as user %s" % self.user)
+ self.downloadFile()
+
+
+ def downloadFile(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Download link not found"))
+ self.url = "http://turbobit.net" + m.group('url')
+ self.download(self.url)
+
+
+getInfo = create_getInfo(TurbobitNet)
diff --git a/pyload/plugins/hoster/TurbouploadCom.py b/pyload/plugins/hoster/TurbouploadCom.py
new file mode 100644
index 000000000..80ad6ef1f
--- /dev/null
+++ b/pyload/plugins/hoster/TurbouploadCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class TurbouploadCom(DeadHoster):
+ __name__ = "TurbouploadCom"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'http://(?:www\.)?turboupload\.com/(\w+).*'
+
+ __description__ = """Turboupload.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(TurbouploadCom)
diff --git a/pyload/plugins/hoster/TusfilesNet.py b/pyload/plugins/hoster/TusfilesNet.py
new file mode 100644
index 000000000..3b4d7f92c
--- /dev/null
+++ b/pyload/plugins/hoster/TusfilesNet.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class TusfilesNet(XFSHoster):
+ __name__ = "TusfilesNet"
+ __type__ = "hoster"
+ __version__ = "0.07"
+
+ __pattern__ = r'https?://(?:www\.)?tusfiles\.net/\w{12}'
+
+ __description__ = """Tusfiles.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com"),
+ ("guidobelix", "guidobelix@hotmail.it")]
+
+
+ HOSTER_DOMAIN = "tusfiles.net"
+
+ INFO_PATTERN = r'\](?P<N>.+) - (?P<S>[\d.,]+) (?P<U>[\w^_]+)\['
+ OFFLINE_PATTERN = r'>File Not Found|<Title>TusFiles - Fast Sharing Files!'
+
+
+ def setup(self):
+ self.multiDL = False
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+
+ def handlePremium(self):
+ return self.handleFree()
+
+
+getInfo = create_getInfo(TusfilesNet)
diff --git a/pyload/plugins/hoster/TwoSharedCom.py b/pyload/plugins/hoster/TwoSharedCom.py
new file mode 100644
index 000000000..24dd92895
--- /dev/null
+++ b/pyload/plugins/hoster/TwoSharedCom.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class TwoSharedCom(SimpleHoster):
+ __name__ = "TwoSharedCom"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'http://(?:www\.)?2shared\.com/(account/)?(download|get|file|document|photo|video|audio)/.*'
+
+ __description__ = """2Shared.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<h1>(?P<N>.*)</h1>'
+ SIZE_PATTERN = r'<span class="dtitle">File size:</span>\s*(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+ OFFLINE_PATTERN = r'The file link that you requested is not valid\.|This file was deleted\.'
+
+ LINK_PATTERN = r'window.location =\'(.+?)\';'
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("Download link"))
+
+ link = m.group(1)
+ self.download(link)
+
+
+getInfo = create_getInfo(TwoSharedCom)
diff --git a/pyload/plugins/hoster/UlozTo.py b/pyload/plugins/hoster/UlozTo.py
new file mode 100644
index 000000000..402a5e3e6
--- /dev/null
+++ b/pyload/plugins/hoster/UlozTo.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+
+import re
+import time
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+def convertDecimalPrefix(m):
+ # decimal prefixes used in filesize and traffic
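+    # e.g. "1.5 M" -> "1500000" (the value expanded to a plain digit string)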
+ return ("%%.%df" % {'k': 3, 'M': 6, 'G': 9}[m.group(2)] % float(m.group(1))).replace('.', '')
+
+
+class UlozTo(SimpleHoster):
+ __name__ = "UlozTo"
+ __type__ = "hoster"
+ __version__ = "1.00"
+
+ __pattern__ = r'http://(?:www\.)?(uloz\.to|ulozto\.(cz|sk|net)|bagruj\.cz|zachowajto\.pl)/(?:live/)?(?P<id>\w+/[^/?]*)'
+
+ __description__ = """Uloz.to hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ INFO_PATTERN = r'<p>File <strong>(?P<N>[^<]+)</strong> is password protected</p>'
+ NAME_PATTERN = r'<title>(?P<N>[^<]+) \| Uloz\.to</title>'
+ SIZE_PATTERN = r'<span id="fileSize">.*?(?P<S>[\d.,]+\s[kMG]?B)</span>'
+ OFFLINE_PATTERN = r'<title>404 - Page not found</title>|<h1 class="h1">File (has been deleted|was banned)</h1>'
+
+ SIZE_REPLACEMENTS = [('([\d.]+)\s([kMG])B', convertDecimalPrefix)]
+ URL_REPLACEMENTS = [(r"(?<=http://)([^/]+)", "www.ulozto.net")]
+
+ ADULT_PATTERN = r'<form action="(?P<link>[^\"]*)" method="post" id="frm-askAgeForm">'
+ PASSWD_PATTERN = r'<div class="passwordProtectedFile">'
+ VIPLINK_PATTERN = r'<a href="[^"]*\?disclaimer=1" class="linkVip">'
+ FREE_URL_PATTERN = r'<div class="freeDownloadForm"><form action="([^"]+)"'
+ PREMIUM_URL_PATTERN = r'<div class="downloadForm"><form action="([^"]+)"'
+ TOKEN_PATTERN = r'<input type="hidden" name="_token_" id="[^\"]*" value="(?P<token>.+?)"'
+
+
+ def setup(self):
+ self.multiDL = self.premium
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ pyfile.url = re.sub(r"(?<=http://)([^/]+)", "www.ulozto.net", pyfile.url)
+ self.html = self.load(pyfile.url, decode=True, cookies=True)
+
+ if re.search(self.ADULT_PATTERN, self.html):
+ self.logInfo(_("Adult content confirmation needed"))
+
+ m = re.search(self.TOKEN_PATTERN, self.html)
+ if m is None:
+ self.error(_("TOKEN_PATTERN not found"))
+ token = m.group(1)
+
+ self.html = self.load(pyfile.url, get={"do": "askAgeForm-submit"},
+ post={"agree": "Confirm", "_token_": token}, cookies=True)
+
+ passwords = self.getPassword().splitlines()
+ while self.PASSWD_PATTERN in self.html:
+ if passwords:
+ password = passwords.pop(0)
+ self.logInfo(_("Password protected link, trying ") + password)
+ self.html = self.load(pyfile.url, get={"do": "passwordProtectedForm-submit"},
+ post={"password": password, "password_send": 'Send'}, cookies=True)
+ else:
+ self.fail(_("No or incorrect password"))
+
+ if re.search(self.VIPLINK_PATTERN, self.html):
+ self.html = self.load(pyfile.url, get={"disclaimer": "1"})
+
+ self.getFileInfo()
+
+ if self.premium and self.checkTrafficLeft():
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ self.doCheckDownload()
+
+
+ def handleFree(self):
+ action, inputs = self.parseHtmlForm('id="frm-downloadDialog-freeDownloadForm"')
+ if not action or not inputs:
+ self.error(_("Free download form not found"))
+
+ self.logDebug("inputs.keys = " + str(inputs.keys()))
+ # get and decrypt captcha
+ if all(key in inputs for key in ("captcha_value", "captcha_id", "captcha_key")):
+ # Old version - last seen 9.12.2013
+ self.logDebug('Using "old" version')
+
+ captcha_value = self.decryptCaptcha("http://img.uloz.to/captcha/%s.png" % inputs['captcha_id'])
+ self.logDebug("CAPTCHA ID: " + inputs['captcha_id'] + ", CAPTCHA VALUE: " + captcha_value)
+
+ inputs.update({'captcha_id': inputs['captcha_id'], 'captcha_key': inputs['captcha_key'], 'captcha_value': captcha_value})
+
+ elif all(key in inputs for key in ("captcha_value", "timestamp", "salt", "hash")):
+ # New version - better to get new parameters (like captcha reload) because of image url - since 6.12.2013
+ self.logDebug('Using "new" version')
+
+ xapca = self.load("http://www.ulozto.net/reloadXapca.php", get={"rnd": str(int(time.time()))})
+ self.logDebug("xapca = " + str(xapca))
+
+ data = json_loads(xapca)
+ captcha_value = self.decryptCaptcha(str(data['image']))
+ self.logDebug("CAPTCHA HASH: " + data['hash'], "CAPTCHA SALT: " + str(data['salt']), "CAPTCHA VALUE: " + captcha_value)
+
+ inputs.update({'timestamp': data['timestamp'], 'salt': data['salt'], 'hash': data['hash'], 'captcha_value': captcha_value})
+ else:
+ self.error(_("CAPTCHA form changed"))
+
+ self.multiDL = True
+ self.download("http://www.ulozto.net" + action, post=inputs, cookies=True, disposition=True)
+
+
+ def handlePremium(self):
+ self.download(self.pyfile.url + "?do=directDownload", disposition=True)
+ #parsed_url = self.findDownloadURL(premium=True)
+ #self.download(parsed_url, post={"download": "Download"})
+
+
+ def findDownloadURL(self, premium=False):
+ msg = _("%s link" % ("Premium" if premium else "Free"))
+ m = re.search(self.PREMIUM_URL_PATTERN if premium else self.FREE_URL_PATTERN, self.html)
+ if m is None:
+ self.error(msg)
+ parsed_url = "http://www.ulozto.net" + m.group(1)
+ self.logDebug("%s: %s" % (msg, parsed_url))
+ return parsed_url
+
+
+ def doCheckDownload(self):
+ check = self.checkDownload({
+ "wrong_captcha": re.compile(r'<ul class="error">\s*<li>Error rewriting the text.</li>'),
+ "offline": re.compile(self.OFFLINE_PATTERN),
+ "passwd": self.PASSWD_PATTERN,
+            "server_error": 'src="http://img.ulozto.cz/error403/vykricnik.jpg"', # parallel dl, server overload etc.
+            "not_found": "<title>Ulož.to</title>"
+ })
+
+ if check == "wrong_captcha":
+ #self.delStorage("captcha_id")
+ #self.delStorage("captcha_text")
+ self.invalidCaptcha()
+ self.retry(reason=_("Wrong captcha code"))
+ elif check == "offline":
+ self.offline()
+ elif check == "passwd":
+ self.fail(_("Wrong password"))
+ elif check == "server_error":
+ self.logError(_("Server error, try downloading later"))
+ self.multiDL = False
+ self.wait(1 * 60 * 60, True)
+ self.retry()
+ elif check == "not_found":
+ self.fail(_("Server error - file not downloadable"))
+
+
+getInfo = create_getInfo(UlozTo)
diff --git a/pyload/plugins/hoster/UloziskoSk.py b/pyload/plugins/hoster/UloziskoSk.py
new file mode 100644
index 000000000..e34f74a3f
--- /dev/null
+++ b/pyload/plugins/hoster/UloziskoSk.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class UloziskoSk(SimpleHoster):
+ __name__ = "UloziskoSk"
+ __type__ = "hoster"
+ __version__ = "0.24"
+
+ __pattern__ = r'http://(?:www\.)?ulozisko\.sk/.*'
+
+ __description__ = """Ulozisko.sk hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<div class="down1">(?P<N>[^<]+)</div>'
+    SIZE_PATTERN = ur'Veľkosť súboru: <strong>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</strong><br />'
+    OFFLINE_PATTERN = ur'<span class = "red">Zadaný súbor neexistuje z jedného z nasledujúcich dôvodov:</span>'
+
+ LINK_PATTERN = r'<form name = "formular" action = "([^"]+)" method = "post">'
+ ID_PATTERN = r'<input type = "hidden" name = "id" value = "([^"]+)" />'
+ CAPTCHA_PATTERN = r'<img src="(/obrazky/obrazky\.php\?fid=[^"]+)" alt="" />'
+ IMG_PATTERN = ur'<strong>PRE ZVÄČŠENIE KLIKNITE NA OBRÁZOK</strong><br /><a href = "([^"]+)">'
+
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+
+ m = re.search(self.IMG_PATTERN, self.html)
+ if m:
+ url = "http://ulozisko.sk" + m.group(1)
+ self.download(url)
+ else:
+ self.handleFree()
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+ parsed_url = 'http://www.ulozisko.sk' + m.group(1)
+
+ m = re.search(self.ID_PATTERN, self.html)
+ if m is None:
+ self.error(_("ID_PATTERN not found"))
+ id = m.group(1)
+
+ self.logDebug("URL:" + parsed_url + ' ID:' + id)
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m is None:
+ self.error(_("CAPTCHA_PATTERN not found"))
+ captcha_url = 'http://www.ulozisko.sk' + m.group(1)
+
+ captcha = self.decryptCaptcha(captcha_url, cookies=True)
+
+ self.logDebug("CAPTCHA_URL:" + captcha_url + ' CAPTCHA:' + captcha)
+
+ self.download(parsed_url, post={
+ "antispam": captcha,
+ "id": id,
+ "name": self.pyfile.name,
+ "but": "++++STIAHNI+S%DABOR++++"
+ })
+
+
+getInfo = create_getInfo(UloziskoSk)
diff --git a/pyload/plugins/hoster/UnibytesCom.py b/pyload/plugins/hoster/UnibytesCom.py
new file mode 100644
index 000000000..6ed3805b1
--- /dev/null
+++ b/pyload/plugins/hoster/UnibytesCom.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pycurl import FOLLOWLOCATION
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class UnibytesCom(SimpleHoster):
+ __name__ = "UnibytesCom"
+ __type__ = "hoster"
+ __version__ = "0.11"
+
+ __pattern__ = r'https?://(?:www\.)?unibytes\.com/[\w .-]{11}B'
+
+ __description__ = """UniBytes.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ HOSTER_DOMAIN = "unibytes.com"
+
+ INFO_PATTERN = r'<span[^>]*?id="fileName"[^>]*>(?P<N>[^>]+)</span>\s*\((?P<S>\d.*?)\)'
+
+ WAIT_PATTERN = r'Wait for <span id="slowRest">(\d+)</span> sec'
+ LINK_PATTERN = r'<a href="([^"]+)">Download</a>'
+
+
+ def handleFree(self):
+ domain = "http://www.%s/" % self.HOSTER_DOMAIN
+ action, post_data = self.parseHtmlForm('id="startForm"')
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+
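+        # step through the multi-step download form (timer -> captcha -> last);
+        # a Location header or the final LINK_PATTERN ends the loop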
+ for _i in xrange(8):
+ self.logDebug(action, post_data)
+ self.html = self.load(urljoin(domain, action), post=post_data)
+
+ m = re.search(r'location:\s*(\S+)', self.req.http.header, re.I)
+ if m:
+ url = m.group(1)
+ break
+
+ if '>Somebody else is already downloading using your IP-address<' in self.html:
+ self.wait(10 * 60, True)
+ self.retry()
+
+ if post_data['step'] == 'last':
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m:
+ url = m.group(1)
+ self.correctCaptcha()
+ break
+ else:
+ self.invalidCaptcha()
+
+ last_step = post_data['step']
+ action, post_data = self.parseHtmlForm('id="stepForm"')
+
+ if last_step == 'timer':
+ m = re.search(self.WAIT_PATTERN, self.html)
+ self.wait(int(m.group(1)) if m else 60, False)
+ elif last_step in ("captcha", "last"):
+ post_data['captcha'] = self.decryptCaptcha(urljoin(domain, "/captcha.jpg"))
+ else:
+ self.fail(_("No valid captcha code entered"))
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+ self.download(url)
+
+
+getInfo = create_getInfo(UnibytesCom)
diff --git a/pyload/plugins/hoster/UnrestrictLi.py b/pyload/plugins/hoster/UnrestrictLi.py
new file mode 100644
index 000000000..583a9f4a9
--- /dev/null
+++ b/pyload/plugins/hoster/UnrestrictLi.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from datetime import datetime, timedelta
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Hoster import Hoster
+
+
+def secondsToMidnight(gmt=0):
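+    # seconds until 00:10 in the given GMT offset; the extra 10 minutes presumably give
+    # the hoster time to reset its daily counters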
+ now = datetime.utcnow() + timedelta(hours=gmt)
+    if now.hour == 0 and now.minute < 10:
+ midnight = now
+ else:
+ midnight = now + timedelta(days=1)
+ midnight = midnight.replace(hour=0, minute=10, second=0, microsecond=0)
+ return int((midnight - now).total_seconds())
+
+
+class UnrestrictLi(Hoster):
+ __name__ = "UnrestrictLi"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?(unrestrict|unr)\.li'
+
+ __description__ = """Unrestrict.li hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def setup(self):
+ self.chunkLimit = 16
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ elif not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Unrestrict.li")
+ self.fail(_("No Unrestrict.li account provided"))
+ else:
+ self.logDebug("Old URL: %s" % pyfile.url)
+ for _i in xrange(5):
+ page = self.load('https://unrestrict.li/unrestrict.php',
+ post={'link': pyfile.url, 'domain': 'long'})
+ self.logDebug("JSON data: " + page)
+ if page != '':
+ break
+ else:
+ self.logInfo(_("Unable to get API data, waiting 1 minute and retry"))
+ self.retry(5, 60, "Unable to get API data")
+
+ if 'Expired session' in page or ("You are not allowed to "
+ "download from this host" in page and self.premium):
+ self.account.relogin(self.user)
+ self.retry()
+ elif "File offline" in page:
+ self.offline()
+ elif "You are not allowed to download from this host" in page:
+ self.fail(_("You are not allowed to download from this host"))
+ elif "You have reached your daily limit for this host" in page:
+ self.logWarning(_("Reached daily limit for this host"))
+ self.retry(5, secondsToMidnight(gmt=2), "Daily limit for this host reached")
+ elif "ERROR_HOSTER_TEMPORARILY_UNAVAILABLE" in page:
+ self.logInfo(_("Hoster temporarily unavailable, waiting 1 minute and retry"))
+ self.retry(5, 60, "Hoster is temporarily unavailable")
+ page = json_loads(page)
+ new_url = page.keys()[0]
+ self.api_data = page[new_url]
+
+ if new_url != pyfile.url:
+ self.logDebug("New URL: " + new_url)
+
+ if hasattr(self, 'api_data'):
+ self.setNameSize()
+
+ self.download(new_url, disposition=True)
+
+ if self.getConfig("history"):
+ self.load("https://unrestrict.li/history/&delete=all")
+ self.logInfo(_("Download history deleted"))
+
+
+ def setNameSize(self):
+ if 'name' in self.api_data:
+ self.pyfile.name = self.api_data['name']
+ if 'size' in self.api_data:
+ self.pyfile.size = self.api_data['size']
diff --git a/pyload/plugins/hoster/UpleaCom.py b/pyload/plugins/hoster/UpleaCom.py
new file mode 100644
index 000000000..7a7dd4870
--- /dev/null
+++ b/pyload/plugins/hoster/UpleaCom.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urljoin
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class UpleaCom(XFSHoster):
+ __name__ = "UpleaCom"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
+
+ __description__ = """Uplea.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Redleon", None)]
+
+
+ HOSTER_DOMAIN = "uplea.com"
+
+ NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<'
+    SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_]+)</span>'
+
+ OFFLINE_PATTERN = r'>You followed an invalid or expired link'
+
+    LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"'
+ WAIT_PATTERN = r'timeText:([\d.]+),'
+ VARS_PATTERN = r'class="cel_tbl_step1_foot">\s<a href="(/step/.+)">'
+
+
+ def setup(self):
+ self.multiDL = False
+ self.chunkLimit = 1
+ self.resumeDownload = True
+
+
+ def handleFree(self):
+ m = re.search(self.VARS_PATTERN, self.html)
+ if m is None:
+ self.error("VARS_PATTERN not found")
+
+        self.html = self.load(urljoin("http://uplea.com/", m.group(1)))
+
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+            self.wait(int(float(m.group(1))), True)
+ self.retry()
+
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error("LINK_PATTERN not found")
+
+ self.wait(15)
+ self.download(m.group(1), disposition=True)
+
+
+getInfo = create_getInfo(UpleaCom)
diff --git a/pyload/plugins/hoster/UploadStationCom.py b/pyload/plugins/hoster/UploadStationCom.py
new file mode 100644
index 000000000..ad6779364
--- /dev/null
+++ b/pyload/plugins/hoster/UploadStationCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class UploadStationCom(DeadHoster):
+ __name__ = "UploadStationCom"
+ __type__ = "hoster"
+ __version__ = "0.52"
+
+ __pattern__ = r'http://(?:www\.)?uploadstation\.com/file/(?P<id>\w+)'
+
+ __description__ = """UploadStation.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(UploadStationCom)
diff --git a/pyload/plugins/hoster/UploadboxCom.py b/pyload/plugins/hoster/UploadboxCom.py
new file mode 100644
index 000000000..89a525c67
--- /dev/null
+++ b/pyload/plugins/hoster/UploadboxCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class UploadboxCom(DeadHoster):
+    __name__ = "UploadboxCom"
+ __type__ = "hoster"
+ __version__ = "0.05"
+
+ __pattern__ = r'http://(?:www\.)?uploadbox\.com/files/.+'
+
+ __description__ = """UploadBox.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(UploadboxCom)
diff --git a/pyload/plugins/hoster/UploadedTo.py b/pyload/plugins/hoster/UploadedTo.py
new file mode 100644
index 000000000..c39df66ea
--- /dev/null
+++ b/pyload/plugins/hoster/UploadedTo.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://ul.to/044yug9o
+# http://ul.to/gzfhd0xs
+
+import re
+
+from time import sleep
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.plugins.Plugin import chunks
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.utils import html_unescape, parseFileSize
+
+
+key = "bGhGMkllZXByd2VEZnU5Y2NXbHhYVlZ5cEE1bkEzRUw=".decode('base64')
+
+
+def getID(url):
+ """ returns id from file url"""
+ m = re.match(UploadedTo.__pattern__, url)
+ return m.group('ID')
+
+
+def getAPIData(urls):
+ post = {"apikey": key}
+
+ idMap = {}
+
+ for i, url in enumerate(urls):
+ id = getID(url)
+ post['id_%s' % i] = id
+ idMap[id] = url
+
+ for _i in xrange(5):
+ api = unicode(getURL("http://uploaded.net/api/filemultiple", post=post, decode=False), 'iso-8859-1')
+ if api != "can't find request":
+ break
+ else:
+ sleep(3)
+
+ result = {}
+
+ if api:
+ for line in api.splitlines():
+ data = line.split(",", 4)
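+            # each line appears to be "<status>,<id>,<size>,<sha1>,<name>"; results are keyed by file id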
+ if data[1] in idMap:
+ result[data[1]] = (data[0], data[2], data[4], data[3], idMap[data[1]])
+
+ return result
+
+
+def parseFileInfo(self, url='', html=''):
+ if not html and hasattr(self, "html"):
+ html = self.html
+
+ name = url
+ size = 0
+ fileid = None
+
+ if re.search(self.OFFLINE_PATTERN, html):
+ # File offline
+ status = 1
+ else:
+ m = re.search(self.INFO_PATTERN, html)
+ if m:
+ name, fileid = html_unescape(m.group('N')), m.group('ID')
+ size = parseFileSize(m.group('S'))
+ status = 2
+ else:
+ status = 3
+
+ return name, size, status, fileid
+
+
+def getInfo(urls):
+ for chunk in chunks(urls, 80):
+ result = []
+
+ api = getAPIData(chunk)
+
+ for data in api.itervalues():
+ if data[0] == "online":
+ result.append((html_unescape(data[2]), data[1], 2, data[4]))
+
+ elif data[0] == "offline":
+ result.append((data[4], 0, 1, data[4]))
+
+ yield result
+
+
+class UploadedTo(Hoster):
+ __name__ = "UploadedTo"
+ __type__ = "hoster"
+ __version__ = "0.75"
+
+ __pattern__ = r'https?://(?:www\.)?(uploaded\.(to|net)|ul\.to)(/file/|/?\?id=|.*?&id=|/)(?P<ID>\w+)'
+
+ __description__ = """Uploaded.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org"),
+ ("mkaay", "mkaay@mkaay.de"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("netpok", "netpok@gmail.com"),
+ ("stickell", "l.stickell@yahoo.it")]
+
+
+ INFO_PATTERN = r'<a href="file/(?P<ID>\w+)" id="filename">(?P<N>[^<]+)</a> &nbsp;\s*<small[^>]*>(?P<S>[^<]+)</small>'
+ OFFLINE_PATTERN = r'<small class="cL">Error: 404</small>'
+ DL_LIMIT_PATTERN = r'You have reached the max. number of possible free downloads for this hour'
+
+
+ def setup(self):
+ self.multiDL = self.resumeDownload = self.premium
+ self.chunkLimit = 1 # critical problems with more chunks
+
+ self.fileID = getID(self.pyfile.url)
+ self.pyfile.url = "http://uploaded.net/file/%s" % self.fileID
+
+
+ def process(self, pyfile):
+ self.load("http://uploaded.net/language/en", just_header=True)
+
+ api = getAPIData([pyfile.url])
+
+ # TODO: fallback to parse from site, because api sometimes delivers wrong status codes
+
+ if not api:
+ self.logWarning(_("No response for API call"))
+
+ self.html = unicode(self.load(pyfile.url, decode=False), 'iso-8859-1')
+ name, size, status, self.fileID = parseFileInfo(self)
+ self.logDebug(name, size, status, self.fileID)
+ if status == 1:
+ self.offline()
+ elif status == 2:
+ pyfile.name, pyfile.size = name, size
+ else:
+ self.error(_("file info"))
+
+ elif api == 'Access denied':
+ self.fail(_("API key invalid"))
+
+ else:
+ if self.fileID not in api:
+ self.offline()
+
+ self.data = api[self.fileID]
+ if self.data[0] != "online":
+ self.offline()
+
+ pyfile.name = html_unescape(self.data[2])
+
+ # pyfile.name = self.get_file_name()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+
+ def handlePremium(self):
+ info = self.account.getAccountInfo(self.user, True)
+ self.logDebug("%(name)s: Use Premium Account (%(left)sGB left)" % {"name": self.__name__,
+ "left": info['trafficleft'] / 1024 / 1024})
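+        # self.data[1] holds the file size in bytes; trafficleft appears to be kept in KiB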
+ if int(self.data[1]) / 1024 > info['trafficleft']:
+ self.logInfo(_("Not enough traffic left"))
+ self.account.empty(self.user)
+ self.resetAccount()
+ self.fail(_("Traffic exceeded"))
+
+ header = self.load("http://uploaded.net/file/%s" % self.fileID, just_header=True)
+ if 'location' in header:
+ #Direct download
+ self.logDebug("Direct download link detected")
+ self.download(header['location'])
+ else:
+ #Indirect download
+ self.html = self.load("http://uploaded.net/file/%s" % self.fileID)
+ m = re.search(r'<div class="tfree".*\s*<form method="post" action="(.*?)"', self.html)
+ if m is None:
+                self.fail(_("Download URL not found. Try to enable direct downloads"))
+ url = m.group(1)
+ self.download(url, post={})
+
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ if 'var free_enabled = false;' in self.html:
+ self.logError(_("Free-download capacities exhausted"))
+ self.retry(24, 5 * 60)
+
+ m = re.search(r"Current waiting period: <span>(\d+)</span> seconds", self.html)
+ if m is None:
+ self.fail(_("File not downloadable for free users"))
+ self.setWait(int(m.group(1)))
+
+ self.html = self.load("http://uploaded.net/js/download.js", decode=True)
+
+ url = "http://uploaded.net/io/ticket/captcha/%s" % self.fileID
+ downloadURL = ""
+
+ recaptcha = ReCaptcha(self)
+
+ for _i in xrange(5):
+ challenge, result = recaptcha.challenge()
+ options = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": result}
+ self.wait()
+
+ result = self.load(url, post=options)
+ self.logDebug("Result: %s" % result)
+
+ if "limit-size" in result:
+ self.fail(_("File too big for free download"))
+ elif "limit-slot" in result: # Temporary restriction so just wait a bit
+ self.setWait(30 * 60, True)
+ self.wait()
+ self.retry()
+ elif "limit-parallel" in result:
+ self.fail(_("Cannot download in parallel"))
+ elif "limit-dl" in result or self.DL_LIMIT_PATTERN in result: # limit-dl
+ self.setWait(3 * 60 * 60, True)
+ self.wait()
+ self.retry()
+ elif '"err":"captcha"' in result:
+ self.invalidCaptcha()
+ elif "type:'download'" in result:
+ self.correctCaptcha()
+ downloadURL = re.search("url:'([^']+)", result).group(1)
+ break
+ else:
+ self.error(_("Unknown error: %s") % result)
+
+ if not downloadURL:
+ self.fail(_("No Download url retrieved/all captcha attempts failed"))
+
+ self.download(downloadURL, disposition=True)
+ check = self.checkDownload({"limit-dl": self.DL_LIMIT_PATTERN})
+ if check == "limit-dl":
+ self.setWait(3 * 60 * 60, True)
+ self.wait()
+ self.retry()
diff --git a/pyload/plugins/hoster/UploadhereCom.py b/pyload/plugins/hoster/UploadhereCom.py
new file mode 100644
index 000000000..ba02ea07c
--- /dev/null
+++ b/pyload/plugins/hoster/UploadhereCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class UploadhereCom(DeadHoster):
+ __name__ = "UploadhereCom"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'http://(?:www\.)?uploadhere\.com/\w{10}'
+
+ __description__ = """Uploadhere.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(UploadhereCom)
diff --git a/pyload/plugins/hoster/UploadheroCom.py b/pyload/plugins/hoster/UploadheroCom.py
new file mode 100644
index 000000000..97100b17d
--- /dev/null
+++ b/pyload/plugins/hoster/UploadheroCom.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# http://uploadhero.co/dl/wQBRAVSM
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class UploadheroCom(SimpleHoster):
+ __name__ = "UploadheroCom"
+ __type__ = "hoster"
+ __version__ = "0.16"
+
+ __pattern__ = r'http://(?:www\.)?uploadhero\.com?/dl/\w+'
+
+ __description__ = """UploadHero.co plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mcmyst", "mcmyst@hotmail.fr"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'<div class="nom_de_fichier">(?P<N>.*?)</div>'
+ SIZE_PATTERN = r'Taille du fichier : </span><strong>(?P<S>.*?)</strong>'
+ OFFLINE_PATTERN = r'<p class="titre_dl_2">|<div class="raison"><strong>Le lien du fichier ci-dessus n\'existe plus.'
+
+ COOKIES = [("uploadhero.co", "lang", "en")]
+
+ IP_BLOCKED_PATTERN = r'href="(/lightbox_block_download\.php\?min=.*?)"'
+ IP_WAIT_PATTERN = r'<span id="minutes">(\d+)</span>.*\s*<span id="seconds">(\d+)</span>'
+
+ CAPTCHA_PATTERN = r'"(/captchadl\.php\?\w+)"'
+ FREE_URL_PATTERN = r'var magicomfg = \'<a href="(http://[^<>"]*?)"|"(http://storage\d+\.uploadhero\.co/\?d=\w+/[^<>"/]+)"'
+ PREMIUM_URL_PATTERN = r'<a href="([^"]+)" id="downloadnow"'
+
+
+ def handleFree(self):
+ self.checkErrors()
+
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m is None:
+ self.error(_("CAPTCHA_PATTERN not found"))
+ captcha_url = "http://uploadhero.co" + m.group(1)
+
+ for _i in xrange(5):
+ captcha = self.decryptCaptcha(captcha_url)
+ self.html = self.load(self.pyfile.url, get={"code": captcha})
+ m = re.search(self.FREE_URL_PATTERN, self.html)
+ if m:
+ self.correctCaptcha()
+ download_url = m.group(1) or m.group(2)
+ break
+ else:
+ self.invalidCaptcha()
+ else:
+ self.fail(_("No valid captcha code entered"))
+
+ self.download(download_url)
+
+
+ def handlePremium(self):
+ self.logDebug("%s: Use Premium Account" % self.__name__)
+ link = re.search(self.PREMIUM_URL_PATTERN, self.html).group(1)
+ self.download(link)
+
+
+ def checkErrors(self):
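+        # if the IP is temporarily blocked, load the block page to read the
+        # remaining wait time (minutes/seconds), then wait and retry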
+ m = re.search(self.IP_BLOCKED_PATTERN, self.html)
+ if m:
+ self.html = self.load("http://uploadhero.co%s" % m.group(1))
+
+ m = re.search(self.IP_WAIT_PATTERN, self.html)
+ wait_time = (int(m.group(1)) * 60 + int(m.group(2))) if m else 5 * 60
+ self.wait(wait_time, True)
+ self.retry()
+
+
+getInfo = create_getInfo(UploadheroCom)
diff --git a/pyload/plugins/hoster/UploadingCom.py b/pyload/plugins/hoster/UploadingCom.py
new file mode 100644
index 000000000..3c0bc7ff9
--- /dev/null
+++ b/pyload/plugins/hoster/UploadingCom.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import HTTPHEADER
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, timestamp
+
+
+class UploadingCom(SimpleHoster):
+ __name__ = "UploadingCom"
+ __type__ = "hoster"
+ __version__ = "0.38"
+
+ __pattern__ = r'http://(?:www\.)?uploading\.com/files/(?:get/)?(?P<ID>\w+)'
+
+ __description__ = """Uploading.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de"),
+ ("mkaay", "mkaay@mkaay.de"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'id="file_title">(?P<N>.+)</'
+ SIZE_PATTERN = r'size tip_container">(?P<S>[\d.,]+) (?P<U>[\w^_]+)<'
+ OFFLINE_PATTERN = r'(Page|file) not found'
+
+ COOKIES = [("uploading.com", "lang", "1"),
+ (".uploading.com", "language", "1"),
+ (".uploading.com", "setlang", "en"),
+ (".uploading.com", "_lang", "en")]
+
+
+ def process(self, pyfile):
+ if not "/get/" in pyfile.url:
+ pyfile.url = pyfile.url.replace("/files", "/files/get")
+
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+
+ def handlePremium(self):
+ postData = {'action': 'get_link',
+ 'code': self.info['ID'],
+ 'pass': 'undefined'}
+
+ self.html = self.load('http://uploading.com/files/get/?JsHttpRequest=%d-xml' % timestamp(), post=postData)
+ url = re.search(r'"link"\s*:\s*"(.*?)"', self.html)
+        if url:
+            url = url.group(1).replace("\\/", "/")
+            self.download(url)
+            return
+
+        raise Exception("Plugin defect")
+
+
+ def handleFree(self):
+ m = re.search('<h2>((Daily )?Download Limit)</h2>', self.html)
+ if m:
+ self.pyfile.error = m.group(1)
+ self.logWarning(self.pyfile.error)
+ self.retry(6, (6 * 60 if m.group(2) else 15) * 60, self.pyfile.error)
+
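+        # free download is a two-step AJAX flow: "second_page" returns the wait
+        # time, "get_link" then returns the actual download link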
+ ajax_url = "http://uploading.com/files/get/?ajax"
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
+ self.req.http.lastURL = self.pyfile.url
+
+ res = json_loads(self.load(ajax_url, post={'action': 'second_page', 'code': self.info['ID']}))
+
+ if 'answer' in res and 'wait_time' in res['answer']:
+ wait_time = int(res['answer']['wait_time'])
+ self.logInfo(_("Waiting %d seconds") % wait_time)
+ self.wait(wait_time)
+ else:
+ self.error(_("No AJAX/WAIT"))
+
+ res = json_loads(self.load(ajax_url, post={'action': 'get_link', 'code': self.info['ID'], 'pass': 'false'}))
+
+ if 'answer' in res and 'link' in res['answer']:
+ url = res['answer']['link']
+ else:
+ self.error(_("No AJAX/URL"))
+
+ self.html = self.load(url)
+ m = re.search(r'<form id="file_form" action="(.*?)"', self.html)
+ if m:
+ url = m.group(1)
+ else:
+ self.error(_("No URL"))
+
+ self.download(url)
+
+ check = self.checkDownload({"html": re.compile("\A<!DOCTYPE html PUBLIC")})
+ if check == "html":
+ self.logWarning(_("Redirected to a HTML page, wait 10 minutes and retry"))
+ self.wait(10 * 60, True)
+
+
+getInfo = create_getInfo(UploadingCom)
diff --git a/pyload/plugins/hoster/UploadkingCom.py b/pyload/plugins/hoster/UploadkingCom.py
new file mode 100644
index 000000000..e9c536819
--- /dev/null
+++ b/pyload/plugins/hoster/UploadkingCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class UploadkingCom(DeadHoster):
+ __name__ = "UploadkingCom"
+ __type__ = "hoster"
+ __version__ = "0.14"
+
+ __pattern__ = r'http://(?:www\.)?uploadking\.com/\w{10}'
+
+ __description__ = """UploadKing.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+getInfo = create_getInfo(UploadkingCom)
diff --git a/pyload/plugins/hoster/UpstoreNet.py b/pyload/plugins/hoster/UpstoreNet.py
new file mode 100644
index 000000000..255526aa2
--- /dev/null
+++ b/pyload/plugins/hoster/UpstoreNet.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.CaptchaService import ReCaptcha
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class UpstoreNet(SimpleHoster):
+ __name__ = "UpstoreNet"
+ __type__ = "hoster"
+ __version__ = "0.03"
+
+ __pattern__ = r'https?://(?:www\.)?upstore\.net/'
+
+ __description__ = """Upstore.Net File Download Hoster"""
+ __license__ = "GPLv3"
+ __authors__ = [("igel", "igelkun@myopera.com")]
+
+
+ INFO_PATTERN = r'<div class="comment">.*?</div>\s*\n<h2 style="margin:0">(?P<N>.*?)</h2>\s*\n<div class="comment">\s*\n\s*(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+ OFFLINE_PATTERN = r'<span class="error">File not found</span>'
+
+ WAIT_PATTERN = r'var sec = (\d+)'
+ CHASH_PATTERN = r'<input type="hidden" name="hash" value="([^"]*)">'
+ LINK_PATTERN = r'<a href="(https?://.*?)" target="_blank"><b>'
+
+
+ def handleFree(self):
+ # STAGE 1: get link to continue
+ m = re.search(self.CHASH_PATTERN, self.html)
+ if m is None:
+ self.error(_("CHASH_PATTERN not found"))
+ chash = m.group(1)
+ self.logDebug("Read hash " + chash)
+ # continue to stage2
+ post_data = {'hash': chash, 'free': 'Slow download'}
+ self.html = self.load(self.pyfile.url, post=post_data, decode=True)
+
+        # STAGE 2: solve the captcha and wait
+ # first get the infos we need: recaptcha key and wait time
+ recaptcha = ReCaptcha(self)
+
+ # try the captcha 5 times
+        for _i in xrange(5):
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m is None:
+ self.error(_("Wait pattern not found"))
+            wait_time = int(m.group(1))
+
+ # then, do the waiting
+ self.wait(wait_time)
+
+ # then, handle the captcha
+ challenge, code = recaptcha.challenge()
+ post_data['recaptcha_challenge_field'] = challenge
+ post_data['recaptcha_response_field'] = code
+
+ self.html = self.load(self.pyfile.url, post=post_data, decode=True)
+
+ # STAGE 3: get direct link
+ m = re.search(self.LINK_PATTERN, self.html, re.S)
+ if m:
+ break
+
+ if m is None:
+ self.error(_("Download link not found"))
+
+ direct = m.group(1)
+ self.download(direct, disposition=True)
+
+
+getInfo = create_getInfo(UpstoreNet)
diff --git a/pyload/plugins/hoster/UptoboxCom.py b/pyload/plugins/hoster/UptoboxCom.py
new file mode 100644
index 000000000..ad14307c2
--- /dev/null
+++ b/pyload/plugins/hoster/UptoboxCom.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class UptoboxCom(XFSHoster):
+ __name__ = "UptoboxCom"
+ __type__ = "hoster"
+ __version__ = "0.16"
+
+ __pattern__ = r'https?://(?:www\.)?uptobox\.com/\w{12}'
+
+ __description__ = """Uptobox.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = "uptobox.com"
+
+ INFO_PATTERN = r'"para_title">(?P<N>.+) \((?P<S>[\d.,]+) (?P<U>[\w^_]+)\)'
+ OFFLINE_PATTERN = r'>(File not found|Access Denied|404 Not Found)'
+
+ LINK_PATTERN = r'"(https?://\w+\.uptobox\.com/d/.*?)"'
+
+ ERROR_PATTERN = r'>(You have to wait.+till next download.)<' #@TODO: Check XFSHoster ERROR_PATTERN
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = 1
+ self.resumeDownload = True
+
+
+getInfo = create_getInfo(UptoboxCom)
diff --git a/pyload/plugins/hoster/VeehdCom.py b/pyload/plugins/hoster/VeehdCom.py
new file mode 100644
index 000000000..326d0a18e
--- /dev/null
+++ b/pyload/plugins/hoster/VeehdCom.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class VeehdCom(Hoster):
+ __name__ = "VeehdCom"
+ __type__ = "hoster"
+ __version__ = "0.23"
+
+ __pattern__ = r'http://veehd\.com/video/\d+_\S+'
+ __config__ = [("filename_spaces", "bool", "Allow spaces in filename", False),
+ ("replacement_char", "str", "Filename replacement character", "_")]
+
+ __description__ = """Veehd.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("cat", "cat@pyload")]
+
+
+ def setup(self):
+ self.multiDL = True
+ self.req.canContinue = True
+
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.logDebug("Requesting page: %s" % url)
+ self.html = self.load(url)
+
+
+ def file_exists(self):
+ if not self.html:
+ self.download_html()
+
+ if '<title>Veehd</title>' in self.html:
+ return False
+ return True
+
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ m = re.search(r'<title[^>]*>([^<]+) on Veehd</title>', self.html)
+ if m is None:
+ self.error(_("Video title not found"))
+
+ name = m.group(1)
+
+ # replace unwanted characters in filename
+ if self.getConfig('filename_spaces'):
+ pattern = '[^\w ]+'
+ else:
+ pattern = '[^\w.]+'
+
+ return re.sub(pattern, self.getConfig('replacement_char'), name) + '.avi'
+
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ m = re.search(r'<embed type="video/divx" src="(http://([^/]*\.)?veehd\.com/dl/[^"]+)"',
+ self.html)
+ if m is None:
+ self.error(_("Embedded video url not found"))
+
+ return m.group(1)
diff --git a/pyload/plugins/hoster/VeohCom.py b/pyload/plugins/hoster/VeohCom.py
new file mode 100644
index 000000000..0f756211c
--- /dev/null
+++ b/pyload/plugins/hoster/VeohCom.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class VeohCom(SimpleHoster):
+ __name__ = "VeohCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?veoh\.com/(tv/)?(watch|videos)/(?P<ID>v\w+)'
+ __config__ = [("quality", "Low;High;Auto", "Quality", "Auto")]
+
+ __description__ = """Veoh.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'<meta name="title" content="(?P<N>.*?)"'
+ OFFLINE_PATTERN = r'>Sorry, we couldn\'t find the video you were looking for'
+
+ URL_REPLACEMENTS = [(__pattern__, r'http://www.veoh.com/watch/\g<ID>')]
+
+ COOKIES = [("veoh.com", "lassieLocale", "en")]
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = -1
+
+
+ def handleFree(self):
+ quality = self.getConfig("quality")
+ if quality == "Auto":
+ quality = ("High", "Low")
+ for q in quality:
+ pattern = r'"fullPreviewHash%sPath":"(.+?)"' % q
+ m = re.search(pattern, self.html)
+ if m:
+ self.pyfile.name += ".mp4"
+ link = m.group(1).replace("\\", "")
+ self.download(link)
+ return
+ else:
+ self.logInfo(_("No %s quality video found") % q.upper())
+ else:
+ self.fail(_("No video found!"))
+
+
+getInfo = create_getInfo(VeohCom)
diff --git a/pyload/plugins/hoster/VidPlayNet.py b/pyload/plugins/hoster/VidPlayNet.py
new file mode 100644
index 000000000..3a5198973
--- /dev/null
+++ b/pyload/plugins/hoster/VidPlayNet.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+#
+# Test links:
+# BigBuckBunny_320x180.mp4 - 61.7 Mb - http://vidplay.net/38lkev0h3jv0
+
+from pyload.plugins.internal.XFSHoster import XFSHoster, create_getInfo
+
+
+class VidPlayNet(XFSHoster):
+ __name__ = "VidPlayNet"
+ __type__ = "hoster"
+ __version__ = "0.04"
+
+ __pattern__ = r'https?://(?:www\.)?vidplay\.net/\w{12}'
+
+ __description__ = """VidPlay.net hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("t4skforce", "t4skforce1337[AT]gmail[DOT]com")]
+
+
+ HOSTER_DOMAIN = "vidplay.net"
+
+ NAME_PATTERN = r'<b>Password:</b></div>\s*<h[1-6]>(?P<N>[^<]+)</h[1-6]>'
+
+
+getInfo = create_getInfo(VidPlayNet)
diff --git a/pyload/plugins/hoster/VimeoCom.py b/pyload/plugins/hoster/VimeoCom.py
new file mode 100644
index 000000000..5bd1b9afb
--- /dev/null
+++ b/pyload/plugins/hoster/VimeoCom.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class VimeoCom(SimpleHoster):
+ __name__ = "VimeoCom"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'https?://(?:www\.)?(player\.)?vimeo\.com/(video/)?(?P<ID>\d+)'
+ __config__ = [("quality", "Lowest;Mobile;SD;HD;Highest", "Quality", "Highest"),
+ ("original", "bool", "Try to download the original file first", True)]
+
+ __description__ = """Vimeo.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'<title>(?P<N>.+) on Vimeo<'
+ OFFLINE_PATTERN = r'class="exception_header"'
+ TEMP_OFFLINE_PATTERN = r'Please try again in a few minutes.<'
+
+ URL_REPLACEMENTS = [(__pattern__, r'https://www.vimeo.com/\g<ID>')]
+
+ COOKIES = [("vimeo.com", "language", "en")]
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = -1
+
+
+ def handleFree(self):
+ password = self.getPassword()
+
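+        # two possible link sources: the site's download dialog (needs the JS
+        # engine) or the player config JSON, each with its own URL/quality pattern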
+ if self.js and 'class="btn iconify_down_b"' in self.html:
+ html = self.js.eval(self.load(self.pyfile.url, get={'action': "download", 'password': password}, decode=True))
+ pattern = r'href="(?P<URL>http://vimeo\.com.+?)".*?\>(?P<QL>.+?) '
+ else:
+ id = re.match(self.__pattern__, self.pyfile.url).group("ID")
+ html = self.load("https://player.vimeo.com/video/" + id, get={'password': password})
+ pattern = r'"(?P<QL>\w+)":{"profile".*?"(?P<URL>http://pdl\.vimeocdn\.com.+?)"'
+
+ link = dict([(l.group('QL').lower(), l.group('URL')) for l in re.finditer(pattern, html)])
+
+ if self.getConfig("original"):
+ if "original" in link:
+                self.download(link["original"])
+ return
+ else:
+ self.logInfo(_("Original file not downloadable"))
+
+ quality = self.getConfig("quality")
+ if quality == "Highest":
+ qlevel = ("hd", "sd", "mobile")
+ elif quality == "Lowest":
+ qlevel = ("mobile", "sd", "hd")
+ else:
+            qlevel = (quality.lower(),)
+
+ for q in qlevel:
+ if q in link:
+ self.download(link[q])
+ return
+ else:
+ self.logInfo(_("No %s quality video found") % q.upper())
+ else:
+ self.fail(_("No video found!"))
+
+
+getInfo = create_getInfo(VimeoCom)
diff --git a/pyload/plugins/hoster/Vipleech4uCom.py b/pyload/plugins/hoster/Vipleech4uCom.py
new file mode 100644
index 000000000..c5a444433
--- /dev/null
+++ b/pyload/plugins/hoster/Vipleech4uCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class Vipleech4uCom(DeadHoster):
+ __name__ = "Vipleech4uCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?vipleech4u\.com/manager\.php'
+
+ __description__ = """Vipleech4u.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Kagenoshin", "kagenoshin@gmx.ch")]
+
+
+getInfo = create_getInfo(Vipleech4uCom)
diff --git a/pyload/plugins/hoster/WarserverCz.py b/pyload/plugins/hoster/WarserverCz.py
new file mode 100644
index 000000000..cb6efeba6
--- /dev/null
+++ b/pyload/plugins/hoster/WarserverCz.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class WarserverCz(DeadHoster):
+ __name__ = "WarserverCz"
+ __type__ = "hoster"
+ __version__ = "0.13"
+
+ __pattern__ = r'http://(?:www\.)?warserver\.cz/stahnout/\d+'
+
+ __description__ = """Warserver.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+getInfo = create_getInfo(WarserverCz)
diff --git a/pyload/plugins/hoster/WebshareCz.py b/pyload/plugins/hoster/WebshareCz.py
new file mode 100644
index 000000000..bb41fbd26
--- /dev/null
+++ b/pyload/plugins/hoster/WebshareCz.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.SimpleHoster import SimpleHoster
+
+
+def getInfo(urls):
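+    # query the webshare.cz API for each link; the response is simple XML with
+    # <name> and <size> fields, or contains "File not found" for dead links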
+ for url in urls:
+ fid = re.search(WebshareCz.__pattern__, url).group('ID')
+ api_data = getURL("https://webshare.cz/api/file_info/", post={'ident': fid})
+
+ if 'File not found' in api_data:
+ file_info = (url, 0, 1, url)
+ else:
+ name = re.search('<name>(.+)</name>', api_data).group(1)
+ size = re.search('<size>(.+)</size>', api_data).group(1)
+ file_info = (name, size, 2, url)
+
+ yield file_info
+
+
+class WebshareCz(SimpleHoster):
+ __name__ = "WebshareCz"
+ __type__ = "hoster"
+ __version__ = "0.14"
+
+ __pattern__ = r'https?://(?:www\.)?webshare\.cz/(?:#/)?file/(?P<ID>\w+)'
+
+ __description__ = """WebShare.cz hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def handleFree(self):
+ api_data = self.load('https://webshare.cz/api/file_link/', post={'ident': self.fid})
+ self.logDebug("API data: " + api_data)
+ m = re.search('<link>(.+)</link>', api_data)
+ if m is None:
+ self.error(_("Unable to detect direct link"))
+ direct = m.group(1)
+ self.logDebug("Direct link: " + direct)
+ self.download(direct, disposition=True)
+
+
+ def getFileInfo(self):
+ self.logDebug("URL: %s" % self.pyfile.url)
+
+ self.fid = re.match(self.__pattern__, self.pyfile.url).group('ID')
+
+ self.load(self.pyfile.url)
+ api_data = self.load('https://webshare.cz/api/file_info/', post={'ident': self.fid})
+
+ if 'File not found' in api_data:
+ self.offline()
+ else:
+ self.pyfile.name = re.search('<name>(.+)</name>', api_data).group(1)
+ self.pyfile.size = re.search('<size>(.+)</size>', api_data).group(1)
+
+ self.logDebug("FILE NAME: %s FILE SIZE: %s" % (self.pyfile.name, self.pyfile.size))
diff --git a/pyload/plugins/hoster/WrzucTo.py b/pyload/plugins/hoster/WrzucTo.py
new file mode 100644
index 000000000..dbd09f138
--- /dev/null
+++ b/pyload/plugins/hoster/WrzucTo.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pycurl import HTTPHEADER
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class WrzucTo(SimpleHoster):
+ __name__ = "WrzucTo"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'http://(?:www\.)?wrzuc\.to/(\w+(\.wt|\.html)|(\w+/?linki/\w+))'
+
+ __description__ = """Wrzuc.to hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'id="file_info">\s*<strong>(?P<N>.*?)</strong>'
+ SIZE_PATTERN = r'class="info">\s*<tr>\s*<td>(?P<S>.*?)</td>'
+
+ COOKIES = [("wrzuc.to", "language", "en")]
+
+
+ def setup(self):
+ self.multiDL = True
+
+
+ def handleFree(self):
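+        # the page embeds "md5" and "file" tokens: the md5 token prepares the
+        # server via one AJAX call, the file token then yields the download link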
+ data = dict(re.findall(r'(md5|file): "(.*?)"', self.html))
+ if len(data) != 2:
+ self.error(_("No file ID"))
+
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
+ self.req.http.lastURL = self.pyfile.url
+ self.load("http://www.wrzuc.to/ajax/server/prepair", post={"md5": data['md5']})
+
+ self.req.http.lastURL = self.pyfile.url
+ self.html = self.load("http://www.wrzuc.to/ajax/server/download_link", post={"file": data['file']})
+
+ data.update(re.findall(r'"(download_link|server_id)":"(.*?)"', self.html))
+ if len(data) != 4:
+ self.error(_("No download URL"))
+
+ download_url = "http://%s.wrzuc.to/pobierz/%s" % (data['server_id'], data['download_link'])
+ self.download(download_url)
+
+
+getInfo = create_getInfo(WrzucTo)
diff --git a/pyload/plugins/hoster/WuploadCom.py b/pyload/plugins/hoster/WuploadCom.py
new file mode 100644
index 000000000..d2c202849
--- /dev/null
+++ b/pyload/plugins/hoster/WuploadCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class WuploadCom(DeadHoster):
+ __name__ = "WuploadCom"
+ __type__ = "hoster"
+ __version__ = "0.23"
+
+ __pattern__ = r'http://(?:www\.)?wupload\..*?/file/((\w+/)?\d+)(/.*)?'
+
+ __description__ = """Wupload.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de"),
+ ("Paul King", None)]
+
+
+getInfo = create_getInfo(WuploadCom)
diff --git a/pyload/plugins/hoster/X7To.py b/pyload/plugins/hoster/X7To.py
new file mode 100644
index 000000000..72e7d08db
--- /dev/null
+++ b/pyload/plugins/hoster/X7To.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+class X7To(DeadHoster):
+ __name__ = "X7To"
+ __type__ = "hoster"
+ __version__ = "0.41"
+
+ __pattern__ = r'http://(?:www\.)?x7\.to/'
+
+ __description__ = """X7.to hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("ernieb", "ernieb")]
+
+
+getInfo = create_getInfo(X7To)
diff --git a/pyload/plugins/hoster/XHamsterCom.py b/pyload/plugins/hoster/XHamsterCom.py
new file mode 100644
index 000000000..503022ec9
--- /dev/null
+++ b/pyload/plugins/hoster/XHamsterCom.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.Hoster import Hoster
+
+
+def clean_json(json_expr):
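+    # flashvars is embedded as a JS object literal; strip newlines and spaces
+    # and convert single quotes to double quotes so json_loads can parse it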
+ json_expr = re.sub('[\n\r]', '', json_expr)
+ json_expr = re.sub(' +', '', json_expr)
+ json_expr = re.sub('\'', '"', json_expr)
+
+ return json_expr
+
+
+class XHamsterCom(Hoster):
+ __name__ = "XHamsterCom"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'http://(?:www\.)?xhamster\.com/movies/.+'
+ __config__ = [("type", ".mp4;.flv", "Preferred type", ".mp4")]
+
+ __description__ = """XHamster.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = []
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+
+ if not self.file_exists():
+ self.offline()
+
+ if self.getConfig("type"):
+ self.desired_fmt = self.getConfig("type")
+
+ pyfile.name = self.get_file_name() + self.desired_fmt
+ self.download(self.get_file_url())
+
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ flashvar_pattern = re.compile('flashvars = ({.*?});', re.S)
+ json_flashvar = flashvar_pattern.search(self.html)
+
+ if not json_flashvar:
+ self.error(_("flashvar not found"))
+
+ j = clean_json(json_flashvar.group(1))
+ flashvars = json_loads(j)
+
+ if flashvars['srv']:
+ srv_url = flashvars['srv'] + '/'
+ else:
+ self.error(_("srv_url not found"))
+
+ if flashvars['url_mode']:
+            url_mode = flashvars['url_mode']
+        else:
+ self.error(_("url_mode not found"))
+
+ if self.desired_fmt == ".mp4":
+ file_url = re.search(r"<a href=\"" + srv_url + "(.+?)\"", self.html)
+ if file_url is None:
+ self.error(_("file_url not found"))
+ file_url = file_url.group(1)
+ long_url = srv_url + file_url
+ self.logDebug("long_url = " + long_url)
+ else:
+ if flashvars['file']:
+ file_url = unquote(flashvars['file'])
+ else:
+ self.error(_("file_url not found"))
+
+ if url_mode == '3':
+ long_url = file_url
+ self.logDebug("long_url = " + long_url)
+ else:
+ long_url = srv_url + "key=" + file_url
+ self.logDebug("long_url = " + long_url)
+
+ return long_url
+
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ pattern = r'<title>(.*?) - xHamster\.com</title>'
+ name = re.search(pattern, self.html)
+ if name is None:
+ pattern = r'<h1 >(.*)</h1>'
+ name = re.search(pattern, self.html)
+ if name is None:
+                pattern = r'http://(?:www\.)?xhamster\.com/movies/.*/(.*?)\.html?'
+                name = re.match(pattern, self.pyfile.url)
+ if name is None:
+ pattern = r'<div id="element_str_id" style="display:none;">(.*)</div>'
+ name = re.search(pattern, self.html)
+ if name is None:
+ return "Unknown"
+
+ return name.group(1)
+
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+ if re.search(r"(.*Video not found.*)", self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/XVideosCom.py b/pyload/plugins/hoster/XVideosCom.py
new file mode 100644
index 000000000..9ab9e4484
--- /dev/null
+++ b/pyload/plugins/hoster/XVideosCom.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class XVideosCom(Hoster):
+ __name__ = "XVideos.com"
+ __type__ = "hoster"
+ __version__ = "0.1"
+
+ __pattern__ = r'http://(?:www\.)?xvideos\.com/video(\d+)/.*'
+
+ __description__ = """XVideos.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = []
+
+
+ def process(self, pyfile):
+ site = self.load(pyfile.url)
+ pyfile.name = "%s (%s).flv" % (
+ re.search(r"<h2>([^<]+)<span", site).group(1),
+ re.match(self.__pattern__, pyfile.url).group(1),
+ )
+ self.download(unquote(re.search(r"flv_url=([^&]+)&", site).group(1)))
diff --git a/pyload/plugins/hoster/Xdcc.py b/pyload/plugins/hoster/Xdcc.py
new file mode 100644
index 000000000..de750ee58
--- /dev/null
+++ b/pyload/plugins/hoster/Xdcc.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+
+import re
+import socket
+import struct
+import sys
+import time
+
+from os import makedirs
+from os.path import exists, join
+from select import select
+
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import safe_join
+
+
+class Xdcc(Hoster):
+ __name__ = "Xdcc"
+ __type__ = "hoster"
+ __version__ = "0.32"
+
+ __config__ = [("nick", "str", "Nickname", "pyload"),
+ ("ident", "str", "Ident", "pyloadident"),
+ ("realname", "str", "Realname", "pyloadreal")]
+
+ __description__ = """Download from IRC XDCC bot"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.com")]
+
+
+ def setup(self):
+ self.debug = 0 # 0,1,2
+ self.timeout = 30
+ self.multiDL = False
+
+
+ def process(self, pyfile):
+ # change request type
+ self.req = pyfile.m.core.requestFactory.getRequest(self.__name__, type="XDCC")
+
+ self.pyfile = pyfile
+ for _i in xrange(0, 3):
+ try:
+ nmn = self.doDownload(pyfile.url)
+ self.logDebug("Download of %s finished." % nmn)
+ return
+ except socket.error, e:
+ if hasattr(e, "errno"):
+ errno = e.errno
+ else:
+ errno = e.args[0]
+
+ if errno == 10054:
+ self.logDebug("Server blocked our ip, retry in 5 min")
+ self.setWait(300)
+ self.wait()
+ continue
+
+ self.fail(_("Failed due to socket errors. Code: %d") % errno)
+
+ self.fail(_("Server blocked our ip, retry again later manually"))
+
+
+ def doDownload(self, url):
+ self.pyfile.setStatus("waiting") # real link
+
+ m = re.match(r'xdcc://(.*?)/#?(.*?)/(.*?)/#?(\d+)/?', url)
+ server = m.group(1)
+ chan = m.group(2)
+ bot = m.group(3)
+ pack = m.group(4)
+ nick = self.getConfig('nick')
+ ident = self.getConfig('ident')
+ real = self.getConfig('realname')
+
+ temp = server.split(':')
+ ln = len(temp)
+ if ln == 2:
+ host, port = temp
+ elif ln == 1:
+ host, port = temp[0], 6667
+ else:
+ self.fail(_("Invalid hostname for IRC Server: %s") % server)
+
+ #######################
+ # CONNECT TO IRC AND IDLE FOR REAL LINK
+ dl_time = time.time()
+
+ sock = socket.socket()
+ sock.connect((host, int(port)))
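+        # minimal IRC handshake: NICK/USER, join the channel, then request the
+        # pack from the bot via "xdcc send"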
+ if nick == "pyload":
+ nick = "pyload-%d" % (time.time() % 1000) # last 3 digits
+ sock.send("NICK %s\r\n" % nick)
+ sock.send("USER %s %s bla :%s\r\n" % (ident, host, real))
+ time.sleep(3)
+ sock.send("JOIN #%s\r\n" % chan)
+ sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
+
+ # IRC recv loop
+ readbuffer = ""
+ done = False
+ retry = None
+ m = None
+ while True:
+
+ # done is set if we got our real link
+ if done:
+ break
+
+ if retry:
+ if time.time() > retry:
+ retry = None
+ dl_time = time.time()
+ sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
+
+ else:
+ if (dl_time + self.timeout) < time.time(): # todo: add in config
+ sock.send("QUIT :byebye\r\n")
+ sock.close()
+ self.fail(_("XDCC Bot did not answer"))
+
+ fdset = select([sock], [], [], 0)
+ if sock not in fdset[0]:
+ continue
+
+ readbuffer += sock.recv(1024)
+ temp = readbuffer.split("\n")
+ readbuffer = temp.pop()
+
+ for line in temp:
+                if self.debug == 2:
+ print "*> " + unicode(line, errors='ignore')
+ line = line.rstrip()
+ first = line.split()
+
+ if first[0] == "PING":
+ sock.send("PONG %s\r\n" % first[1])
+
+ if first[0] == "ERROR":
+ self.fail(_("IRC-Error: %s") % line)
+
+ msg = line.split(None, 3)
+ if len(msg) != 4:
+ continue
+
+ msg = {
+ "origin": msg[0][1:],
+ "action": msg[1],
+ "target": msg[2],
+ "text": msg[3][1:]
+ }
+
+ if nick == msg['target'][0:len(nick)] and "PRIVMSG" == msg['action']:
+ if msg['text'] == "\x01VERSION\x01":
+ self.logDebug("Sending CTCP VERSION")
+ sock.send("NOTICE %s :%s\r\n" % (msg['origin'], "pyLoad! IRC Interface"))
+ elif msg['text'] == "\x01TIME\x01":
+ self.logDebug("Sending CTCP TIME")
+ sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
+ elif msg['text'] == "\x01LAG\x01":
+ pass # don't know how to answer
+
+ if not (bot == msg['origin'][0:len(bot)]
+ and nick == msg['target'][0:len(nick)]
+ and msg['action'] in ("PRIVMSG", "NOTICE")):
+ continue
+
+                if self.debug == 1:
+ print "%s: %s" % (msg['origin'], msg['text'])
+
+ if "You already requested that pack" in msg['text']:
+ retry = time.time() + 300
+
+ if "you must be on a known channel to request a pack" in msg['text']:
+ self.fail(_("Wrong channel"))
+
+ m = re.match('\x01DCC SEND (.*?) (\d+) (\d+)(?: (\d+))?\x01', msg['text'])
+ if m:
+ done = True
+
+ # get connection data
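+                    # the bot sends its IP as a 32-bit integer in network byte order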
+                    ip = socket.inet_ntoa(struct.pack('!I', int(m.group(2))))
+ port = int(m.group(3))
+ packname = m.group(1)
+
+ if len(m.groups()) > 3:
+ self.req.filesize = int(m.group(4))
+
+ self.pyfile.name = packname
+
+ download_folder = self.config['general']['download_folder']
+ filename = safe_join(download_folder, packname)
+
+ self.logInfo(_("Downloading %s from %s:%d") % (packname, ip, port))
+
+ self.pyfile.setStatus("downloading")
+ newname = self.req.download(ip, port, filename, sock, self.pyfile.setProgress)
+ if newname and newname != filename:
+ self.logInfo(_("%(name)s saved as %(newname)s") % {"name": self.pyfile.name, "newname": newname})
+ filename = newname
+
+ # kill IRC socket
+ # sock.send("QUIT :byebye\r\n")
+ sock.close()
+
+ self.lastDownload = filename
+ return self.lastDownload
diff --git a/pyload/plugins/hoster/YibaishiwuCom.py b/pyload/plugins/hoster/YibaishiwuCom.py
new file mode 100644
index 000000000..883dfa947
--- /dev/null
+++ b/pyload/plugins/hoster/YibaishiwuCom.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.utils import json_loads
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class YibaishiwuCom(SimpleHoster):
+ __name__ = "YibaishiwuCom"
+ __type__ = "hoster"
+ __version__ = "0.13"
+
+ __pattern__ = r'http://(?:www\.)?(?:u\.)?115\.com/file/(?P<ID>\w+)'
+
+ __description__ = """115.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ NAME_PATTERN = r'file_name: \'(?P<N>.+?)\''
+ SIZE_PATTERN = r'file_size: \'(?P<S>.+?)\''
+ OFFLINE_PATTERN = ur'<h3><i style="color:red;">哎呀提取码䞍存圚䞍劚搜搜看吧</i></h3>'
+
+ LINK_PATTERN = r'(/\?ct=(pickcode|download)[^"\']+)'
+
+
+ def handleFree(self):
+ m = re.search(self.LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("LINK_PATTERN not found"))
+ url = m.group(1)
+ self.logDebug(('FREEUSER' if m.group(2) == 'download' else 'GUEST') + ' URL', url)
+
+ res = json_loads(self.load("http://115.com" + url, decode=False))
+ if "urls" in res:
+ mirrors = res['urls']
+ elif "data" in res:
+ mirrors = res['data']
+ else:
+ mirrors = None
+
+        for mr in mirrors or []:
+ try:
+ url = mr['url'].replace("\\", "")
+ self.logDebug("Trying URL: " + url)
+ self.download(url)
+ break
+ except:
+ continue
+ else:
+ self.fail(_("No working link found"))
+
+
+getInfo = create_getInfo(YibaishiwuCom)
diff --git a/pyload/plugins/hoster/YoupornCom.py b/pyload/plugins/hoster/YoupornCom.py
new file mode 100644
index 000000000..6063e6816
--- /dev/null
+++ b/pyload/plugins/hoster/YoupornCom.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class YoupornCom(Hoster):
+ __name__ = "YoupornCom"
+ __type__ = "hoster"
+ __version__ = "0.2"
+
+ __pattern__ = r'http://(?:www\.)?youporn\.com/watch/.+'
+
+ __description__ = """Youporn.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("willnix", "willnix@pyload.org")]
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url, post={"user_choice": "Enter"}, cookies=False)
+
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if not self.html:
+ self.download_html()
+
+ return re.search(r'(http://download\.youporn\.com/download/\d+\?save=1)">', self.html).group(1)
+
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ file_name_pattern = r'<title>(.+) - '
+ return re.search(file_name_pattern, self.html).group(1).replace("&amp;", "&").replace("/", "") + '.flv'
+
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+ if re.search(r"(.*invalid video_id.*)", self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/YourfilesTo.py b/pyload/plugins/hoster/YourfilesTo.py
new file mode 100644
index 000000000..7cb94a2df
--- /dev/null
+++ b/pyload/plugins/hoster/YourfilesTo.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class YourfilesTo(Hoster):
+ __name__ = "YourfilesTo"
+ __type__ = "hoster"
+ __version__ = "0.21"
+
+ __pattern__ = r'(http://)?(?:www\.)?yourfiles\.(to|biz)/\?d=\w+'
+
+ __description__ = """Youfiles.to hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("jeix", "jeix@hasnomail.de"),
+ ("skydancer", "skydancer@hasnomail.de")]
+
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.prepare()
+ self.download(self.get_file_url())
+
+
+ def prepare(self):
+ if not self.file_exists():
+ self.offline()
+
+ self.pyfile.name = self.get_file_name()
+
+ wait_time = self.get_waiting_time()
+ self.setWait(wait_time)
+ self.wait()
+
+
+ def get_waiting_time(self):
+ if not self.html:
+ self.download_html()
+
+ #var zzipitime = 15;
+ m = re.search(r'var zzipitime = (\d+);', self.html)
+ if m:
+ sec = int(m.group(1))
+ else:
+ sec = 0
+
+ return sec
+
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ url = re.search(r"var bla = '(.*?)';", self.html)
+ if url:
+ url = url.group(1)
+ url = unquote(url.replace("http://http:/http://", "http://").replace("dumdidum", ""))
+ return url
+ else:
+ self.error(_("Absolute filepath not found"))
+
+
+ def get_file_name(self):
+ if not self.html:
+ self.download_html()
+
+ return re.search("<title>(.*)</title>", self.html).group(1)
+
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if not self.html:
+ self.download_html()
+
+ if re.search(r"HTTP Status 404", self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/YoutubeCom.py b/pyload/plugins/hoster/YoutubeCom.py
new file mode 100644
index 000000000..7fdf848c1
--- /dev/null
+++ b/pyload/plugins/hoster/YoutubeCom.py
@@ -0,0 +1,184 @@
+# -*- coding: utf-8 -*-
+
+import os
+import re
+import subprocess
+
+from urllib import unquote
+
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.plugins.internal.SimpleHoster import replace_patterns
+from pyload.utils import html_unescape
+
+
+def which(program):
+ """Works exactly like the unix command which
+
+ Courtesy of http://stackoverflow.com/a/377028/675646"""
+
+
+ def is_exe(fpath):
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+ fpath, fname = os.path.split(program)
+ if fpath:
+ if is_exe(program):
+ return program
+ else:
+ for path in os.environ['PATH'].split(os.pathsep):
+ path = path.strip('"')
+ exe_file = os.path.join(path, program)
+ if is_exe(exe_file):
+ return exe_file
+
+ return None
+
+
+class YoutubeCom(Hoster):
+ __name__ = "YoutubeCom"
+ __type__ = "hoster"
+ __version__ = "0.40"
+
+ __pattern__ = r'https?://(?:[^/]*\.)?(?:youtube\.com|youtu\.be)/watch.*?[?&]v=.*'
+ __config__ = [("quality", "sd;hd;fullhd;240p;360p;480p;720p;1080p;3072p", "Quality Setting", "hd"),
+ ("fmt", "int", "FMT/ITAG Number (5-102, 0 for auto)", 0),
+ (".mp4", "bool", "Allow .mp4", True),
+ (".flv", "bool", "Allow .flv", True),
+ (".webm", "bool", "Allow .webm", False),
+ (".3gp", "bool", "Allow .3gp", False),
+ ("3d", "bool", "Prefer 3D", False)]
+
+ __description__ = """Youtube.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("spoob", "spoob@pyload.org"),
+ ("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ URL_REPLACEMENTS = [(r'youtu\.be/', 'youtube.com/')]
+
+ # Invalid characters that must be removed from the file name
+ invalidChars = u'\u2605:?><"|\\'
+
+ # name, width, height, quality ranking, 3D
+ formats = {5: (".flv", 400, 240, 1, False),
+ 6: (".flv", 640, 400, 4, False),
+ 17: (".3gp", 176, 144, 0, False),
+ 18: (".mp4", 480, 360, 2, False),
+ 22: (".mp4", 1280, 720, 8, False),
+ 43: (".webm", 640, 360, 3, False),
+ 34: (".flv", 640, 360, 4, False),
+ 35: (".flv", 854, 480, 6, False),
+ 36: (".3gp", 400, 240, 1, False),
+ 37: (".mp4", 1920, 1080, 9, False),
+ 38: (".mp4", 4096, 3072, 10, False),
+ 44: (".webm", 854, 480, 5, False),
+ 45: (".webm", 1280, 720, 7, False),
+ 46: (".webm", 1920, 1080, 9, False),
+ 82: (".mp4", 640, 360, 3, True),
+ 83: (".mp4", 400, 240, 1, True),
+ 84: (".mp4", 1280, 720, 8, True),
+ 85: (".mp4", 1920, 1080, 9, True),
+ 100: (".webm", 640, 360, 3, True),
+ 101: (".webm", 640, 360, 4, True),
+ 102: (".webm", 1280, 720, 8, True)}
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+
+
+ def process(self, pyfile):
+ pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
+ html = self.load(pyfile.url, decode=True)
+
+ if re.search(r'<div id="player-unavailable" class="\s*player-width player-height\s*">', html):
+ self.offline()
+
+ if "We have been receiving a large volume of requests from your network." in html:
+ self.tempOffline()
+
+ #get config
+ use3d = self.getConfig("3d")
+ if use3d:
+ quality = {"sd": 82, "hd": 84, "fullhd": 85, "240p": 83, "360p": 82,
+ "480p": 82, "720p": 84, "1080p": 85, "3072p": 85}
+ else:
+ quality = {"sd": 18, "hd": 22, "fullhd": 37, "240p": 5, "360p": 18,
+ "480p": 35, "720p": 22, "1080p": 37, "3072p": 38}
+ desired_fmt = self.getConfig("fmt")
+ if desired_fmt and desired_fmt not in self.formats:
+ self.logWarning(_("FMT %d unknown, using default") % desired_fmt)
+ desired_fmt = 0
+ if not desired_fmt:
+ desired_fmt = quality.get(self.getConfig("quality"), 18)
+
+ #parse available streams
+ streams = re.search(r'"url_encoded_fmt_stream_map": "(.*?)",', html).group(1)
+ streams = [x.split('\u0026') for x in streams.split(',')]
+ streams = [dict((y.split('=', 1)) for y in x) for x in streams]
+ streams = [(int(x['itag']), unquote(x['url'])) for x in streams]
+ #self.logDebug("Found links: %s" % streams)
+ self.logDebug("AVAILABLE STREAMS: %s" % [x[0] for x in streams])
+
+ #build dictionary of supported itags (3D/2D)
+ allowed = lambda x: self.getConfig(self.formats[x][0])
+ streams = [x for x in streams if x[0] in self.formats and allowed(x[0])]
+ if not streams:
+ self.fail(_("No available stream meets your preferences"))
+ fmt_dict = dict([x for x in streams if self.formats[x[0]][4] == use3d] or streams)
+
+ self.logDebug("DESIRED STREAM: ITAG:%d (%s) %sfound, %sallowed" %
+ (desired_fmt, "%s %dx%d Q:%d 3D:%s" % self.formats[desired_fmt],
+ "" if desired_fmt in fmt_dict else "NOT ", "" if allowed(desired_fmt) else "NOT "))
+
+ #return fmt nearest to quality index
+ if desired_fmt in fmt_dict and allowed(desired_fmt):
+ fmt = desired_fmt
+ else:
+ sel = lambda x: self.formats[x][3] # select quality index
+ comp = lambda x, y: abs(sel(x) - sel(y))
+
+ self.logDebug("Choosing nearest fmt: %s" % [(x, allowed(x), comp(x, desired_fmt)) for x in fmt_dict.keys()])
+ fmt = reduce(lambda x, y: x if comp(x, desired_fmt) <= comp(y, desired_fmt) and
+ sel(x) > sel(y) else y, fmt_dict.keys())
+
+ self.logDebug("Chosen fmt: %s" % fmt)
+ url = fmt_dict[fmt]
+ self.logDebug("URL: %s" % url)
+
+ #set file name
+ file_suffix = self.formats[fmt][0] if fmt in self.formats else ".flv"
+ file_name_pattern = '<meta name="title" content="(.+?)">'
+ name = re.search(file_name_pattern, html).group(1).replace("/", "")
+
+ # Cleaning invalid characters from the file name
+ name = name.encode('ascii', 'replace')
+ for c in self.invalidChars:
+ name = name.replace(c, '_')
+
+ pyfile.name = html_unescape(name)
+
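+        # if the URL carries a start time (t=XXmYYs) and ffmpeg is found on PATH,
+        # the downloaded file is re-muxed below to start at that offset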
+ time = re.search(r"t=((\d+)m)?(\d+)s", pyfile.url)
+ ffmpeg = which("ffmpeg")
+ if ffmpeg and time:
+ m, s = time.groups()[1:]
+ if m is None:
+ m = "0"
+
+ pyfile.name += " (starting at %s:%s)" % (m, s)
+ pyfile.name += file_suffix
+
+ filename = self.download(url)
+
+ if ffmpeg and time:
+ inputfile = filename + "_"
+ os.rename(filename, inputfile)
+
+ subprocess.call([
+ ffmpeg,
+ "-ss", "00:%s:%s" % (m, s),
+ "-i", inputfile,
+ "-vcodec", "copy",
+ "-acodec", "copy",
+ filename])
+ os.remove(inputfile)
diff --git a/pyload/plugins/hoster/ZDF.py b/pyload/plugins/hoster/ZDF.py
new file mode 100644
index 000000000..53939ae6c
--- /dev/null
+++ b/pyload/plugins/hoster/ZDF.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from xml.etree.ElementTree import fromstring
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+# Based on zdfm by Roland Beermann (http://github.com/enkore/zdfm/)
+class ZDF(Hoster):
+ __name__ = "ZDF Mediathek"
+ __type__ = "hoster"
+ __version__ = "0.8"
+
+ __pattern__ = r'http://(?:www\.)?zdf\.de/ZDFmediathek/\D*(\d+)\D*'
+
+ __description__ = """ZDF.de hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = []
+
+ XML_API = "http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?id=%i"
+
+
+ @staticmethod
+ def video_key(video):
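+        # sort key: prefer higher bitrate, ties broken in favour of progressive
+        # (direct HTTP) streams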
+ return (
+ int(video.findtext("videoBitrate", "0")),
+ any(f.text == "progressive" for f in video.iter("facet")),
+ )
+
+
+ @staticmethod
+ def video_valid(video):
+ return video.findtext("url").startswith("http") and video.findtext("url").endswith(".mp4") and \
+ video.findtext("facets/facet").startswith("progressive")
+
+
+ @staticmethod
+ def get_id(url):
+ return int(re.search(r"\D*(\d{4,})\D*", url).group(1))
+
+
+ def process(self, pyfile):
+ xml = fromstring(self.load(self.XML_API % self.get_id(pyfile.url)))
+
+ status = xml.findtext("./status/statuscode")
+ if status != "ok":
+ self.fail(_("Error retrieving manifest"))
+
+ video = xml.find("video")
+ title = video.findtext("information/title")
+
+ pyfile.name = title
+
+ target_url = sorted((v for v in video.iter("formitaet") if self.video_valid(v)),
+ key=self.video_key)[-1].findtext("url")
+
+ self.download(target_url)
diff --git a/pyload/plugins/hoster/ZeveraCom.py b/pyload/plugins/hoster/ZeveraCom.py
new file mode 100644
index 000000000..c0c10215d
--- /dev/null
+++ b/pyload/plugins/hoster/ZeveraCom.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Hoster import Hoster
+
+
+class ZeveraCom(Hoster):
+ __name__ = "ZeveraCom"
+ __type__ = "hoster"
+ __version__ = "0.21"
+
+ __pattern__ = r'http://(?:www\.)?zevera\.com/.*'
+
+ __description__ = """Zevera.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = 1
+
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "zevera.com")
+ self.fail(_("No zevera.com account provided"))
+
+ self.logDebug("Old URL: %s" % pyfile.url)
+
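+        # the zevera API is queried twice: "checklink" to verify the file is alive,
+        # then "generatedownloaddirect", whose Location header holds the direct link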
+ if self.account.getAPIData(self.req, cmd="checklink", olink=pyfile.url) != "Alive":
+ self.fail(_("Offline or not downloadable - contact Zevera support"))
+
+ header = self.account.getAPIData(self.req, just_header=True, cmd="generatedownloaddirect", olink=pyfile.url)
+ if not "location" in header:
+ self.fail(_("Unable to initialize download"))
+
+ self.download(header['location'], disposition=True)
+
+ check = self.checkDownload({"error": 'action="ErrorDownload.aspx'})
+ if check == "error":
+ self.fail(_("Error response received - contact Zevera support"))
diff --git a/pyload/plugins/hoster/ZippyshareCom.py b/pyload/plugins/hoster/ZippyshareCom.py
new file mode 100644
index 000000000..8f9062cfc
--- /dev/null
+++ b/pyload/plugins/hoster/ZippyshareCom.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from os import path
+from urllib import unquote
+from urlparse import urljoin
+
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class ZippyshareCom(SimpleHoster):
+ __name__ = "ZippyshareCom"
+ __type__ = "hoster"
+ __version__ = "0.60"
+
+ __pattern__ = r'(?P<HOST>http://www\d{0,2}\.zippyshare\.com)/v(?:/|iew\.jsp.*key=)(?P<KEY>\d+)'
+
+ __description__ = """Zippyshare.com hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ NAME_PATTERN = r'("\d{6,}/"[ ]*\+.+?"/|<title>Zippyshare.com - )(?P<N>.+?)("|</title>)'
+ SIZE_PATTERN = r'>Size:.+?">(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
+
+ OFFLINE_PATTERN = r'>File does not exist on this server<'
+
+ COOKIES = [("zippyshare.com", "ziplocale", "en")]
+
+
+ def setup(self):
+ self.multiDL = True
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+
+ def handleFree(self):
+ url = self.get_link()
+ self.download(url)
+
+
+ def getFileInfo(self):
+ info = super(ZippyshareCom, self).getFileInfo()
+ self.pyfile.name = info['name'] = unquote(info['name'])
+ return info
+
+
+ def get_checksum(self):
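+        # the numeric token in the download link is computed by inline JavaScript
+        # on the page; mirror the same (a1 % a2) * (c1 % c2) + 18 arithmetic here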
+ try:
+ m = re.search(r'\+[ ]*\((\d+)[ ]*\%[ ]*(\d+)[ ]*\+[ ]*(\d+)[ ]*\%[ ]*(\d+)\)[ ]*\+', self.html)
+ if m:
+ a1, a2, c1, c2 = map(int, m.groups())
+ else:
+ a1, a2 = map(int, re.search(r'\(\'downloadB\'\).omg = (\d+)%(\d+)', self.html).groups())
+ c1, c2 = map(int, re.search(r'\(\'downloadB\'\).omg\) \* \((\d+)%(\d+)', self.html).groups())
+
+ b = (a1 % a2) * (c1 % c2)
+ except:
+ self.error(_("Unable to calculate checksum"))
+ else:
+ return b + 18
+
+
+ def get_link(self):
+ checksum = self.get_checksum()
+ p_url = path.join("d", self.info['KEY'], str(checksum), self.pyfile.name)
+ dl_link = urljoin(self.info['HOST'], p_url)
+ return dl_link
+
+
+getInfo = create_getInfo(ZippyshareCom)
diff --git a/pyload/plugins/hoster/__init__.py b/pyload/plugins/hoster/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/hoster/__init__.py
diff --git a/pyload/plugins/internal/AbstractExtractor.py b/pyload/plugins/internal/AbstractExtractor.py
new file mode 100644
index 000000000..54ea9b348
--- /dev/null
+++ b/pyload/plugins/internal/AbstractExtractor.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+
+class ArchiveError(Exception):
+ pass
+
+
+class CRCError(Exception):
+ pass
+
+
+class WrongPassword(Exception):
+ pass
+
+
+class AbtractExtractor:
+ __name__ = "AbtractExtractor"
+ __version__ = "0.1"
+
+ __description__ = """Abtract extractor plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ @staticmethod
+ def checkDeps():
+ """ Check if system statisfy dependencies
+ :return: boolean
+ """
+ return True
+
+
+ @staticmethod
+ def getTargets(files_ids):
+ """ Filter suited targets from list of filename id tuple list
+ :param files_ids: List of filepathes
+ :return: List of targets, id tuple list
+ """
+ raise NotImplementedError
+
+
+ def __init__(self, m, file, out, fullpath, overwrite, excludefiles, renice):
+ """Initialize extractor for specific file
+
+ :param m: ExtractArchive Addon plugin
+ :param file: Absolute filepath
+ :param out: Absolute path to destination directory
+ :param fullpath: extract to fullpath
+ :param overwrite: Overwrite existing archives
+ :param renice: Renice value
+ """
+ self.m = m
+ self.file = file
+ self.out = out
+ self.fullpath = fullpath
+ self.overwrite = overwrite
+ self.excludefiles = excludefiles
+ self.renice = renice
+ self.files = [] #: Store extracted files here
+
+
+ def init(self):
+ """ Initialize additional data structures """
+ pass
+
+
+ def checkArchive(self):
+ """Check if password if needed. Raise ArchiveError if integrity is
+ questionable.
+
+ :return: boolean
+ :raises ArchiveError
+ """
+ return False
+
+
+ def checkPassword(self, password):
+ """ Check if the given password is/might be correct.
+ If it can not be decided at this point return true.
+
+ :param password:
+ :return: boolean
+ """
+ return True
+
+
+ def extract(self, progress, password=None):
+ """Extract the archive. Raise specific errors in case of failure.
+
+ :param progress: Progress function, call this to update status
+        :param password: password to use
+ :raises WrongPassword
+ :raises CRCError
+ :raises ArchiveError
+ :return:
+ """
+ raise NotImplementedError
+
+
+ def getDeleteFiles(self):
+ """Return list of files to delete, do *not* delete them here.
+
+ :return: List with paths of files to delete
+ """
+ raise NotImplementedError
+
+
+ def getExtractedFiles(self):
+ """Populate self.files at some point while extracting"""
+ return self.files
diff --git a/pyload/plugins/internal/Account.py b/pyload/plugins/internal/Account.py
new file mode 100644
index 000000000..e6895f119
--- /dev/null
+++ b/pyload/plugins/internal/Account.py
@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+
+from random import choice
+from time import time
+from traceback import print_exc
+from threading import RLock
+
+from pyload.plugins.Plugin import Base
+from pyload.utils import compare_time, parseFileSize, lock
+
+
+class WrongPassword(Exception):
+ pass
+
+
+class Account(Base):
+ """
+ Base class for every Account plugin.
+    Just overwrite `login`; the cookies will be stored and the account becomes accessible in\
+    the associated hoster plugin. The plugin should also provide `loadAccountInfo`.
+ """
+ __name__ = "Account"
+ __type__ = "account"
+ __version__ = "0.03"
+
+ __description__ = """Base account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+ #: after that time (in minutes) pyload will relogin the account
+ login_timeout = 10 * 60
+ #: after that time (in minutes) account data will be reloaded
+ info_threshold = 10 * 60
+
+
+ def __init__(self, manager, accounts):
+ Base.__init__(self, manager.core)
+
+ self.manager = manager
+ self.accounts = {}
+ self.infos = {} #: cache for account information
+ self.lock = RLock()
+ self.timestamps = {}
+
+ self.init()
+
+ self.setAccounts(accounts)
+
+
+ def init(self):
+ pass
+
+
+ def login(self, user, data, req):
+ """login into account, the cookies will be saved so user can be recognized
+
+ :param user: loginname
+ :param data: data dictionary
+ :param req: `Request` instance
+ """
+ pass
+
+
+ @lock
+ def _login(self, user, data):
+ # set timestamp for login
+ self.timestamps[user] = time()
+
+ req = self.getAccountRequest(user)
+ try:
+ self.login(user, data, req)
+ except WrongPassword:
+ self.logWarning(
+ _("Could not login with account %(user)s | %(msg)s") % {"user": user,
+ "msg": _("Wrong Password")})
+ success = data['valid'] = False
+ except Exception, e:
+ self.logWarning(
+ _("Could not login with account %(user)s | %(msg)s") % {"user": user,
+ "msg": e})
+ success = data['valid'] = False
+ if self.core.debug:
+ print_exc()
+ else:
+ success = True
+ finally:
+ if req:
+ req.close()
+ return success
+
+
+ def relogin(self, user):
+ req = self.getAccountRequest(user)
+ if req:
+ req.cj.clear()
+ req.close()
+ if user in self.infos:
+ del self.infos[user] #delete old information
+
+ return self._login(user, self.accounts[user])
+
+
+ def setAccounts(self, accounts):
+ self.accounts = accounts
+ for user, data in self.accounts.iteritems():
+ self._login(user, data)
+ self.infos[user] = {}
+
+
+ def updateAccounts(self, user, password=None, options={}):
+ """ updates account and return true if anything changed """
+
+ if user in self.accounts:
+ self.accounts[user]['valid'] = True #do not remove or accounts will not login
+ if password:
+ self.accounts[user]['password'] = password
+ self.relogin(user)
+ return True
+ if options:
+ before = self.accounts[user]['options']
+ self.accounts[user]['options'].update(options)
+ return self.accounts[user]['options'] != before
+ else:
+ self.accounts[user] = {"password": password, "options": options, "valid": True}
+ self._login(user, self.accounts[user])
+ return True
+
+
+ def removeAccount(self, user):
+ if user in self.accounts:
+ del self.accounts[user]
+ if user in self.infos:
+ del self.infos[user]
+ if user in self.timestamps:
+ del self.timestamps[user]
+
+
+ @lock
+ def getAccountInfo(self, name, force=False):
+ """retrieve account infos for an user, do **not** overwrite this method!\\
+ just use it to retrieve infos in hoster plugins. see `loadAccountInfo`
+
+ :param name: username
+ :param force: reloads cached account information
+ :return: dictionary with information
+ """
+ data = Account.loadAccountInfo(self, name)
+
+ if force or name not in self.infos:
+ self.logDebug("Get Account Info for %s" % name)
+ req = self.getAccountRequest(name)
+
+ try:
+ infos = self.loadAccountInfo(name, req)
+                if not isinstance(infos, dict):
+ raise Exception("Wrong return format")
+ except Exception, e:
+ infos = {"error": str(e)}
+ print_exc()
+
+ if req:
+ req.close()
+
+ self.logDebug("Account Info: %s" % infos)
+
+ infos['timestamp'] = time()
+ self.infos[name] = infos
+ elif "timestamp" in self.infos[name] and self.infos[name][
+ "timestamp"] + self.info_threshold * 60 < time():
+ self.logDebug("Reached timeout for account data")
+ self.scheduleRefresh(name)
+
+ data.update(self.infos[name])
+ return data
+
+
+ def isPremium(self, user):
+ info = self.getAccountInfo(user)
+ return info['premium']
+
+
+ def loadAccountInfo(self, name, req=None):
+ """this should be overwritten in account plugin,\
+ and retrieving account information for user
+
+ :param name:
+ :param req: `Request` instance
+ :return:
+ """
+ return {"validuntil": None, #: -1 for unlimited
+ "login": name,
+ # "password": self.accounts[name]['password'], #: commented due security reason
+ "options": self.accounts[name]['options'],
+ "valid": self.accounts[name]['valid'],
+ "trafficleft": None, #: in kb, -1 for unlimited
+ "maxtraffic": None,
+ "premium": None,
+ "timestamp": 0, #: time this info was retrieved
+ "type": self.__name__}
+
+
+ def getAllAccounts(self, force=False):
+ return [self.getAccountInfo(user, force) for user, data in self.accounts.iteritems()]
+
+
+ def getAccountRequest(self, user=None):
+ if not user:
+ user, data = self.selectAccount()
+ if not user:
+ return None
+
+ req = self.core.requestFactory.getRequest(self.__name__, user)
+ return req
+
+
+ def getAccountCookies(self, user=None):
+ if not user:
+ user, data = self.selectAccount()
+ if not user:
+ return None
+
+ cj = self.core.requestFactory.getCookieJar(self.__name__, user)
+ return cj
+
+
+ def getAccountData(self, user):
+ return self.accounts[user]
+
+
+ def selectAccount(self):
+ """ returns an valid account name and data"""
+ usable = []
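+        # skip accounts that are invalid, outside their configured time window,
+        # expired, or out of traffic; pick one of the remaining at random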
+ for user, data in self.accounts.iteritems():
+ if not data['valid']: continue
+
+ if "time" in data['options'] and data['options']['time']:
+ time_data = ""
+ try:
+ time_data = data['options']['time'][0]
+ start, end = time_data.split("-")
+ if not compare_time(start.split(":"), end.split(":")):
+ continue
+ except:
+ self.logWarning(_("Your Time %s has wrong format, use: 1:22-3:44") % time_data)
+
+ if user in self.infos:
+ if "validuntil" in self.infos[user]:
+ if self.infos[user]['validuntil'] > 0 and time() > self.infos[user]['validuntil']:
+ continue
+ if "trafficleft" in self.infos[user]:
+ if self.infos[user]['trafficleft'] == 0:
+ continue
+
+ usable.append((user, data))
+
+ if not usable: return None, None
+ return choice(usable)
+
+
+ def canUse(self):
+ return self.selectAccount() != (None, None)
+
+
+ def parseTraffic(self, string): #returns kbyte
+ return parseFileSize(string)
+
+
+ def wrongPassword(self):
+ raise WrongPassword
+
+
+ def empty(self, user):
+ if user in self.infos:
+ self.logWarning(_("Account %s has not enough traffic, checking again in 30min") % user)
+
+ self.infos[user].update({"trafficleft": 0})
+ self.scheduleRefresh(user, 30 * 60)
+
+
+ def expired(self, user):
+ if user in self.infos:
+ self.logWarning(_("Account %s is expired, checking again in 1h") % user)
+
+ self.infos[user].update({"validuntil": time() - 1})
+ self.scheduleRefresh(user, 60 * 60)
+
+
+ def scheduleRefresh(self, user, time=0, force=True):
+ """ add task to refresh account info to sheduler """
+ self.logDebug("Scheduled Account refresh for %s in %s seconds." % (user, time))
+ self.core.scheduler.addJob(time, self.getAccountInfo, [user, force])
+
+
+ @lock
+ def checkLogin(self, user):
+ """ checks if user is still logged in """
+ if user in self.timestamps:
+ if self.login_timeout > 0 and self.timestamps[user] + self.login_timeout * 60 < time():
+ self.logDebug("Reached login timeout for %s" % user)
+ return self.relogin(user)
+ else:
+ return True
+ else:
+ return False
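For orientation, this is roughly how a concrete account plugin sits on top of the base class above: it only supplies login() and loadAccountInfo(), while caching, selectAccount() and relogin handling come from the code in this file. The hoster name, URLs, response markers and the import path (pyload.plugins.internal.Account) are assumptions made for this sketch, not an existing plugin.

    # -*- coding: utf-8 -*-
    # Hypothetical account plugin; site, markers and import path are assumed.
    from time import time

    from pyload.plugins.internal.Account import Account


    class ExampleCom(Account):
        __name__ = "ExampleCom"
        __type__ = "account"
        __version__ = "0.01"


        def loadAccountInfo(self, user, req):
            html = req.load("http://example.com/api/user")
            premium = '"premium":true' in html
            #: trafficleft is expected in KiB, -1 means unlimited (see defaults above)
            return {"premium": premium,
                    "trafficleft": -1 if premium else 0,
                    "validuntil": (time() + 30 * 24 * 3600) if premium else -1}


        def login(self, user, data, req):
            html = req.load("http://example.com/login",
                            post={"user": user, "password": data['password']})
            if "Invalid password" in html:
                self.wrongPassword()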
diff --git a/pyload/plugins/internal/Addon.py b/pyload/plugins/internal/Addon.py
new file mode 100644
index 000000000..b126b97d6
--- /dev/null
+++ b/pyload/plugins/internal/Addon.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+
+from traceback import print_exc
+
+from pyload.plugins.Plugin import Base
+
+
+class Expose(object):
+ """ used for decoration to declare rpc services """
+
+ def __new__(cls, f, *args, **kwargs):
+ addonManager.addRPC(f.__module__, f.func_name, f.func_doc)
+ return f
+
+
+def threaded(f):
+
+ def run(*args,**kwargs):
+ addonManager.startThread(f, *args, **kwargs)
+ return run
+
+
+class Addon(Base):
+ """
+ Base class for addon plugins.
+ """
+ __name__ = "Addon"
+ __type__ = "addon"
+ __version__ = "0.03"
+
+ __config__ = [] #: [("name", "type", "desc", "default")]
+
+ __description__ = """Base addon/hook plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de"),
+ ("RaNaN", "RaNaN@pyload.org")]
+
+
+ #: automatically register event listeners for functions; attribute will be deleted, don't use it yourself
+ event_map = {}
+
+ # Alternative to event_map
+ #: List of events the plugin can handle, name the functions exactly like eventname.
+ event_list = [] #@NOTE: don't make duplicate entries in event_map
+
+
+ def __init__(self, core, manager):
+ Base.__init__(self, core)
+
+ #: Provide information in dict here, usable by API `getInfo`
+ self.info = {}
+
+ #: Callback of periodical job task, used by AddonManager
+ self.cb = None
+ self.interval = -1 #: disabled
+
+ #: `AddonManager`
+ self.manager = manager
+
+ #register events
+ if self.event_map:
+ for event, funcs in self.event_map.iteritems():
+ if type(funcs) in (list, tuple):
+ for f in funcs:
+ self.manager.addEvent(event, getattr(self,f))
+ else:
+ self.manager.addEvent(event, getattr(self,funcs))
+
+ #delete for various reasons
+ self.event_map = None
+
+ if self.event_list:
+ for f in self.event_list:
+ self.manager.addEvent(f, getattr(self,f))
+
+ self.event_list = None
+
+ self.setup()
+
+ self.initPeriodical()
+
+
+ def initPeriodical(self, delay=0, threaded=False):
+ self.cb = self.core.scheduler.addJob(delay, self._periodical, args=[threaded], threaded=threaded)
+
+
+ def _periodical(self, threaded):
+ if self.interval < 0:
+ self.cb = None
+ return
+
+ try:
+ self.periodical()
+
+ except Exception, e:
+ self.logError(_("Error executing addon: %s") % e)
+ if self.core.debug:
+ print_exc()
+
+ self.cb = self.core.scheduler.addJob(self.interval, self._periodical, threaded=threaded)
+
+
+ def __repr__(self):
+ return "<Addon %s>" % self.__name__
+
+
+ def setup(self):
+ """ more init stuff if needed """
+ pass
+
+
+ def unload(self):
+ """ called when addon was deactivated """
+ pass
+
+
+ def isActivated(self):
+ """ checks if addon is activated"""
+ return self.core.config.getPlugin(self.__name__, "activated")
+
+
+ #event methods - overwrite these if needed
+ def coreReady(self):
+ pass
+
+
+ def coreExiting(self):
+ pass
+
+
+ def downloadPreparing(self, pyfile):
+ pass
+
+
+ def downloadFinished(self, pyfile):
+ pass
+
+
+ def downloadFailed(self, pyfile):
+ pass
+
+
+ def packageFinished(self, pypack):
+ pass
+
+
+ def beforeReconnecting(self, ip):
+ pass
+
+
+ def afterReconnecting(self, ip):
+ pass
+
+
+ def periodical(self):
+ pass
+
+
+ def newCaptchaTask(self, task):
+ """ new captcha task for the plugin, it MUST set the handler and timeout or will be ignored """
+ pass
+
+
+ def captchaCorrect(self, task):
+ pass
+
+
+ def captchaInvalid(self, task):
+ pass
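To illustrate the wiring done in __init__ above: a subclass declares listeners through event_map (or event_list) and enables the periodical job by setting self.interval in setup(). Everything below, including the event name, is a hypothetical sketch rather than an existing addon.

    # Hypothetical addon; the "allDownloadsProcessed" event name is an assumption.
    from pyload.plugins.internal.Addon import Addon


    class QueueNotifier(Addon):
        __name__ = "QueueNotifier"
        __type__ = "addon"
        __version__ = "0.01"

        event_map = {"allDownloadsProcessed": "onQueueDone"}  #: bound and then cleared by Addon.__init__


        def setup(self):
            self.interval = 60 * 60  #: run periodical() hourly (stays disabled while < 0)


        def periodical(self):
            self.logDebug("QueueNotifier is alive")


        def onQueueDone(self):
            self.logInfo("All downloads processed")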
diff --git a/pyload/plugins/internal/BasePlugin.py b/pyload/plugins/internal/BasePlugin.py
new file mode 100644
index 000000000..dd8540578
--- /dev/null
+++ b/pyload/plugins/internal/BasePlugin.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urllib import unquote
+from urlparse import urlparse
+
+from pyload.network.HTTPRequest import BadHeader
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import html_unescape, remove_chars
+
+
+class BasePlugin(Hoster):
+ __name__ = "BasePlugin"
+ __type__ = "hoster"
+ __version__ = "0.20"
+
+ __pattern__ = r'^unmatchable$'
+
+ __description__ = """Base Plugin when any other didnt fit"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ def setup(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+
+ def process(self, pyfile):
+ """main function"""
+
+ #: debug part, for api exerciser
+ if pyfile.url.startswith("DEBUG_API"):
+ self.multiDL = False
+ return
+
+ if pyfile.url.startswith("http"):
+
+ try:
+ self.downloadFile(pyfile)
+ except BadHeader, e:
+ if e.code in (401, 403):
+ self.logDebug("Auth required")
+
+ account = self.core.accountManager.getAccountPlugin('Http')
+ servers = [x['login'] for x in account.getAllAccounts()]
+ server = urlparse(pyfile.url).netloc
+
+ if server in servers:
+ self.logDebug("Logging on to %s" % server)
+ self.req.addAuth(account.accounts[server]['password'])
+ else:
+ for pwd in pyfile.package().password.splitlines():
+ if ":" in pwd:
+ self.req.addAuth(pwd.strip())
+ break
+ else:
+ self.fail(_("Authorization required (username:password)"))
+
+ self.downloadFile(pyfile)
+ else:
+ raise
+
+ else:
+ self.fail(_("No Plugin matched and not a downloadable url"))
+
+
+ def downloadFile(self, pyfile):
+ url = pyfile.url
+
+ for _i in xrange(5):
+ header = self.load(url, just_header=True)
+
+ # self.load does not raise a BadHeader on 404 responses, do it here
+ if 'code' in header and header['code'] == 404:
+ raise BadHeader(404)
+
+ if 'location' in header:
+ self.logDebug("Location: " + header['location'])
+ base = re.match(r'https?://[^/]+', url).group(0)
+ if header['location'].startswith("http"):
+ url = header['location']
+ elif header['location'].startswith("/"):
+ url = base + unquote(header['location'])
+ else:
+ url = '%s/%s' % (base, unquote(header['location']))
+ else:
+ break
+
+ name = html_unescape(unquote(urlparse(url).path.split("/")[-1]))
+
+ if 'content-disposition' in header:
+ self.logDebug("Content-Disposition: " + header['content-disposition'])
+ m = re.search("filename(?P<type>=|\*=(?P<enc>.+)'')(?P<name>.*)", header['content-disposition'])
+ if m:
+ disp = m.groupdict()
+ self.logDebug(disp)
+ if not disp['enc']:
+ disp['enc'] = 'utf-8'
+ name = remove_chars(disp['name'], "\"';").strip()
+ name = unicode(unquote(name), disp['enc'])
+
+ if not name:
+ name = url
+ pyfile.name = name
+ self.logDebug("Filename: %s" % pyfile.name)
+ self.download(url, disposition=True)
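The Content-Disposition handling in downloadFile() is compact enough to misread, so here is a standalone demonstration of what that regex yields for the two header forms it supports (plain and RFC 5987 encoded). It only exercises the pattern; the header values are made up and the cleanup is simplified compared to remove_chars().

    import re
    from urllib import unquote

    PATTERN = r"filename(?P<type>=|\*=(?P<enc>.+)'')(?P<name>.*)"

    for header in ('attachment; filename="report.pdf"',
                   "attachment; filename*=UTF-8''summary%20v2.txt"):
        disp = re.search(PATTERN, header).groupdict()
        if not disp['enc']:
            disp['enc'] = 'utf-8'
        name = disp['name'].strip("\"'; ")          #: simplified stand-in for remove_chars()
        print unicode(unquote(name), disp['enc'])   #: -> report.pdf / summary v2.txt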
diff --git a/pyload/plugins/internal/Captcha.py b/pyload/plugins/internal/Captcha.py
new file mode 100644
index 000000000..7197c390e
--- /dev/null
+++ b/pyload/plugins/internal/Captcha.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.Plugin import Plugin
+
+
+class Captcha(Plugin):
+ __name__ = "Captcha"
+ __type__ = "captcha"
+ __version__ = "0.14"
+
+ __description__ = """Base captcha service plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ KEY_PATTERN = None
+
+ key = None #: last key detected
+
+
+ def __init__(self, plugin):
+ self.plugin = plugin
+
+
+ def detect_key(self, html=None):
+ if not html:
+ if hasattr(self.plugin, "html") and self.plugin.html:
+ html = self.plugin.html
+ else:
+ errmsg = _("%s html not found") % self.__name__
+ self.plugin.error(errmsg)
+ raise TypeError(errmsg)
+
+ m = re.search(self.KEY_PATTERN, html)
+ if m:
+ self.key = m.group("KEY")
+ self.plugin.logDebug("%s key: %s" % (self.__name__, self.key))
+ return self.key
+ else:
+ self.plugin.logDebug("%s key not found" % self.__name__)
+ return None
+
+
+ def challenge(self, key=None):
+ raise NotImplementedError
+
+
+ def result(self, server, challenge):
+ raise NotImplementedError
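A concrete captcha service built on this base only needs KEY_PATTERN plus the challenge/result pair; detect_key() does the page scraping. The service name, endpoint, parameters and markup below are invented for illustration and assume the hoster plugin's decryptCaptcha() helper.

    import re

    from pyload.plugins.internal.Captcha import Captcha


    class ExampleCaptchaService(Captcha):
        __name__ = "ExampleCaptchaService"
        __version__ = "0.01"

        KEY_PATTERN = r'data-sitekey="(?P<KEY>[\w\-]+)"'  #: hypothetical site markup


        def challenge(self, key=None):
            key = key or self.detect_key()
            html = self.plugin.load("http://captcha.example.com/challenge", get={'k': key})
            challenge = re.search(r"challenge\s*:\s*'(.+?)'", html).group(1)
            return challenge, self.result("http://captcha.example.com", challenge)


        def result(self, server, challenge):
            #: push the image into pyLoad's captcha queue (user input, OCR or service)
            return self.plugin.decryptCaptcha("%s/image" % server, get={'c': challenge})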
diff --git a/pyload/plugins/internal/Container.py b/pyload/plugins/internal/Container.py
new file mode 100644
index 000000000..b7dd3aa20
--- /dev/null
+++ b/pyload/plugins/internal/Container.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from os import remove
+from os.path import basename, exists
+
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.utils import safe_join
+
+
+class Container(Crypter):
+ __name__ = "Container"
+ __type__ = "container"
+ __version__ = "0.01"
+
+ __pattern__ = r'^unmatchable$'
+ __config__ = [] #: [("name", "type", "desc", "default")]
+
+ __description__ = """Base container decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
+
+
+ def preprocessing(self, thread):
+ """prepare"""
+
+ self.setup()
+ self.thread = thread
+
+ self.loadToDisk()
+
+ self.decrypt(self.pyfile)
+ self.deleteTmp()
+
+ self.createPackages()
+
+
+ def loadToDisk(self):
+ """loads container to disk if its stored remotely and overwrite url,
+ or check existent on several places at disk"""
+
+ if self.pyfile.url.startswith("http"):
+ self.pyfile.name = re.findall("([^\/=]+)", self.pyfile.url)[-1]
+ content = self.load(self.pyfile.url)
+ self.pyfile.url = safe_join(self.core.config['general']['download_folder'], self.pyfile.name)
+ try:
+ with open(self.pyfile.url, "wb") as f:
+ f.write(content)
+ except IOError, e:
+ self.fail(str(e))
+
+ else:
+ self.pyfile.name = basename(self.pyfile.url)
+ if not exists(self.pyfile.url):
+ if exists(safe_join(pypath, self.pyfile.url)):
+ self.pyfile.url = safe_join(pypath, self.pyfile.url)
+ else:
+ self.fail(_("File not exists"))
+
+
+ def deleteTmp(self):
+ if self.pyfile.name.startswith("tmp_"):
+ remove(self.pyfile.url)
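A container plugin builds on this by implementing decrypt() against the local file that loadToDisk() has made available; a plain-text link list, for instance, could be handled like the hypothetical sketch below (one package per container file is an assumption of the example).

    # Hypothetical plain-text container plugin.
    from pyload.plugins.internal.Container import Container


    class TxtList(Container):
        __name__ = "TxtList"
        __type__ = "container"
        __version__ = "0.01"

        __pattern__ = r'.+\.txt$'


        def decrypt(self, pyfile):
            #: after loadToDisk() pyfile.url points at the local file
            with open(pyfile.url) as f:
                links = [line.strip() for line in f if line.strip()]

            if links:
                self.packages.append((pyfile.name, links, pyfile.name))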
diff --git a/pyload/plugins/internal/Crypter.py b/pyload/plugins/internal/Crypter.py
new file mode 100644
index 000000000..76880ca14
--- /dev/null
+++ b/pyload/plugins/internal/Crypter.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+
+from urlparse import urlparse
+
+from pyload.plugins.Plugin import Plugin
+from pyload.utils import decode, html_unescape, save_filename
+
+
+class Crypter(Plugin):
+ __name__ = "Crypter"
+ __type__ = "crypter"
+ __version__ = "0.05"
+
+ __pattern__ = r'^unmatchable$'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True), #: Overrides core.config['general']['folder_per_package']
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Base decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ html = None #: last html loaded
+
+
+ def __init__(self, pyfile):
+ #: Put all packages here. It's a list of tuples like: ( name, [list of links], folder )
+ self.packages = []
+
+ #: List of urls, pyLoad will generate packagenames
+ self.urls = []
+
+ Plugin.__init__(self, pyfile)
+
+
+ def process(self, pyfile):
+ """ main method """
+
+ self.decrypt(pyfile)
+
+ if self.urls:
+ self.generatePackages()
+
+ elif not self.packages:
+ self.error(_("No link extracted"), "decrypt")
+
+ self.createPackages()
+
+
+ def decrypt(self, pyfile):
+ raise NotImplementedError
+
+
+ def generatePackages(self):
+ """ generate new packages from self.urls """
+
+ packages = [(name, links, None) for name, links in self.core.api.generatePackages(self.urls).iteritems()]
+ self.packages.extend(packages)
+
+
+ def createPackages(self):
+ """ create new packages from self.packages """
+
+ package_folder = self.pyfile.package().folder
+ package_password = self.pyfile.package().password
+ package_queue = self.pyfile.package().queue
+
+ folder_per_package = self.core.config['general']['folder_per_package']
+ try:
+ use_subfolder = self.getConfig('use_subfolder')
+ except:
+ use_subfolder = folder_per_package
+ try:
+ subfolder_per_package = self.getConfig('subfolder_per_package')
+ except:
+ subfolder_per_package = True
+
+ for pack in self.packages:
+ name, links, folder = pack
+
+ self.logDebug("Parsed package: %s" % name,
+ "%d links" % len(links),
+ "Saved to folder: %s" % folder if folder else "Saved to download folder")
+
+ links = map(decode, links)
+
+ pid = self.core.api.addPackage(name, links, package_queue)
+
+ if package_password:
+ self.core.api.setPackageData(pid, {"password": package_password})
+
+ setFolder = lambda x: self.core.api.setPackageData(pid, {"folder": x or ""}) #: Workaround to avoid breaking the API addPackage method
+
+ if use_subfolder:
+ if not subfolder_per_package:
+ setFolder(package_folder)
+ self.logDebug("Set package %(name)s folder to: %(folder)s" % {"name": name, "folder": folder})
+
+ elif not folder_per_package or name != folder:
+ if not folder:
+ folder = urlparse(html_unescape(name)).path.split("/")[-1]
+
+ setFolder(folder)
+ self.logDebug("Set package %(name)s folder to: %(folder)s" % {"name": name, "folder": folder})
+
+ elif folder_per_package:
+ setFolder(None)
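To make the flow in process() concrete: a decrypter either fills self.urls (the core then generates package names) or appends ready-made (name, links, folder) tuples to self.packages. The site and markup in this sketch are invented.

    import re

    from pyload.plugins.internal.Crypter import Crypter


    class ExampleFolderCom(Crypter):
        __name__ = "ExampleFolderCom"
        __type__ = "crypter"
        __version__ = "0.01"

        __pattern__ = r'http://(?:www\.)?examplefolder\.com/f/\w+'  #: hypothetical url scheme


        def decrypt(self, pyfile):
            html = self.load(pyfile.url)

            #: simplest form: collect links and let the core name the package
            self.urls = re.findall(r'<a class="file" href="(http://[^"]+)"', html)

            #: or build an explicit package instead: (name, links, folder)
            # name = re.search(r'<h1>(.+?)</h1>', html).group(1)
            # self.packages = [(name, self.urls, name)]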
diff --git a/pyload/plugins/internal/DeadCrypter.py b/pyload/plugins/internal/DeadCrypter.py
new file mode 100644
index 000000000..bf150f3d5
--- /dev/null
+++ b/pyload/plugins/internal/DeadCrypter.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Crypter import Crypter as _Crypter
+
+
+class DeadCrypter(_Crypter):
+ __name__ = "DeadCrypter"
+ __type__ = "crypter"
+ __version__ = "0.02"
+
+ __pattern__ = r'^unmatchable$'
+
+ __description__ = """Crypter is no longer available"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it")]
+
+
+ def setup(self):
+ self.offline("Crypter is no longer available")
diff --git a/pyload/plugins/internal/DeadHoster.py b/pyload/plugins/internal/DeadHoster.py
new file mode 100644
index 000000000..036ed3cb6
--- /dev/null
+++ b/pyload/plugins/internal/DeadHoster.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.Hoster import Hoster as _Hoster
+
+
+def create_getInfo(plugin):
+
+ def getInfo(urls):
+ yield map(lambda url: ('#N/A: ' + url, 0, 1, url), urls)
+
+ return getInfo
+
+
+class DeadHoster(_Hoster):
+ __name__ = "DeadHoster"
+ __type__ = "hoster"
+ __version__ = "0.12"
+
+ __pattern__ = r'^unmatchable$'
+
+ __description__ = """Hoster is no longer available"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
+
+
+ def setup(self):
+ self.offline("Hoster is no longer available")
diff --git a/pyload/plugins/internal/Hoster.py b/pyload/plugins/internal/Hoster.py
new file mode 100644
index 000000000..ea225262e
--- /dev/null
+++ b/pyload/plugins/internal/Hoster.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.Plugin import Plugin
+
+
+def getInfo(self):
+ #result = [ .. (name, size, status, url) .. ]
+ return
+
+
+class Hoster(Plugin):
+ __name__ = "Hoster"
+ __type__ = "hoster"
+ __version__ = "0.02"
+
+ __pattern__ = r'^unmatchable$'
+ __config__ = [] #: [("name", "type", "desc", "default")]
+
+ __description__ = """Base hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("mkaay", "mkaay@mkaay.de")]
diff --git a/pyload/plugins/internal/MultiHoster.py b/pyload/plugins/internal/MultiHoster.py
new file mode 100644
index 000000000..4eb4a6f31
--- /dev/null
+++ b/pyload/plugins/internal/MultiHoster.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from pyload.plugins.internal.Addon import Addon
+from pyload.utils import remove_chars
+
+
+class MultiHoster(Addon):
+ __name__ = "MultiHoster"
+ __type__ = "addon"
+ __version__ = "0.20"
+
+ __description__ = """Base multi-hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ interval = 24 * 60 * 60 #: reload hosters daily
+
+ HOSTER_REPLACEMENTS = [("2shared.com", "twoshared.com"), ("4shared.com", "fourshared.com"), ("cloudnator.com", "shragle.com"),
+ ("ifile.it", "filecloud.io"), ("easy-share.com", "crocko.com"), ("freakshare.net", "freakshare.com"),
+ ("hellshare.com", "hellshare.cz"), ("share-rapid.cz", "sharerapid.com"), ("sharerapid.cz", "sharerapid.com"),
+ ("ul.to", "uploaded.to"), ("uploaded.net", "uploaded.to"), ("1fichier.com", "onefichier.com")]
+ HOSTER_EXCLUDED = []
+
+
+ def setup(self):
+ self.hosters = []
+ self.supported = []
+ self.new_supported = []
+
+
+ def getConfig(self, option, default=''):
+ """getConfig with default value - subclass may not implements all config options"""
+ try:
+ # Fixed loop due to getConf deprecation in 0.4.10
+ return super(MultiHoster, self).getConfig(option)
+ except KeyError:
+ return default
+
+
+ def getHosterCached(self):
+ if not self.hosters:
+ try:
+ hosterSet = self.toHosterSet(self.getHoster()) - set(self.HOSTER_EXCLUDED)
+ except Exception, e:
+ self.logError(e)
+ return []
+
+ try:
+ configMode = self.getConfig('hosterListMode', 'all')
+ if configMode in ("listed", "unlisted"):
+ configSet = self.toHosterSet(self.getConfig('hosterList', '').replace('|', ',').replace(';', ',').split(','))
+
+ if configMode == "listed":
+ hosterSet &= configSet
+ else:
+ hosterSet -= configSet
+
+ except Exception, e:
+ self.logError(e)
+
+ self.hosters = list(hosterSet)
+
+ return self.hosters
+
+
+ def toHosterSet(self, hosters):
+ hosters = set((str(x).strip().lower() for x in hosters))
+
+ for rep in self.HOSTER_REPLACEMENTS:
+ if rep[0] in hosters:
+ hosters.remove(rep[0])
+ hosters.add(rep[1])
+
+ hosters.discard('')
+ return hosters
+
+
+ def getHoster(self):
+ """Load list of supported hoster
+
+ :return: List of domain names
+ """
+ raise NotImplementedError
+
+
+ def coreReady(self):
+ if self.cb:
+ self.core.scheduler.removeJob(self.cb)
+
+ self.setConfig("activated", True) #: config not in sync after plugin reload
+
+ cfg_interval = self.getConfig("interval", None) #: reload interval in hours
+ if cfg_interval is not None:
+ self.interval = cfg_interval * 60 * 60
+
+ if self.interval:
+ self._periodical()
+ else:
+ self.periodical()
+
+
+ def initPeriodical(self):
+ pass
+
+
+ def periodical(self):
+ """reload hoster list periodically"""
+ self.logInfo(_("Reloading supported hoster list"))
+
+ old_supported = self.supported
+ self.supported, self.new_supported, self.hosters = [], [], []
+
+ self.overridePlugins()
+
+ old_supported = [hoster for hoster in old_supported if hoster not in self.supported]
+ if old_supported:
+ self.logDebug("UNLOAD", ", ".join(old_supported))
+ for hoster in old_supported:
+ self.unloadHoster(hoster)
+
+
+ def overridePlugins(self):
+ pluginMap = {}
+ for name in self.core.pluginManager.hosterPlugins.keys():
+ pluginMap[name.lower()] = name
+
+ accountList = [name.lower() for name, data in self.core.accountManager.accounts.iteritems() if data]
+ excludedList = []
+
+ for hoster in self.getHosterCached():
+ name = remove_chars(hoster.lower(), "-.")
+
+ if name in accountList:
+ excludedList.append(hoster)
+ else:
+ if name in pluginMap:
+ self.supported.append(pluginMap[name])
+ else:
+ self.new_supported.append(hoster)
+
+ if not self.supported and not self.new_supported:
+ self.logError(_("No Hoster loaded"))
+ return
+
+ module = self.core.pluginManager.getPlugin(self.__type__, self.__name__)
+ klass = getattr(module, self.__name__)
+
+ # inject this plugin into the matched hoster plugins
+ self.logDebug("Overwritten Hosters", ", ".join(sorted(self.supported)))
+ for hoster in self.supported:
+ dict = self.core.pluginManager.hosterPlugins[hoster]
+ dict['new_module'] = module
+ dict['new_name'] = self.__name__
+
+ if excludedList:
+ self.logInfo(_("The following hosters were not overwritten - account exists"), ", ".join(sorted(excludedList)))
+
+ if self.new_supported:
+ self.logDebug("New Hosters", ", ".join(sorted(self.new_supported)))
+
+ # create new regexp
+ regexp = r'.*(%s).*' % "|".join([x.replace(".", "\\.") for x in self.new_supported])
+ if hasattr(klass, "__pattern__") and isinstance(klass.__pattern__, basestring) and '://' in klass.__pattern__:
+ regexp = r'%s|%s' % (klass.__pattern__, regexp)
+
+ self.logDebug("Regexp", regexp)
+
+ dict = self.core.pluginManager.hosterPlugins[self.__name__]
+ dict['pattern'] = regexp
+ dict['re'] = re.compile(regexp)
+
+
+ def unloadHoster(self, hoster):
+ dict = self.core.pluginManager.hosterPlugins[hoster]
+ if "module" in dict:
+ del dict['module']
+
+ if "new_module" in dict:
+ del dict['new_module']
+ del dict['new_name']
+
+
+ def unload(self):
+ """Remove override for all hosters. Scheduler job is removed by AddonManager"""
+ for hoster in self.supported:
+ self.unloadHoster(hoster)
+
+ # reset pattern
+ klass = getattr(self.core.pluginManager.getPlugin(self.__type__, self.__name__), self.__name__)
+ dict = self.core.pluginManager.hosterPlugins[self.__name__]
+ dict['pattern'] = getattr(klass, "__pattern__", r'^unmatchable$')
+ dict['re'] = re.compile(dict['pattern'])
+
+
+ def downloadFailed(self, pyfile):
+ """remove plugin override if download fails but not if file is offline/temp.offline"""
+ if pyfile.hasStatus("failed") and self.getConfig("unloadFailing", True):
+ hdict = self.core.pluginManager.hosterPlugins[pyfile.pluginname]
+ if "new_name" in hdict and hdict['new_name'] == self.__name__:
+ self.logDebug("Unload MultiHoster", pyfile.pluginname, hdict)
+ self.unloadHoster(pyfile.pluginname)
+ pyfile.setStatus("queued")
diff --git a/pyload/plugins/internal/OCR.py b/pyload/plugins/internal/OCR.py
new file mode 100644
index 000000000..dec9f28b7
--- /dev/null
+++ b/pyload/plugins/internal/OCR.py
@@ -0,0 +1,314 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+
+try:
+ from PIL import Image, GifImagePlugin, JpegImagePlugin, PngImagePlugin, TiffImagePlugin
+except ImportError:
+ import Image, GifImagePlugin, JpegImagePlugin, PngImagePlugin, TiffImagePlugin
+
+import logging
+import os
+import subprocess
+
+from os.path import abspath, join
+
+
+class OCR(object):
+ __name__ = "OCR"
+ __type__ = "ocr"
+ __version__ = "0.01"
+
+ __description__ = """Base OCR plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ def __init__(self):
+ self.logger = logging.getLogger("log")
+
+
+ def load_image(self, image):
+ self.image = Image.open(image)
+ self.pixels = self.image.load()
+ self.result_captcha = ''
+
+
+ def unload(self):
+ """delete all tmp images"""
+ pass
+
+
+ def threshold(self, value):
+ self.image = self.image.point(lambda a: a * value + 10)
+
+
+ def run(self, command):
+ """Run a command"""
+
+ popen = subprocess.Popen(command, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ popen.wait()
+ output = popen.stdout.read() + " | " + popen.stderr.read()
+ popen.stdout.close()
+ popen.stderr.close()
+ self.logger.debug("Tesseract ReturnCode %s Output: %s" % (popen.returncode, output))
+
+
+ def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True):
+ #tmpTif = tempfile.NamedTemporaryFile(suffix=".tif")
+ try:
+ tmpTif = open(join("tmp", "tmpTif_%s.tif" % self.__name__), "wb")
+ tmpTif.close()
+
+ #tmpTxt = tempfile.NamedTemporaryFile(suffix=".txt")
+ tmpTxt = open(join("tmp", "tmpTxt_%s.txt" % self.__name__), "wb")
+ tmpTxt.close()
+
+ except IOError, e:
+ self.logger.error(e)
+ return
+
+ self.logger.debug("save tiff")
+ self.image.save(tmpTif.name, 'TIFF')
+
+ if os.name == "nt":
+ tessparams = [join(pypath, "tesseract", "tesseract.exe")]
+ else:
+ tessparams = ['tesseract']
+
+ tessparams.extend([abspath(tmpTif.name), abspath(tmpTxt.name).replace(".txt", "")] )
+
+ if subset and (digits or lowercase or uppercase):
+ #tmpSub = tempfile.NamedTemporaryFile(suffix=".subset")
+ tmpSub = open(join("tmp", "tmpSub_%s.subset" % self.__name__), "wb")
+ tmpSub.write("tessedit_char_whitelist ")
+ if digits:
+ tmpSub.write("0123456789")
+ if lowercase:
+ tmpSub.write("abcdefghijklmnopqrstuvwxyz")
+ if uppercase:
+ tmpSub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ tmpSub.write("\n")
+ tessparams.append("nobatch")
+ tessparams.append(abspath(tmpSub.name))
+ tmpSub.close()
+
+ self.logger.debug("run tesseract")
+ self.run(tessparams)
+ self.logger.debug("read txt")
+
+ try:
+ with open(tmpTxt.name, 'r') as f:
+ self.result_captcha = f.read().replace("\n", "")
+ except:
+ self.result_captcha = ""
+
+ self.logger.debug(self.result_captcha)
+ try:
+ os.remove(tmpTif.name)
+ os.remove(tmpTxt.name)
+ if subset and (digits or lowercase or uppercase):
+ os.remove(tmpSub.name)
+ except:
+ pass
+
+
+ def get_captcha(self, name):
+ raise NotImplementedError
+
+
+ def to_greyscale(self):
+ if self.image.mode != 'L':
+ self.image = self.image.convert('L')
+
+ self.pixels = self.image.load()
+
+
+ def eval_black_white(self, limit):
+ self.pixels = self.image.load()
+ w, h = self.image.size
+ for x in xrange(w):
+ for y in xrange(h):
+ if self.pixels[x, y] > limit:
+ self.pixels[x, y] = 255
+ else:
+ self.pixels[x, y] = 0
+
+
+ def clean(self, allowed):
+ pixels = self.pixels
+
+ w, h = self.image.size
+
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 255:
+ continue
+ # No point in processing white pixels since we only want to remove black pixel
+ count = 0
+
+ try:
+ if pixels[x - 1, y - 1] != 255:
+ count += 1
+ if pixels[x - 1, y] != 255:
+ count += 1
+ if pixels[x - 1, y + 1] != 255:
+ count += 1
+ if pixels[x, y + 1] != 255:
+ count += 1
+ if pixels[x + 1, y + 1] != 255:
+ count += 1
+ if pixels[x + 1, y] != 255:
+ count += 1
+ if pixels[x + 1, y - 1] != 255:
+ count += 1
+ if pixels[x, y - 1] != 255:
+ count += 1
+ except:
+ pass
+
+ # not enough neighbors are dark pixels so mark this pixel
+ # to be changed to white
+ if count < allowed:
+ pixels[x, y] = 1
+
+ # second pass: this time set all 1's to 255 (white)
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 1:
+ pixels[x, y] = 255
+
+ self.pixels = pixels
+
+
+ def derotate_by_average(self):
+ """rotate by checking each angle and guess most suitable"""
+
+ w, h = self.image.size
+ pixels = self.pixels
+
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 0:
+ pixels[x, y] = 155
+
+ highest = {}
+ counts = {}
+
+ for angle in xrange(-45, 45):
+
+ tmpimage = self.image.rotate(angle)
+
+ pixels = tmpimage.load()
+
+ w, h = self.image.size
+
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 0:
+ pixels[x, y] = 255
+
+ count = {}
+
+ for x in xrange(w):
+ count[x] = 0
+ for y in xrange(h):
+ if pixels[x, y] == 155:
+ count[x] += 1
+
+ sum = 0
+ cnt = 0
+
+ for x in count.values():
+ if x != 0:
+ sum += x
+ cnt += 1
+
+ avg = sum / cnt
+ counts[angle] = cnt
+ highest[angle] = 0
+ for x in count.values():
+ if x > highest[angle]:
+ highest[angle] = x
+
+ highest[angle] = highest[angle] - avg
+
+ hkey = 0
+ hvalue = 0
+
+ for key, value in highest.iteritems():
+ if value > hvalue:
+ hkey = key
+ hvalue = value
+
+ self.image = self.image.rotate(hkey)
+ pixels = self.image.load()
+
+ for x in xrange(w):
+ for y in xrange(h):
+ if pixels[x, y] == 0:
+ pixels[x, y] = 255
+
+ if pixels[x, y] == 155:
+ pixels[x, y] = 0
+
+ self.pixels = pixels
+
+
+ def split_captcha_letters(self):
+ captcha = self.image
+ started = False
+ letters = []
+ width, height = captcha.size
+ bottomY, topY = 0, height
+ pixels = captcha.load()
+
+ for x in xrange(width):
+ black_pixel_in_col = False
+ for y in xrange(height):
+ if pixels[x, y] != 255:
+ if not started:
+ started = True
+ firstX = x
+ lastX = x
+
+ if y > bottomY:
+ bottomY = y
+ if y < topY:
+ topY = y
+ if x > lastX:
+ lastX = x
+
+ black_pixel_in_col = True
+
+ if black_pixel_in_col is False and started is True:
+ rect = (firstX, topY, lastX, bottomY)
+ new_captcha = captcha.crop(rect)
+
+ w, h = new_captcha.size
+ if w > 5 and h > 5:
+ letters.append(new_captcha)
+
+ started = False
+ bottomY, topY = 0, height
+
+ return letters
+
+
+ def correct(self, values, var=None):
+ if var:
+ result = var
+ else:
+ result = self.result_captcha
+
+ for key, item in values.iteritems():
+
+ if key.__class__ == str:
+ result = result.replace(key, item)
+ else:
+ for expr in key:
+ result = result.replace(expr, item)
+
+ if var:
+ return result
+ else:
+ self.result_captcha = result
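A sketch of how the helpers above chain together in a concrete OCR plugin: load the image, binarize, denoise, run tesseract on a restricted character set, then post-correct typical confusions. The thresholds and the correction table are illustrative, not tuned values.

    # Hypothetical OCR subclass for a digits-only captcha style.
    from pyload.plugins.internal.OCR import OCR


    class ExampleOCR(OCR):
        __name__ = "ExampleOCR"


        def get_captcha(self, image):
            self.load_image(image)
            self.to_greyscale()
            self.eval_black_white(160)  #: pixels brighter than 160 become white, the rest black
            self.clean(3)               #: drop black pixels with fewer than 3 dark neighbours
            self.run_tesser(subset=True, digits=True, lowercase=False, uppercase=False)
            self.correct({"l": "1", "O": "0", "S": "5"})  #: common tesseract confusions
            return self.result_captcha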
diff --git a/pyload/plugins/internal/SimpleCrypter.py b/pyload/plugins/internal/SimpleCrypter.py
new file mode 100644
index 000000000..ead5cefba
--- /dev/null
+++ b/pyload/plugins/internal/SimpleCrypter.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from urlparse import urlparse
+
+from pyload.plugins.internal.Crypter import Crypter
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, replace_patterns, set_cookies
+from pyload.utils import fixup
+
+
+class SimpleCrypter(Crypter, SimpleHoster):
+ __name__ = "SimpleCrypter"
+ __type__ = "crypter"
+ __version__ = "0.31"
+
+ __pattern__ = r'^unmatchable$'
+ __config__ = [("use_subfolder", "bool", "Save package to subfolder", True), #: Overrides core.config['general']['folder_per_package']
+ ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
+
+ __description__ = """Simple decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it"),
+ ("zoidberg", "zoidberg@mujmail.cz"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ """
+ Following patterns should be defined by each crypter:
+
+ LINK_PATTERN: group(1) must be a download link or a regex to catch more links
+ example: LINK_PATTERN = r'<div class="link"><a href="(.+?)"'
+
+ NAME_PATTERN: (optional) folder name or webpage title
+ example: NAME_PATTERN = r'<title>Files of: (?P<N>[^<]+) folder</title>'
+
+ OFFLINE_PATTERN: (optional) Checks if the file is no longer available online
+ example: OFFLINE_PATTERN = r'File (deleted|not found)'
+
+ TEMP_OFFLINE_PATTERN: (optional) Checks if the file is temporarily offline
+ example: TEMP_OFFLINE_PATTERN = r'Server maintainance'
+
+
+ You can override the getLinks method if you need a more sophisticated way to extract the links.
+
+
+ If the links are split across multiple pages you can define the PAGES_PATTERN regex:
+
+ PAGES_PATTERN: (optional) group(1) should be the number of overall pages containing the links
+ example: PAGES_PATTERN = r'Pages: (\d+)'
+
+ and its loadPage method:
+
+
+ def loadPage(self, page_n):
+ return the html of the page number page_n
+ """
+
+ LINK_PATTERN = None
+
+ NAME_REPLACEMENTS = [("&#?\w+;", fixup)]
+ URL_REPLACEMENTS = []
+
+ TEXT_ENCODING = False #: Set to True or encoding name if encoding in http header is not correct
+ COOKIES = True #: or False or list of tuples [(domain, name, value)]
+
+ LOGIN_ACCOUNT = False
+ LOGIN_PREMIUM = False
+
+
+ def prepare(self):
+ self.info = {}
+ self.links = []
+
+ if self.LOGIN_ACCOUNT and not self.account:
+ self.fail(_("Required account not found"))
+
+ if self.LOGIN_PREMIUM and not self.premium:
+ self.fail(_("Required premium account not found"))
+
+ self.req.setOption("timeout", 120)
+
+ if isinstance(self.COOKIES, list):
+ set_cookies(self.req.cj, self.COOKIES)
+
+ self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS)
+
+
+ def decrypt(self, pyfile):
+ self.prepare()
+
+ self.preload()
+
+ if self.html is None:
+ self.fail(_("No html retrieved"))
+
+ self.checkInfo()
+
+ self.links = self.getLinks()
+
+ if hasattr(self, 'PAGES_PATTERN') and hasattr(self, 'loadPage'):
+ self.handleMultiPages()
+
+ self.logDebug("Package has %d links" % len(self.links))
+
+ if self.links:
+ self.packages = [(self.info['name'], self.links, self.info['folder'])]
+
+
+ def checkStatus(self):
+ status = self.info['status']
+
+ if status == 1:
+ self.offline()
+
+ elif status == 6:
+ self.tempOffline()
+
+
+ def checkNameSize(self):
+ name = self.info['name']
+ url = self.info['url']
+
+ if name and name != url:
+ self.pyfile.name = name
+ else:
+ self.pyfile.name = name = self.info['name'] = urlparse(name).path.split('/')[-1]
+
+ folder = self.info['folder'] = name
+
+ self.logDebug("File name: %s" % name,
+ "File folder: %s" % folder)
+
+
+ def getLinks(self):
+ """
+ Returns the links extracted from self.html
+ You should override this only if it's impossible to extract links using only the LINK_PATTERN.
+ """
+ return re.findall(self.LINK_PATTERN, self.html)
+
+
+ def handleMultiPages(self):
+ try:
+ m = re.search(self.PAGES_PATTERN, self.html)
+ pages = int(m.group(1))
+ except:
+ pages = 1
+
+ for p in xrange(2, pages + 1):
+ self.html = self.loadPage(p)
+ self.links += self.getLinks()
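Putting the documented hooks to use, a minimal SimpleCrypter subclass is little more than its regexes; multi-page folders additionally define PAGES_PATTERN and loadPage(). The site and every pattern below are invented for illustration.

    from pyload.plugins.internal.SimpleCrypter import SimpleCrypter


    class ExampleFolderNet(SimpleCrypter):
        __name__ = "ExampleFolderNet"
        __type__ = "crypter"
        __version__ = "0.01"

        __pattern__ = r'http://(?:www\.)?examplefolder\.net/folder/\w+'

        LINK_PATTERN = r'<td class="link"><a href="(.+?)"'
        NAME_PATTERN = r'<title>Files of: (?P<N>[^<]+) folder</title>'
        OFFLINE_PATTERN = r'Folder (deleted|not found)'
        PAGES_PATTERN = r'Pages:\s*(\d+)'


        def loadPage(self, page_n):
            return self.load(self.pyfile.url, get={'page': page_n})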
diff --git a/pyload/plugins/internal/SimpleHoster.py b/pyload/plugins/internal/SimpleHoster.py
new file mode 100644
index 000000000..4e9db7f73
--- /dev/null
+++ b/pyload/plugins/internal/SimpleHoster.py
@@ -0,0 +1,473 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import time
+from urlparse import urlparse
+
+from pycurl import FOLLOWLOCATION
+
+from pyload.datatype.PyFile import statusMap as _statusMap
+from pyload.network.CookieJar import CookieJar
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Hoster import Hoster
+from pyload.utils import fixup, parseFileSize
+
+
+#@TODO: Adapt and move to PyFile in 0.4.10
+statusMap = dict((v, k) for k, v in _statusMap.iteritems())
+
+
+def replace_patterns(string, ruleslist):
+ for r in ruleslist:
+ rf, rt = r
+ string = re.sub(rf, rt, string)
+ return string
+
+
+def set_cookies(cj, cookies):
+ for cookie in cookies:
+ if isinstance(cookie, tuple) and len(cookie) == 3:
+ domain, name, value = cookie
+ cj.setCookie(domain, name, value)
+
+
+def parseHtmlTagAttrValue(attr_name, tag):
+ m = re.search(r"%s\s*=\s*([\"']?)((?<=\")[^\"]+|(?<=')[^']+|[^>\s\"'][^>\s]*)\1" % attr_name, tag, re.I)
+ return m.group(2) if m else None
+
+
+def parseHtmlForm(attr_str, html, input_names=None):
+ for form in re.finditer(r"(?P<tag><form[^>]*%s[^>]*>)(?P<content>.*?)</?(form|body|html)[^>]*>" % attr_str,
+ html, re.S | re.I):
+ inputs = {}
+ action = parseHtmlTagAttrValue("action", form.group('tag'))
+ for inputtag in re.finditer(r'(<(input|textarea)[^>]*>)([^<]*(?=</\2)|)', form.group('content'), re.S | re.I):
+ name = parseHtmlTagAttrValue("name", inputtag.group(1))
+ if name:
+ value = parseHtmlTagAttrValue("value", inputtag.group(1))
+ if not value:
+ inputs[name] = inputtag.group(3) or ''
+ else:
+ inputs[name] = value
+
+ if isinstance(input_names, dict):
+ # check input attributes
+ for key, val in input_names.iteritems():
+ if key in inputs:
+ if isinstance(val, basestring) and inputs[key] == val:
+ continue
+ elif isinstance(val, tuple) and inputs[key] in val:
+ continue
+ elif hasattr(val, "search") and re.match(val, inputs[key]):
+ continue
+ break #: attribute value does not match
+ else:
+ break #: attribute name does not match
+ else:
+ return action, inputs #: passed attribute check
+ else:
+ # no attribute check
+ return action, inputs
+
+ return {}, None #: no matching form found
+
+
+#: Deprecated
+def parseFileInfo(plugin, url="", html=""):
+ info = plugin.getInfo(url, html)
+ return info['name'], info['size'], info['status'], info['url']
+
+
+#@TODO: Remove in 0.4.10
+def create_getInfo(plugin):
+ return lambda urls: [(info['name'], info['size'], info['status'], info['url']) for info in plugin.parseInfo(urls)]
+
+
+def timestamp():
+ return int(time() * 1000)
+
+
+#@TODO: Move to hoster class in 0.4.10
+def _getDirectLink(self, url):
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+
+ html = self.load(url, ref=True, decode=True)
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+
+ if self.getInfo(url, html)['status'] != 2:
+ try:
+ return re.search(r'Location\s*:\s*(.+)', self.req.http.header, re.I).group(1).rstrip() #@TODO: Remove .rstrip() in 0.4.10
+ except:
+ pass
+
+
+class SimpleHoster(Hoster):
+ __name__ = "SimpleHoster"
+ __type__ = "hoster"
+ __version__ = "0.60"
+
+ __pattern__ = r'^unmatchable$'
+
+ __description__ = """Simple hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ """
+ Info patterns should be defined by each hoster:
+
+ INFO_PATTERN: (optional) Name and Size of the file
+ example: INFO_PATTERN = r'(?P<N>file_name) (?P<S>file_size) (?P<U>size_unit)'
+ or
+ NAME_PATTERN: (optional) Name that will be set for the file
+ example: NAME_PATTERN = r'(?P<N>file_name)'
+ SIZE_PATTERN: (optional) Size that will be checked for the file
+ example: SIZE_PATTERN = r'(?P<S>file_size) (?P<U>size_unit)'
+
+ OFFLINE_PATTERN: (optional) Check if the file is no longer available online
+ example: OFFLINE_PATTERN = r'File (deleted|not found)'
+
+ TEMP_OFFLINE_PATTERN: (optional) Check if the file is temporarily offline
+ example: TEMP_OFFLINE_PATTERN = r'Server (maintenance|maintainance)'
+
+
+ Error handling patterns are all optional:
+
+ WAIT_PATTERN: (optional) Detect waiting time
+ example: WAIT_PATTERN = r''
+
+ PREMIUM_ONLY_PATTERN: (optional) Check if the file can be downloaded only with a premium account
+ example: PREMIUM_ONLY_PATTERN = r'Premium account required'
+
+ ERROR_PATTERN: (optional) Detect any error preventing download
+ example: ERROR_PATTERN = r''
+
+
+ Instead of overriding the handleFree and handlePremium methods you can define the following patterns for direct download:
+
+ LINK_FREE_PATTERN: (optional) group(1) should be the direct link for free download
+ example: LINK_FREE_PATTERN = r'<div class="link"><a href="(.+?)"'
+
+ LINK_PREMIUM_PATTERN: (optional) group(1) should be the direct link for premium download
+ example: LINK_PREMIUM_PATTERN = r'<div class="link"><a href="(.+?)"'
+ """
+
+ NAME_REPLACEMENTS = [("&#?\w+;", fixup)]
+ SIZE_REPLACEMENTS = []
+ URL_REPLACEMENTS = []
+
+ TEXT_ENCODING = False #: Set to True or encoding name if encoding value in http header is not correct
+ COOKIES = True #: or False or list of tuples [(domain, name, value)]
+ FORCE_CHECK_TRAFFIC = False #: Set to True to force checking traffic left for premium account
+ CHECK_DIRECT_LINK = None #: when None self-set to True if self.account else False
+ MULTI_HOSTER = False #: Set to True to leech other hoster link
+ CONTENT_DISPOSITION = False #: Set to True to replace file name with content-disposition value in http header
+
+
+ @classmethod
+ def parseInfo(cls, urls):
+ for url in urls:
+ url = replace_patterns(url, cls.URL_REPLACEMENTS)
+ yield cls.getInfo(url)
+
+
+ @classmethod
+ def getInfo(cls, url="", html=""):
+ info = {'name': url or _("Unknown"), 'size': 0, 'status': 3, 'url': url}
+
+ if not html:
+ if url:
+ html = getURL(url, cookies=cls.COOKIES, decode=not cls.TEXT_ENCODING)
+ if isinstance(cls.TEXT_ENCODING, basestring):
+ html = unicode(html, cls.TEXT_ENCODING)
+ else:
+ return info
+
+ online = False
+
+ if hasattr(cls, "OFFLINE_PATTERN") and re.search(cls.OFFLINE_PATTERN, html):
+ info['status'] = 1
+
+ elif hasattr(cls, "TEMP_OFFLINE_PATTERN") and re.search(cls.TEMP_OFFLINE_PATTERN, html):
+ info['status'] = 6
+
+ else:
+ try:
+ info.update(re.match(cls.__pattern__, url).groupdict())
+ except:
+ pass
+
+ for pattern in ("INFO_PATTERN", "NAME_PATTERN", "SIZE_PATTERN"):
+ try:
+ attr = getattr(cls, pattern)
+ info.update(re.search(attr, html).groupdict())
+ except AttributeError:
+ continue
+ else:
+ online = True
+
+ if online:
+ info['status'] = 2
+
+ if 'N' in info:
+ info['name'] = replace_patterns(info['N'].strip(), cls.NAME_REPLACEMENTS)
+
+ if 'S' in info:
+ size = replace_patterns(info['S'] + info['U'] if 'U' in info else info['S'], cls.SIZE_REPLACEMENTS)
+ info['size'] = parseFileSize(size)
+
+ elif isinstance(info['size'], basestring):
+ unit = info['units'] if 'units' in info else None
+ info['size'] = parseFileSize(info['size'], unit)
+
+ return info
+
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = self.premium
+
+
+ def prepare(self):
+ self.info = {}
+ self.link = "" #@TODO: Move to hoster class in 0.4.10
+ self.directDL = False #@TODO: Move to hoster class in 0.4.10
+ self.multihost = False #@TODO: Move to hoster class in 0.4.10
+
+ self.req.setOption("timeout", 120)
+
+ if isinstance(self.COOKIES, list):
+ set_cookies(self.req.cj, self.COOKIES)
+
+ if (self.MULTI_HOSTER
+ and self.__pattern__ != self.core.pluginManager.hosterPlugins[self.__name__]['pattern']
+ and re.match(self.__pattern__, self.pyfile.url) is None):
+
+ self.logInfo("Multi hoster detected")
+
+ if self.account:
+ self.multihost = True
+ return
+ else:
+ self.fail(_("Only registered or premium users can use url leech feature"))
+
+ if self.CHECK_DIRECT_LINK is None:
+ self.directDL = bool(self.account)
+
+ self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS)
+
+
+ def preload(self):
+ self.html = self.load(self.pyfile.url, cookies=bool(self.COOKIES), decode=not self.TEXT_ENCODING)
+
+ if isinstance(self.TEXT_ENCODING, basestring):
+ self.html = unicode(self.html, self.TEXT_ENCODING)
+
+
+ def process(self, pyfile):
+ self.prepare()
+
+ if self.multihost:
+ self.logDebug("Looking for leeched download link...")
+ self.handleMulti()
+
+ elif self.directDL:
+ self.logDebug("Looking for direct download link...")
+ self.handleDirect()
+
+ if not self.link:
+ self.preload()
+
+ if self.html is None:
+ self.fail(_("No html retrieved"))
+
+ self.checkErrors()
+
+ premium_only = 'error' in self.info and self.info['error'] == "premium-only"
+
+ info = self.getInfo(pyfile.url, self.html)
+ self._updateInfo(info)
+
+ self.checkNameSize()
+
+ #: Usually premium only pages doesn't show any file information
+ if not premium_only:
+ self.checkStatus()
+
+ if self.premium and (not self.FORCE_CHECK_TRAFFIC or self.checkTrafficLeft()):
+ self.logDebug("Handled as premium download")
+ self.handlePremium()
+
+ elif premium_only:
+ self.fail(_("Link require a premium account to be handled"))
+
+ else:
+ self.logDebug("Handled as free download")
+ self.handleFree()
+
+ if self.link:
+ self.download(self.link, disposition=self.CONTENT_DISPOSITION)
+
+
+ def checkErrors(self):
+ if hasattr(self, 'WAIT_PATTERN'):
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ wait_time = sum([int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1}[u.lower()] for v, u in
+ re.findall(r'(\d+)\s*(hr|hour|min|sec)', m.group(0), re.I)])
+ self.wait(wait_time, False)
+ return
+
+ if hasattr(self, 'PREMIUM_ONLY_PATTERN'):
+ m = re.search(self.PREMIUM_ONLY_PATTERN, self.html)
+ if m:
+ self.info['error'] = "premium-only"
+ return
+
+ if hasattr(self, 'ERROR_PATTERN'):
+ m = re.search(self.ERROR_PATTERN, self.html)
+ if m:
+ e = self.info['error'] = m.group(1)
+ self.error(e)
+
+ self.info.pop('error', None)
+
+
+ def checkStatus(self):
+ status = self.info['status']
+
+ if status == 1:
+ self.offline()
+
+ elif status == 6:
+ self.tempOffline()
+
+ elif status != 2:
+ self.logInfo(_("File status: %s") % statusMap[status],
+ _("File info: %s") % self.info)
+ self.error(_("No file info retrieved"))
+
+
+ def checkNameSize(self):
+ name = self.info['name']
+ size = self.info['size']
+ url = self.info['url']
+
+ if name and name != url:
+ self.pyfile.name = name
+ else:
+ self.pyfile.name = name = self.info['name'] = urlparse(name).path.split('/')[-1]
+
+ if size > 0:
+ self.pyfile.size = size
+ else:
+ size = "Unknown"
+
+ self.logDebug("File name: %s" % name,
+ "File size: %s" % size)
+
+
+ def checkInfo(self):
+ self.checkErrors()
+
+ self._updateInfo(self.getInfo(self.pyfile.url, self.html or ""))
+
+ self.checkNameSize()
+ self.checkStatus()
+
+
+ #: Deprecated
+ def getFileInfo(self):
+ return self.checkInfo()
+
+
+ def _updateInfo(self, info):
+ self.logDebug(_("File info (before update): %s") % self.info)
+ self.info.update(info)
+ self.logDebug(_("File info (after update): %s") % self.info)
+
+
+ def handleDirect(self):
+ self.link = _getDirectLink(self, self.pyfile.url)
+
+ if self.link:
+ self.logInfo(_("Direct download link detected"))
+
+ self._updateInfo(self.getInfo(self.pyfile.url))
+ self.checkNameSize()
+
+ else:
+ self.logDebug(_("Direct download link not found"))
+
+
+ def handleMulti(self): #: Multi-hoster handler
+ pass
+
+
+ def handleFree(self):
+ if not hasattr(self, 'LINK_FREE_PATTERN'):
+ self.fail(_("Free download not implemented"))
+
+ try:
+ m = re.search(self.LINK_FREE_PATTERN, self.html)
+ if m is None:
+ self.error(_("Free download link not found"))
+
+ self.link = m.group(1)
+
+ except Exception, e:
+ self.fail(str(e))
+
+
+ def handlePremium(self):
+ if not hasattr(self, 'LINK_PREMIUM_PATTERN'):
+ self.fail(_("Premium download not implemented"))
+
+ try:
+ m = re.search(self.LINK_PREMIUM_PATTERN, self.html)
+ if m is None:
+ self.error(_("Premium download link not found"))
+
+ self.link = m.group(1)
+
+ except Exception, e:
+ self.fail(str(e))
+
+
+ def longWait(self, wait_time=None, max_tries=3):
+ if wait_time and isinstance(wait_time, (int, long, float)):
+ time_str = "%dh %dm" % divmod(wait_time / 60, 60)
+ else:
+ wait_time = 900
+ time_str = _("(unknown time)")
+ max_tries = 100
+
+ self.logInfo(_("Download limit reached, reconnect or wait %s") % time_str)
+
+ self.setWait(wait_time, True)
+ self.wait()
+ self.retry(max_tries=max_tries, reason=_("Download limit reached"))
+
+
+ def parseHtmlForm(self, attr_str='', input_names=None):
+ return parseHtmlForm(attr_str, self.html, input_names)
+
+
+ def checkTrafficLeft(self):
+ traffic = self.account.getAccountInfo(self.user, True)['trafficleft']
+
+ if traffic is None:
+ return False
+ elif traffic == -1:
+ return True
+ else:
+ size = self.pyfile.size / 1024
+ self.logInfo(_("Filesize: %i KiB, Traffic left for user %s: %i KiB") % (size, self.user, traffic))
+ return size <= traffic
+
+
+ def error(self, reason="", type="parse"):
+ return super(SimpleHoster, self).error(reason, type)
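The hoster counterpart looks much the same: a bare SimpleHoster subclass declares the info/offline patterns plus a free link pattern and exports getInfo via create_getInfo() for link checking. Site, markup and patterns are invented for this sketch.

    from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo


    class ExampleShareCom(SimpleHoster):
        __name__ = "ExampleShareCom"
        __type__ = "hoster"
        __version__ = "0.01"

        __pattern__ = r'http://(?:www\.)?exampleshare\.com/file/\w+'

        INFO_PATTERN = r'<h1>(?P<N>.+?)</h1>\s*<span>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</span>'
        OFFLINE_PATTERN = r'>File (deleted|not found)<'
        WAIT_PATTERN = r'You have to wait \d+ min'
        LINK_FREE_PATTERN = r'<a id="download" href="(.+?)"'


    getInfo = create_getInfo(ExampleShareCom)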
diff --git a/pyload/plugins/internal/UnRar.py b/pyload/plugins/internal/UnRar.py
new file mode 100644
index 000000000..31a0d7642
--- /dev/null
+++ b/pyload/plugins/internal/UnRar.py
@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+
+import os
+import re
+
+from glob import glob
+from os.path import basename, join
+from string import digits
+from subprocess import Popen, PIPE
+
+from pyload.plugins.internal.AbstractExtractor import AbtractExtractor, WrongPassword, ArchiveError, CRCError
+from pyload.utils import safe_join, decode
+
+
+def renice(pid, value):
+ if os.name != "nt" and value:
+ try:
+ Popen(["renice", str(value), str(pid)], stdout=PIPE, stderr=PIPE, bufsize=-1)
+ except:
+ print "Renice failed"
+
+
+class UnRar(AbtractExtractor):
+ __name__ = "UnRar"
+ __version__ = "0.18"
+
+ __description__ = """Rar extractor plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ CMD = "unrar"
+
+ # there are some more uncovered rar formats
+ re_version = re.compile(r"(UNRAR 5[\d.]+(.*?)freeware)")
+ re_splitfile = re.compile(r"(.*)\.part(\d+)\.rar$", re.I)
+ re_partfiles = re.compile(r".*\.(rar|r\d+)", re.I)
+ re_filelist = re.compile(r"(.+)\s+(\d+)\s+(\d+)\s+")
+ re_filelist5 = re.compile(r"(.+)\s+(\d+)\s+\d\d-\d\d-\d\d\s+\d\d:\d\d\s+(.+)")
+ re_wrongpwd = re.compile("(Corrupt file or wrong password|password incorrect)", re.I)
+
+
+ @staticmethod
+ def checkDeps():
+ if os.name == "nt":
+ UnRar.CMD = join(pypath, "UnRAR.exe")
+ p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE)
+ p.communicate()
+ else:
+ try:
+ p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE)
+ p.communicate()
+ except OSError:
+
+ # fallback to rar
+ UnRar.CMD = "rar"
+ p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE)
+ p.communicate()
+
+ return True
+
+
+ @staticmethod
+ def getTargets(files_ids):
+ result = []
+
+ for file, id in files_ids:
+ if not file.endswith(".rar"):
+ continue
+
+ match = UnRar.re_splitfile.findall(file)
+ if match:
+ # only add first parts
+ if int(match[0][1]) == 1:
+ result.append((file, id))
+ else:
+ result.append((file, id))
+
+ return result
+
+
+ def init(self):
+ self.passwordProtected = False
+ self.headerProtected = False #: list files will not work without password
+ self.smallestFile = None #: small file to test passwords
+ self.password = "" #: save the correct password
+
+
+ def checkArchive(self):
+ p = self.call_unrar("l", "-v", self.file)
+ out, err = p.communicate()
+ if self.re_wrongpwd.search(err):
+ self.passwordProtected = True
+ self.headerProtected = True
+ return True
+
+ # output only used to check if passworded files are present
+ if self.re_version.search(out):
+ for attr, size, name in self.re_filelist5.findall(out):
+ if attr.startswith("*"):
+ self.passwordProtected = True
+ return True
+ else:
+ for name, size, packed in self.re_filelist.findall(out):
+ if name.startswith("*"):
+ self.passwordProtected = True
+ return True
+
+ self.listContent()
+ if not self.files:
+ raise ArchiveError("Empty Archive")
+
+ return False
+
+
+ def checkPassword(self, password):
+ # at this point we can only verify header protected files
+ if self.headerProtected:
+ p = self.call_unrar("l", "-v", self.file, password=password)
+ out, err = p.communicate()
+ if self.re_wrongpwd.search(err):
+ return False
+
+ return True
+
+
+ def extract(self, progress, password=None):
+ command = "x" if self.fullpath else "e"
+
+ p = self.call_unrar(command, self.file, self.out, password=password)
+ renice(p.pid, self.renice)
+
+ progress(0)
+ progressstring = ""
+ while True:
+ c = p.stdout.read(1)
+ # quit loop on eof
+ if not c:
+ break
+ # reading a percentage sign -> set progress and restart
+ if c == '%':
+ progress(int(progressstring))
+ progressstring = ""
+ # not reading a digit -> therefore restart
+ elif c not in digits:
+ progressstring = ""
+ # add digit to progressstring
+ else:
+ progressstring = progressstring + c
+ progress(100)
+
+ # retrieve stderr
+ err = p.stderr.read()
+
+ if "CRC failed" in err and not password and not self.passwordProtected:
+ raise CRCError
+ elif "CRC failed" in err:
+ raise WrongPassword
+ if err.strip(): #: raise error if anything is on stderr
+ raise ArchiveError(err.strip())
+ if p.returncode:
+ raise ArchiveError("Process terminated")
+
+ if not self.files:
+ self.password = password
+ self.listContent()
+
+
+ def getDeleteFiles(self):
+ if ".part" in basename(self.file):
+ return glob(re.sub("(?<=\.part)([01]+)", "*", self.file, re.I))
+ # get files which matches .r* and filter unsuited files out
+ parts = glob(re.sub(r"(?<=\.r)ar$", "*", self.file, re.I))
+ return filter(lambda x: self.re_partfiles.match(x), parts)
+
+
+ def listContent(self):
+ command = "vb" if self.fullpath else "lb"
+ p = self.call_unrar(command, "-v", self.file, password=self.password)
+ out, err = p.communicate()
+
+ if "Cannot open" in err:
+ raise ArchiveError("Cannot open file")
+
+ if err.strip(): #: only log error at this point
+ self.m.logError(err.strip())
+
+ result = set()
+
+ for f in decode(out).splitlines():
+ f = f.strip()
+ result.add(safe_join(self.out, f))
+
+ self.files = result
+
+
+ def call_unrar(self, command, *xargs, **kwargs):
+ args = []
+ # overwrite flag
+ args.append("-o+") if self.overwrite else args.append("-o-")
+
+ if self.excludefiles:
+ for word in self.excludefiles.split(';'):
+ args.append("-x%s" % word)
+
+ # assume yes on all queries
+ args.append("-y")
+
+ # set a password
+ if "password" in kwargs and kwargs['password']:
+ args.append("-p%s" % kwargs['password'])
+ else:
+ args.append("-p-")
+
+ # NOTE: return codes are not reliable, some kind of threading, cleanup whatever issue
+ call = [self.CMD, command] + args + list(xargs)
+ self.m.logDebug(" ".join(call))
+
+ p = Popen(call, stdout=PIPE, stderr=PIPE)
+
+ return p
diff --git a/pyload/plugins/internal/UnZip.py b/pyload/plugins/internal/UnZip.py
new file mode 100644
index 000000000..413c0699e
--- /dev/null
+++ b/pyload/plugins/internal/UnZip.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+import sys
+import zipfile
+
+from pyload.plugins.internal.AbstractExtractor import AbtractExtractor
+
+
+class UnZip(AbtractExtractor):
+ __name__ = "UnZip"
+ __version__ = "0.1"
+
+ __description__ = """Zip extractor plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ @staticmethod
+ def checkDeps():
+ return sys.version_info[:2] >= (2, 6)
+
+
+ @staticmethod
+ def getTargets(files_ids):
+ result = []
+
+ for file, id in files_ids:
+ if file.endswith(".zip"):
+ result.append((file, id))
+
+ return result
+
+
+ def extract(self, progress, password=None):
+ z = zipfile.ZipFile(self.file)
+ self.files = z.namelist()
+ z.extractall(self.out)
+
+
+ def getDeleteFiles(self):
+ return [self.file]
diff --git a/pyload/plugins/internal/UpdateManager.py b/pyload/plugins/internal/UpdateManager.py
new file mode 100644
index 000000000..082721e2f
--- /dev/null
+++ b/pyload/plugins/internal/UpdateManager.py
@@ -0,0 +1,300 @@
+# -*- coding: utf-8 -*-
+
+import re
+import sys
+
+from operator import itemgetter
+from os import path, remove, stat
+
+from pyload.network.RequestFactory import getURL
+from pyload.plugins.internal.Addon import Expose, Addon, threaded
+from pyload.utils import safe_join
+
+
+class UpdateManager(Addon):
+ __name__ = "UpdateManager"
+ __type__ = "addon"
+ __version__ = "0.40"
+
+ __config__ = [("activated" , "bool" , "Activated" , True ),
+ ("mode" , "pyLoad + plugins;plugins only", "Check updates for" , "pyLoad + plugins"),
+ ("interval" , "int" , "Check interval in hours" , 8 ),
+ ("reloadplugins", "bool" , "Monitor plugins for code changes (debug mode only)", True ),
+ ("nodebugupdate", "bool" , "Don't check for updates in debug mode" , True )]
+
+ __description__ = """Check for updates"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ # event_list = ["pluginConfigChanged"]
+
+ SERVER_URL = "http://updatemanager.pyload.org"
+ MIN_INTERVAL = 6 * 60 * 60 #: 6h minimum check interval (value is in seconds)
+
+
+ def pluginConfigChanged(self, plugin, name, value):
+ if name == "interval":
+ interval = value * 60 * 60
+ if self.MIN_INTERVAL <= interval != self.interval:
+ self.core.scheduler.removeJob(self.cb)
+ self.interval = interval
+ self.initPeriodical()
+ else:
+ self.logDebug("Invalid interval value, kept current")
+
+ elif name == "reloadplugins":
+ if self.cb2:
+ self.core.scheduler.removeJob(self.cb2)
+ if value is True and self.core.debug:
+ self.periodical2()
+
+
+ def coreReady(self):
+ self.pluginConfigChanged(self.__name__, "interval", self.getConfig("interval"))
+ x = lambda: self.pluginConfigChanged(self.__name__, "reloadplugins", self.getConfig("reloadplugins"))
+ self.core.scheduler.addJob(10, x, threaded=False)
+
+
+ def unload(self):
+ self.pluginConfigChanged(self.__name__, "reloadplugins", False)
+
+
+ def setup(self):
+ self.cb2 = None
+ self.interval = self.MIN_INTERVAL
+ self.updating = False
+ self.info = {'pyload': False, 'version': None, 'plugins': False}
+ self.mtimes = {} #: store modification time for each plugin
+
+
+ def periodical2(self):
+ if not self.updating:
+ self.autoreloadPlugins()
+
+ self.cb2 = self.core.scheduler.addJob(4, self.periodical2, threaded=False)
+
+
+ @Expose
+ def autoreloadPlugins(self):
+ """ reload and reindex all modified plugins """
+ modules = filter(
+ lambda m: m and (m.__name__.startswith("pyload.plugins.") or
+ m.__name__.startswith("userplugins.")) and
+ m.__name__.count(".") >= 2, sys.modules.itervalues()
+ )
+
+ reloads = []
+
+ for m in modules:
+ root, type, name = m.__name__.rsplit(".", 2)
+ id = (type, name)
+ if type in self.core.pluginManager.plugins:
+ f = m.__file__.replace(".pyc", ".py")
+ if not path.isfile(f):
+ continue
+
+ mtime = stat(f).st_mtime
+
+ if id not in self.mtimes:
+ self.mtimes[id] = mtime
+ elif self.mtimes[id] < mtime:
+ reloads.append(id)
+ self.mtimes[id] = mtime
+
+        return bool(self.core.pluginManager.reloadPlugins(reloads))
+
+
+ def periodical(self):
+        if self.info['pyload'] or (self.getConfig("nodebugupdate") and self.core.debug):
+ return
+
+ self.updateThread()
+
+
+ def server_request(self):
+ try:
+ return getURL(self.SERVER_URL, get={'v': self.core.api.getServerVersion()}).splitlines()
+ except:
+ self.logWarning(_("Unable to contact server to get updates"))
+
+
+ @threaded
+ def updateThread(self):
+ self.updating = True
+
+ status = self.update(onlyplugin=self.getConfig("mode") == "plugins only")
+
+ if status == 2:
+ self.core.api.restart()
+ else:
+ self.updating = False
+
+
+ @Expose
+ def updatePlugins(self):
+ """ simple wrapper for calling plugin update quickly """
+ return self.update(onlyplugin=True)
+
+
+ @Expose
+ def update(self, onlyplugin=False):
+ """ check for updates """
+ data = self.server_request()
+
+ if not data:
+ exitcode = 0
+
+ elif data[0] == "None":
+ self.logInfo(_("No new pyLoad version available"))
+ updates = data[1:]
+ exitcode = self._updatePlugins(updates)
+
+ elif onlyplugin:
+ exitcode = 0
+
+ else:
+ newversion = data[0]
+ self.logInfo(_("*** New pyLoad Version %s available ***") % newversion)
+ self.logInfo(_("*** Get it here: https://github.com/pyload/pyload/releases ***"))
+ exitcode = 3
+ self.info['pyload'] = True
+ self.info['version'] = newversion
+
+ return exitcode #: 0 = No plugins updated; 1 = Plugins updated; 2 = Plugins updated, but restart required; 3 = No plugins updated, new pyLoad version available
+
+
+ def _updatePlugins(self, updates):
+ """ check for plugin updates """
+
+ if self.info['plugins']:
+ return False #: plugins were already updated
+
+ exitcode = 0
+ updated = []
+
+ vre = re.compile(r'__version__.*=.*("|\')([\d.]+)')
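+        # Layout of the updates list: item 0 = remote URL template for plugin files,
+        # item 1 = '|'-separated field schema (used to build the plugin dicts below),
+        # remaining items = one plugin per line in that schema, optionally followed by
+        # a "BLACKLIST" marker and the blacklist entries.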
+ url = updates[0]
+ schema = updates[1].split('|')
+
+ if "BLACKLIST" in updates:
+ blacklist = updates[updates.index('BLACKLIST') + 1:]
+ updates = updates[2:updates.index('BLACKLIST')]
+ else:
+ blacklist = None
+ updates = updates[2:]
+
+ upgradable = sorted(map(lambda x: dict(zip(schema, x.split('|'))), updates),
+ key=itemgetter("type", "name"))
+
+ for plugin in upgradable:
+ filename = plugin['name']
+ type = plugin['type']
+ version = plugin['version']
+
+ if filename.endswith(".pyc"):
+ name = filename[:filename.find("_")]
+ else:
+ name = filename.replace(".py", "")
+
+ plugins = getattr(self.core.pluginManager, "%sPlugins" % type)
+
+ oldver = float(plugins[name]['version']) if name in plugins else None
+ newver = float(version)
+
+ if not oldver:
+ msg = "New plugin: [%(type)s] %(name)s (v%(newver).2f)"
+ elif newver > oldver:
+ msg = "New version of plugin: [%(type)s] %(name)s (v%(oldver).2f -> v%(newver).2f)"
+ else:
+ continue
+
+ self.logInfo(_(msg) % {'type' : type,
+ 'name' : name,
+ 'oldver': oldver,
+ 'newver': newver})
+ try:
+ content = getURL(url % plugin)
+ m = vre.search(content)
+
+ if m and m.group(2) == version:
+                    f = open(safe_join("userplugins", type, filename), "wb")
+                    f.write(content)
+                    f.close()
+                    updated.append((type, name))
+ else:
+ raise Exception, _("Version mismatch")
+
+ except Exception, e:
+ self.logError(_("Error updating plugin %s") % filename, e)
+
+ if blacklist:
+ blacklisted = map(lambda x: (x.split('|')[0], x.split('|')[1].rsplit('.', 1)[0]), blacklist)
+
+            # Always protect internal plugins from removal
+            blacklisted = [(t, n) for t, n in blacklisted if t != "internal"]
+
+ blacklisted = sorted(blacklisted)
+ removed = self.removePlugins(blacklisted)
+ for t, n in removed:
+ self.logInfo(_("Removed blacklisted plugin [%(type)s] %(name)s") % {
+ 'type': t,
+ 'name': n,
+ })
+
+ if updated:
+ reloaded = self.core.pluginManager.reloadPlugins(updated)
+ if reloaded:
+ self.logInfo(_("Plugins updated and reloaded"))
+ exitcode = 1
+ else:
+ self.logInfo(_("*** Plugins have been updated, but need a pyLoad restart to be reloaded ***"))
+ self.info['plugins'] = True
+ exitcode = 2
+ else:
+ self.logInfo(_("No plugin updates available"))
+
+ return exitcode #: 0 = No plugins updated; 1 = Plugins updated; 2 = Plugins updated, but restart required
+
+
+ @Expose
+ def removePlugins(self, type_plugins):
+ """ delete plugins from disk """
+
+ if not type_plugins:
+ return
+
+ self.logDebug("Requested deletion of plugins: %s" % type_plugins)
+
+ removed = []
+
+ for type, name in type_plugins:
+ err = False
+ file = name + ".py"
+
+ for root in ("userplugins", path.join(pypath, "pyload", "plugins")):
+
+ filename = safe_join(root, type, file)
+ try:
+ remove(filename)
+ except Exception, e:
+ self.logDebug("Error deleting: %s" % path.basename(filename), e)
+ err = True
+
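+                # also remove the compiled bytecode (.pyc), deactivating addon plugins beforehand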
+ filename += "c"
+ if path.isfile(filename):
+ try:
+ if type == "addon":
+ self.manager.deactivateAddon(name)
+ remove(filename)
+ except Exception, e:
+ self.logDebug("Error deleting: %s" % path.basename(filename), e)
+ err = True
+
+ if not err:
+ id = (type, name)
+ removed.append(id)
+
+ return removed #: return a list of the plugins successfully removed
diff --git a/pyload/plugins/internal/XFSAccount.py b/pyload/plugins/internal/XFSAccount.py
new file mode 100644
index 000000000..1e18c09bd
--- /dev/null
+++ b/pyload/plugins/internal/XFSAccount.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from time import gmtime, mktime, strptime
+from urlparse import urljoin
+
+from pyload.plugins.internal.Account import Account
+from pyload.plugins.internal.SimpleHoster import parseHtmlForm, set_cookies
+
+
+class XFSAccount(Account):
+ __name__ = "XFSAccount"
+ __type__ = "account"
+ __version__ = "0.26"
+
+ __description__ = """XFileSharing account plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = None
+ HOSTER_URL = None
+
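+    # note: COOKIES is evaluated at class-definition time, while HOSTER_DOMAIN is still None;
+    # subclasses that set HOSTER_DOMAIN should also redefine COOKIES so the cookie gets a real domain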
+ COOKIES = [(HOSTER_DOMAIN, "lang", "english")]
+
+ PREMIUM_PATTERN = r'\(Premium only\)'
+
+ VALID_UNTIL_PATTERN = r'>Premium.[Aa]ccount expire:.*?(\d{1,2} [\w^_]+ \d{4})'
+
+ TRAFFIC_LEFT_PATTERN = r'>Traffic available today:.*?<b>\s*(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>'
+ TRAFFIC_LEFT_UNIT = "MB" #: used only if no group <U> was found
+
+ LOGIN_FAIL_PATTERN = r'>(Incorrect Login or Password|Error<)'
+
+
+ def init(self):
+ # if not self.HOSTER_DOMAIN:
+ # self.fail(_("Missing HOSTER_DOMAIN"))
+
+ if not self.HOSTER_URL:
+ self.HOSTER_URL = "http://www.%s/" % self.HOSTER_DOMAIN
+
+
+ def loadAccountInfo(self, user, req):
+ validuntil = None
+ trafficleft = None
+ premium = None
+
+ html = req.load(self.HOSTER_URL, get={'op': "my_account"}, decode=True)
+
+ premium = True if re.search(self.PREMIUM_PATTERN, html) else False
+
+ m = re.search(self.VALID_UNTIL_PATTERN, html)
+ if m:
+ expiredate = m.group(1).strip()
+ self.logDebug("Expire date: " + expiredate)
+
+ try:
+ validuntil = mktime(strptime(expiredate, "%d %B %Y"))
+
+ except Exception, e:
+ self.logError(e)
+
+ else:
+ if validuntil > mktime(gmtime()):
+ premium = True
+ else:
+ premium = False
+ validuntil = None #: registered account type (not premium)
+
+ m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if m:
+ try:
+ traffic = m.groupdict()
+ size = traffic['S']
+
+ if "nlimited" in size:
+ trafficleft = -1
+ if validuntil is None:
+ validuntil = -1
+ else:
+                    if traffic['U']:
+ unit = traffic['U']
+ elif isinstance(self.TRAFFIC_LEFT_UNIT, basestring):
+ unit = self.TRAFFIC_LEFT_UNIT
+ else:
+ unit = ""
+
+ trafficleft = self.parseTraffic(size + unit)
+
+ except Exception, e:
+ self.logError(e)
+ else:
+ if premium:
+ trafficleft = -1
+
+ return {'validuntil': validuntil, 'trafficleft': trafficleft, 'premium': premium}
+
+
+ def login(self, user, data, req):
+ if isinstance(self.COOKIES, list):
+ set_cookies(req.cj, self.COOKIES)
+
+ url = urljoin(self.HOSTER_URL, "login.html")
+ html = req.load(url, decode=True)
+
+ action, inputs = parseHtmlForm('name="FL"', html)
+ if not inputs:
+ inputs = {'op': "login",
+ 'redirect': self.HOSTER_URL}
+
+ inputs.update({'login': user,
+ 'password': data['password']})
+
+ html = req.load(self.HOSTER_URL, post=inputs, decode=True)
+
+ if re.search(self.LOGIN_FAIL_PATTERN, html):
+ self.wrongPassword()
diff --git a/pyload/plugins/internal/XFSCrypter.py b/pyload/plugins/internal/XFSCrypter.py
new file mode 100644
index 000000000..2de39f4bc
--- /dev/null
+++ b/pyload/plugins/internal/XFSCrypter.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class XFSCrypter(SimpleCrypter):
+ __name__ = "XFSCrypter"
+ __type__ = "crypter"
+ __version__ = "0.04"
+
+ __pattern__ = r'^unmatchable$'
+
+ __description__ = """XFileSharing decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = None
+ HOSTER_NAME = None
+
+ URL_REPLACEMENTS = [(r'&?per_page=\d+', ""), (r'[?/&]+$', ""), (r'(.+/[^?]+)$', r'\1?'), (r'$', r'&per_page=10000')]
+
+ COOKIES = [(HOSTER_DOMAIN, "lang", "english")]
+
+ LINK_PATTERN = r'<(?:td|TD).*?>\s*<a href="(.+?)".*?>.+?(?:</a>)?\s*</(?:td|TD)>'
+ NAME_PATTERN = r'<[tT]itle>.*?\: (?P<N>.+) folder</[tT]itle>'
+
+ OFFLINE_PATTERN = r'>\s*\w+ (Not Found|file (was|has been) removed)'
+ TEMP_OFFLINE_PATTERN = r'>\s*\w+ server (is in )?(maintenance|maintainance)'
diff --git a/pyload/plugins/internal/XFSHoster.py b/pyload/plugins/internal/XFSHoster.py
new file mode 100644
index 000000000..3ae9ee05a
--- /dev/null
+++ b/pyload/plugins/internal/XFSHoster.py
@@ -0,0 +1,344 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from random import random
+from time import sleep
+
+from pycurl import FOLLOWLOCATION
+
+from pyload.plugins.hoster.UnrestrictLi import secondsToMidnight
+from pyload.plugins.internal.CaptchaService import ReCaptcha, SolveMedia
+from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from pyload.utils import html_unescape
+
+
+class XFSHoster(SimpleHoster):
+ __name__ = "XFSHoster"
+ __type__ = "hoster"
+ __version__ = "0.22"
+
+ __pattern__ = r'^unmatchable$'
+
+ __description__ = """XFileSharing hoster plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
+ ("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
+
+
+ HOSTER_DOMAIN = None
+ HOSTER_NAME = None
+
+ URL_REPLACEMENTS = [(r'/(?:embed-)?(\w{12}).*', r'/\1')] #: plus support embedded files
+
+ TEXT_ENCODING = False
+ COOKIES = [(HOSTER_DOMAIN, "lang", "english")]
+ CHECK_DIRECT_LINK = None
+ MULTI_HOSTER = False
+
+ INFO_PATTERN = r'<tr><td align=right><b>Filename:</b></td><td nowrap>(?P<N>[^<]+)</td></tr>\s*.*?<small>\((?P<S>[^<]+)\)</small>'
+ NAME_PATTERN = r'(>Filename:</b></td><td nowrap>|name="fname" value="|<span class="name">|<[Tt]itle>.*?Download )(?P<N>.+?)(\s*<|")'
+ SIZE_PATTERN = r'(>Size:</b></td><td>|>File:.*>|<span class="size">)(?P<S>[\d.,]+)\s*(?P<U>[\w^_]+)'
+
+ OFFLINE_PATTERN = r'>\s*\w+ (Not Found|file (was|has been) removed)'
+ TEMP_OFFLINE_PATTERN = r'>\s*\w+ server (is in )?(maintenance|maintainance)'
+
+ WAIT_PATTERN = r'<span id="countdown_str">.*?>(\d+)</span>|id="countdown" value=".*?(\d+).*?"'
+ PREMIUM_ONLY_PATTERN = r'>This file is available for Premium Users only'
+ ERROR_PATTERN = r'(?:class=["\']err["\'].*?>|<[Cc]enter><b>|>Error</td>|>\(ERROR:)(?:\s*<.+?>\s*)*(.+?)(?:["\']|<|\))'
+
+ OVR_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)'
+ LINK_PATTERN = None #: final download url pattern
+
+ CAPTCHA_PATTERN = r'(https?://[^"\']+?/captchas?/[^"\']+)'
+ CAPTCHA_DIV_PATTERN = r'>Enter code.*?<div.*?>(.+?)</div>'
+ RECAPTCHA_PATTERN = None
+ SOLVEMEDIA_PATTERN = None
+
+
+ def setup(self):
+ self.chunkLimit = 1
+ self.resumeDownload = self.multiDL = self.premium
+
+
+ def prepare(self):
+ """ Initialize important variables """
+ if not self.HOSTER_DOMAIN:
+ self.fail(_("Missing HOSTER_DOMAIN"))
+
+ if not self.HOSTER_NAME:
+            self.HOSTER_NAME = "".join(part.capitalize() for part in self.HOSTER_DOMAIN.split('.'))
+
+ if not self.LINK_PATTERN:
+ pattern = r'(https?://(www\.)?([^/]*?%s|\d+\.\d+\.\d+\.\d+)(\:\d+)?(/d/|(/files)?/\d+/\w+/).+?)["\'<]'
+ self.LINK_PATTERN = pattern % self.HOSTER_DOMAIN.replace('.', '\.')
+
+ self.captcha = None
+ self.errmsg = None
+ self.passwords = self.getPassword().splitlines()
+
+ super(XFSHoster, self).prepare()
+
+ if self.CHECK_DIRECT_LINK is None:
+ self.directDL = bool(self.premium)
+
+
+ def handleFree(self):
+ link = self.getDownloadLink()
+
+ if link:
+ if self.captcha:
+ self.correctCaptcha()
+
+ self.download(link, ref=True, cookies=True, disposition=True)
+
+ elif self.errmsg:
+ if 'captcha' in self.errmsg:
+ self.fail(_("No valid captcha code entered"))
+ else:
+ self.fail(self.errmsg)
+
+ else:
+ self.fail(_("Download link not found"))
+
+
+ def handlePremium(self):
+ return self.handleFree()
+
+
+ def getDownloadLink(self):
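+        # up to four attempts: look for the final link in the current html, otherwise
+        # re-submit the download form and check the Location header and the new page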
+ for i in xrange(1, 5):
+ self.logDebug("Getting download link: #%d" % i)
+
+ self.checkErrors()
+
+ m = re.search(self.LINK_PATTERN, self.html, re.S)
+ if m:
+ break
+
+ data = self.getPostParameters()
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+
+ self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+
+ m = re.search(r'Location\s*:\s*(.+)', self.req.http.header, re.I)
+            if m and "op=" not in m.group(1):
+ break
+
+ m = re.search(self.LINK_PATTERN, self.html, re.S)
+ if m:
+ break
+ else:
+ self.logError(data['op'] if 'op' in data else _("UNKNOWN"))
+ return ""
+
+ self.errmsg = None
+
+ return m.group(1)
+
+
+ def handleMulti(self):
+ #only tested with easybytez.com
+ self.html = self.load("http://www.%s/" % self.HOSTER_DOMAIN)
+
+ action, inputs = self.parseHtmlForm('')
+
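+        # random 12-digit id appended to the form action of the remote-upload request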
+ upload_id = "%012d" % int(random() * 10 ** 12)
+ action += upload_id + "&js_on=1&utype=prem&upload_type=url"
+
+ inputs['tos'] = '1'
+ inputs['url_mass'] = self.pyfile.url
+ inputs['up1oad_type'] = 'url'
+
+ self.logDebug(action, inputs)
+
+ self.req.setOption("timeout", 600) #: wait for file to upload to easybytez.com
+
+ self.html = self.load(action, post=inputs)
+
+ self.checkErrors()
+
+ action, inputs = self.parseHtmlForm('F1')
+ if not inputs:
+ if self.errmsg:
+ self.retry(reason=self.errmsg)
+ else:
+ self.error(_("TEXTAREA F1 not found"))
+
+ self.logDebug(inputs)
+
+ stmsg = inputs['st']
+
+ if stmsg == 'OK':
+ self.html = self.load(action, post=inputs)
+
+ elif 'Can not leech file' in stmsg:
+ self.retry(20, 3 * 60, _("Can not leech file"))
+
+ elif 'today' in stmsg:
+ self.retry(wait_time=secondsToMidnight(gmt=2), reason=_("You've used all Leech traffic today"))
+
+ else:
+ self.fail(stmsg)
+
+ #get easybytez.com link for uploaded file
+ m = re.search(self.OVR_LINK_PATTERN, self.html)
+ if m is None:
+ self.error(_("OVR_LINK_PATTERN not found"))
+
+ header = self.load(m.group(1), just_header=True, decode=True)
+
+ if 'location' in header: #: Direct download link
+ self.link = header['location']
+ else:
+ self.fail(_("Download link not found"))
+
+
+ def checkErrors(self):
+ m = re.search(self.PREMIUM_ONLY_PATTERN, self.html)
+ if m:
+ self.info['error'] = "premium-only"
+ return
+
+ m = re.search(self.ERROR_PATTERN, self.html)
+
+ if m is None:
+ self.errmsg = None
+ else:
+ self.errmsg = m.group(1).strip()
+
+ self.logWarning(re.sub(r"<.*?>", " ", self.errmsg))
+
+ if 'wait' in self.errmsg:
+ wait_time = sum([int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1}[u.lower()] for v, u in
+ re.findall(r'(\d+)\s*(hr|hour|min|sec)', self.errmsg, re.I)])
+ self.wait(wait_time, True)
+
+ elif 'country' in self.errmsg:
+ self.fail(_("Downloads are disabled for your country"))
+
+ elif 'captcha' in self.errmsg:
+ self.invalidCaptcha()
+
+ elif 'premium' in self.errmsg and 'require' in self.errmsg:
+ self.fail(_("File can be downloaded by premium users only"))
+
+ elif 'limit' in self.errmsg:
+ if 'days' in self.errmsg:
+ delay = secondsToMidnight(gmt=2)
+ retries = 3
+ else:
+ delay = 1 * 60 * 60
+ retries = 25
+
+ self.wait(delay, True)
+ self.retry(retries, reason=_("Download limit exceeded"))
+
+ elif 'countdown' in self.errmsg or 'Expired' in self.errmsg:
+ self.retry(reason=_("Link expired"))
+
+ elif 'maintenance' in self.errmsg or 'maintainance' in self.errmsg:
+ self.tempOffline()
+
+ elif 'download files up to' in self.errmsg:
+ self.fail(_("File too large for free download"))
+
+ else:
+ self.retry(wait_time=60, reason=self.errmsg)
+
+ if self.errmsg:
+ self.info['error'] = self.errmsg
+ else:
+ self.info.pop('error', None)
+
+ return self.errmsg
+
+
+ def getPostParameters(self):
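+        # collect the inputs of the download form (op starting with "download"), falling back
+        # to the "F1" form; free users additionally honour the wait timer and solve any captcha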
+ if hasattr(self, "FORM_PATTERN"):
+ action, inputs = self.parseHtmlForm(self.FORM_PATTERN)
+ else:
+ action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})
+
+ if not inputs:
+ action, inputs = self.parseHtmlForm('F1')
+ if not inputs:
+ if self.errmsg:
+ self.retry(reason=self.errmsg)
+ else:
+ self.error(_("TEXTAREA F1 not found"))
+
+ self.logDebug(inputs)
+
+ if 'op' in inputs:
+ if "password" in inputs:
+ if self.passwords:
+ inputs['password'] = self.passwords.pop(0)
+ else:
+ self.fail(_("Missing password"))
+
+ if not self.premium:
+ m = re.search(self.WAIT_PATTERN, self.html)
+ if m:
+ wait_time = int(m.group(1))
+ self.setWait(wait_time, False)
+
+ self.captcha = self.handleCaptcha(inputs)
+
+ self.wait()
+ else:
+ inputs['referer'] = self.pyfile.url
+
+ if self.premium:
+ inputs['method_premium'] = "Premium Download"
+ inputs.pop('method_free', None)
+ else:
+ inputs['method_free'] = "Free Download"
+ inputs.pop('method_premium', None)
+
+ return inputs
+
+
+ def handleCaptcha(self, inputs):
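+        # returns which captcha type was handled: 1 = plain image, 2 = positioned-digits div,
+        # 3 = ReCaptcha, 4 = SolveMedia, 0 = none found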
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+ if m:
+ captcha_url = m.group(1)
+ inputs['code'] = self.decryptCaptcha(captcha_url)
+ return 1
+
+ m = re.search(self.CAPTCHA_DIV_PATTERN, self.html, re.S)
+ if m:
+ captcha_div = m.group(1)
+ self.logDebug(captcha_div)
+ numerals = re.findall(r'<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', html_unescape(captcha_div))
+ inputs['code'] = "".join([a[1] for a in sorted(numerals, key=lambda num: int(num[0]))])
+ self.logDebug("Captcha code: %s" % inputs['code'], numerals)
+ return 2
+
+ recaptcha = ReCaptcha(self)
+ try:
+ captcha_key = re.search(self.RECAPTCHA_PATTERN, self.html).group(1)
+ except:
+ captcha_key = recaptcha.detect_key()
+
+ if captcha_key:
+ self.logDebug("ReCaptcha key: %s" % captcha_key)
+ inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(captcha_key)
+ return 3
+
+ solvemedia = SolveMedia(self)
+ try:
+ captcha_key = re.search(self.SOLVEMEDIA_PATTERN, self.html).group(1)
+ except:
+ captcha_key = solvemedia.detect_key()
+
+ if captcha_key:
+ self.logDebug("SolveMedia key: %s" % captcha_key)
+ inputs['adcopy_challenge'], inputs['adcopy_response'] = solvemedia.challenge(captcha_key)
+ return 4
+
+ return 0
diff --git a/pyload/plugins/internal/__init__.py b/pyload/plugins/internal/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/internal/__init__.py
diff --git a/pyload/plugins/ocr/GigasizeCom.py b/pyload/plugins/ocr/GigasizeCom.py
new file mode 100644
index 000000000..e1f6e8753
--- /dev/null
+++ b/pyload/plugins/ocr/GigasizeCom.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.OCR import OCR
+
+
+class GigasizeCom(OCR):
+ __name__ = "GigasizeCom"
+ __type__ = "ocr"
+ __version__ = "0.1"
+
+ __description__ = """Gigasize.com ocr plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ def __init__(self):
+ OCR.__init__(self)
+
+
+ def get_captcha(self, image):
+ self.load_image(image)
+ self.threshold(2.8)
+ self.run_tesser(True, False, False, True)
+ return self.result_captcha
diff --git a/pyload/plugins/ocr/LinksaveIn.py b/pyload/plugins/ocr/LinksaveIn.py
new file mode 100644
index 000000000..a9171ac7c
--- /dev/null
+++ b/pyload/plugins/ocr/LinksaveIn.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+
+try:
+ from PIL import Image
+except ImportError:
+ import Image
+
+from glob import glob
+from os import sep
+from os.path import abspath, dirname
+
+from pyload.plugins.internal.OCR import OCR
+
+
+class LinksaveIn(OCR):
+ __name__ = "LinksaveIn"
+ __type__ = "ocr"
+ __version__ = "0.1"
+
+ __description__ = """Linksave.in ocr plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ def __init__(self):
+ OCR.__init__(self)
+ self.data_dir = dirname(abspath(__file__)) + sep + "LinksaveIn" + sep
+
+
+ def load_image(self, image):
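+        # flatten all frames of the (animated) gif into a single RGB image, skipping black pixels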
+ im = Image.open(image)
+ frame_nr = 0
+
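+        # palette lookup table: a 256x1 image filled with the indices 0-255 and converted
+        # to RGB yields the RGB value of every palette index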
+ lut = im.resize((256, 1))
+ lut.putdata(range(256))
+ lut = list(lut.convert("RGB").getdata())
+
+ new = Image.new("RGB", im.size)
+ npix = new.load()
+ while True:
+ try:
+ im.seek(frame_nr)
+ except EOFError:
+ break
+ frame = im.copy()
+ pix = frame.load()
+ for x in xrange(frame.size[0]):
+ for y in xrange(frame.size[1]):
+ if lut[pix[x, y]] != (0,0,0):
+ npix[x, y] = lut[pix[x, y]]
+ frame_nr += 1
+ new.save(self.data_dir+"unblacked.png")
+ self.image = new.copy()
+ self.pixels = self.image.load()
+ self.result_captcha = ''
+
+
+ def get_bg(self):
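+        # match the captcha against every known background in bg/*.gif and return the
+        # path of the one with the most identical pixels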
+ stat = {}
+ cstat = {}
+ img = self.image.convert("P")
+ for bgpath in glob(self.data_dir+"bg/*.gif"):
+ stat[bgpath] = 0
+ bg = Image.open(bgpath)
+
+ bglut = bg.resize((256, 1))
+ bglut.putdata(range(256))
+ bglut = list(bglut.convert("RGB").getdata())
+
+ lut = img.resize((256, 1))
+ lut.putdata(range(256))
+ lut = list(lut.convert("RGB").getdata())
+
+ bgpix = bg.load()
+ pix = img.load()
+ for x in xrange(bg.size[0]):
+ for y in xrange(bg.size[1]):
+ rgb_bg = bglut[bgpix[x, y]]
+ rgb_c = lut[pix[x, y]]
+ try:
+ cstat[rgb_c] += 1
+ except:
+ cstat[rgb_c] = 1
+ if rgb_bg == rgb_c:
+ stat[bgpath] += 1
+ max_p = 0
+ bg = ""
+ for bgpath, value in stat.iteritems():
+ if max_p < value:
+ bg = bgpath
+ max_p = value
+ return bg
+
+
+ def substract_bg(self, bgpath):
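+        # paint every pixel that matches the detected background white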
+ bg = Image.open(bgpath)
+ img = self.image.convert("P")
+
+ bglut = bg.resize((256, 1))
+ bglut.putdata(range(256))
+ bglut = list(bglut.convert("RGB").getdata())
+
+ lut = img.resize((256, 1))
+ lut.putdata(range(256))
+ lut = list(lut.convert("RGB").getdata())
+
+ bgpix = bg.load()
+ pix = img.load()
+ orgpix = self.image.load()
+ for x in xrange(bg.size[0]):
+ for y in xrange(bg.size[1]):
+ rgb_bg = bglut[bgpix[x, y]]
+ rgb_c = lut[pix[x, y]]
+ if rgb_c == rgb_bg:
+ orgpix[x, y] = (255,255,255)
+
+
+ def eval_black_white(self):
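+        # reduce to black and white: a pixel becomes black (text) if one color channel
+        # exceeds the other two by more than the threshold, or if green is the smallest channel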
+ new = Image.new("RGB", (140, 75))
+ pix = new.load()
+ orgpix = self.image.load()
+ thresh = 4
+ for x in xrange(new.size[0]):
+ for y in xrange(new.size[1]):
+ rgb = orgpix[x, y]
+ r, g, b = rgb
+ pix[x, y] = (255,255,255)
+ if r > max(b, g)+thresh:
+ pix[x, y] = (0,0,0)
+ if g < min(r, b):
+ pix[x, y] = (0,0,0)
+ if g > max(r, b)+thresh:
+ pix[x, y] = (0,0,0)
+ if b > max(r, g)+thresh:
+ pix[x, y] = (0,0,0)
+ self.image = new
+ self.pixels = self.image.load()
+
+
+ def get_captcha(self, image):
+ self.load_image(image)
+ bg = self.get_bg()
+ self.substract_bg(bg)
+ self.eval_black_white()
+ self.to_greyscale()
+ self.image.save(self.data_dir+"cleaned_pass1.png")
+ self.clean(4)
+ self.clean(4)
+ self.image.save(self.data_dir+"cleaned_pass2.png")
+ letters = self.split_captcha_letters()
+ final = ""
+ for n, letter in enumerate(letters):
+ self.image = letter
+            self.image.save(self.data_dir+"letter%d.png" % n)
+ self.run_tesser(True, True, False, False)
+ final += self.result_captcha
+
+ return final
diff --git a/pyload/plugins/ocr/NetloadIn.py b/pyload/plugins/ocr/NetloadIn.py
new file mode 100644
index 000000000..79a88c27a
--- /dev/null
+++ b/pyload/plugins/ocr/NetloadIn.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.OCR import OCR
+
+
+class NetloadIn(OCR):
+ __name__ = "NetloadIn"
+ __type__ = "ocr"
+ __version__ = "0.1"
+
+ __description__ = """Netload.in ocr plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("pyLoad Team", "admin@pyload.org")]
+
+
+ def __init__(self):
+ OCR.__init__(self)
+
+
+ def get_captcha(self, image):
+ self.load_image(image)
+ self.to_greyscale()
+ self.clean(3)
+ self.clean(3)
+ self.run_tesser(True, True, False, False)
+
+ self.result_captcha = self.result_captcha.replace(" ", "")[:4] # cut to 4 numbers
+
+ return self.result_captcha
diff --git a/pyload/plugins/ocr/ShareonlineBiz.py b/pyload/plugins/ocr/ShareonlineBiz.py
new file mode 100644
index 000000000..5263f8316
--- /dev/null
+++ b/pyload/plugins/ocr/ShareonlineBiz.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+from pyload.plugins.internal.OCR import OCR
+
+
+class ShareonlineBiz(OCR):
+ __name__ = "ShareonlineBiz"
+ __type__ = "ocr"
+ __version__ = "0.1"
+
+ __description__ = """Shareonline.biz ocr plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("RaNaN", "RaNaN@pyload.org")]
+
+
+ def __init__(self):
+ OCR.__init__(self)
+
+
+ def get_captcha(self, image):
+ self.load_image(image)
+ self.to_greyscale()
+ self.image = self.image.resize((160, 50))
+ self.pixels = self.image.load()
+ self.threshold(1.85)
+ #self.eval_black_white(240)
+ #self.derotate_by_average()
+
+ letters = self.split_captcha_letters()
+
+ final = ""
+ for letter in letters:
+ self.image = letter
+ self.run_tesser(True, True, False, False)
+ final += self.result_captcha
+
+ return final
+
+ #tesseract at 60%
diff --git a/pyload/plugins/ocr/__init__.py b/pyload/plugins/ocr/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/ocr/__init__.py