summaryrefslogtreecommitdiffstats
path: root/pyload
diff options
context:
space:
mode:
Diffstat (limited to 'pyload')
-rw-r--r--pyload/AccountManager.py141
-rw-r--r--pyload/AddonManager.py224
-rw-r--r--pyload/Api.py206
-rw-r--r--pyload/Core.py667
-rw-r--r--pyload/FileManager.py582
-rw-r--r--pyload/InitHomeDir.py94
-rw-r--r--pyload/PluginManager.py438
-rw-r--r--pyload/Scheduler.py141
-rw-r--r--pyload/Setup.py418
-rw-r--r--pyload/__init__.py4
-rw-r--r--pyload/api/AccountApi.py54
-rw-r--r--pyload/api/AddonApi.py27
-rw-r--r--pyload/api/ApiComponent.py23
-rw-r--r--pyload/api/CollectorApi.py37
-rw-r--r--pyload/api/ConfigApi.py135
-rw-r--r--pyload/api/CoreApi.py131
-rw-r--r--pyload/api/DownloadApi.py182
-rw-r--r--pyload/api/DownloadPreparingApi.py121
-rw-r--r--pyload/api/FileApi.py169
-rw-r--r--pyload/api/UserInteractionApi.py61
-rw-r--r--pyload/api/__init__.py8
-rw-r--r--pyload/cli/AddPackage.py66
-rw-r--r--pyload/cli/Cli.py586
-rw-r--r--pyload/cli/Handler.py48
-rw-r--r--pyload/cli/ManageFiles.py204
-rw-r--r--pyload/cli/__init__.py2
-rw-r--r--pyload/cli/printer.py26
-rw-r--r--pyload/config/ConfigManager.py140
-rw-r--r--pyload/config/ConfigParser.py211
-rw-r--r--pyload/config/__init__.py1
-rw-r--r--pyload/config/convert.py39
-rw-r--r--pyload/config/default.py107
-rw-r--r--pyload/database/AccountDatabase.py25
-rw-r--r--pyload/database/ConfigDatabase.py56
-rw-r--r--pyload/database/DatabaseBackend.py500
-rw-r--r--pyload/database/FileDatabase.py448
-rw-r--r--pyload/database/StatisticDatabase.py13
-rw-r--r--pyload/database/StorageDatabase.py48
-rw-r--r--pyload/database/UserDatabase.py129
-rw-r--r--pyload/database/__init__.py8
-rw-r--r--pyload/datatypes/PyFile.py270
-rw-r--r--pyload/datatypes/PyPackage.py115
-rw-r--r--pyload/datatypes/User.py63
-rw-r--r--pyload/datatypes/__init__.py0
-rw-r--r--pyload/interaction/EventManager.py77
-rw-r--r--pyload/interaction/InteractionManager.py166
-rw-r--r--pyload/interaction/InteractionTask.py100
-rw-r--r--pyload/interaction/__init__.py2
-rw-r--r--pyload/lib/Getch.py76
-rw-r--r--pyload/lib/ReadWriteLock.py232
-rw-r--r--pyload/lib/SafeEval.py47
-rw-r--r--pyload/lib/__init__.py0
-rw-r--r--pyload/lib/beaker/__init__.py1
-rw-r--r--pyload/lib/beaker/cache.py459
-rw-r--r--pyload/lib/beaker/container.py633
-rw-r--r--pyload/lib/beaker/converters.py26
-rw-r--r--pyload/lib/beaker/crypto/__init__.py40
-rw-r--r--pyload/lib/beaker/crypto/jcecrypto.py30
-rw-r--r--pyload/lib/beaker/crypto/pbkdf2.py342
-rw-r--r--pyload/lib/beaker/crypto/pycrypto.py31
-rw-r--r--pyload/lib/beaker/crypto/util.py30
-rw-r--r--pyload/lib/beaker/exceptions.py24
-rw-r--r--pyload/lib/beaker/ext/__init__.py0
-rw-r--r--pyload/lib/beaker/ext/database.py165
-rw-r--r--pyload/lib/beaker/ext/google.py120
-rw-r--r--pyload/lib/beaker/ext/memcached.py82
-rw-r--r--pyload/lib/beaker/ext/sqla.py133
-rw-r--r--pyload/lib/beaker/middleware.py165
-rw-r--r--pyload/lib/beaker/session.py618
-rw-r--r--pyload/lib/beaker/synchronization.py381
-rw-r--r--pyload/lib/beaker/util.py302
-rw-r--r--pyload/lib/bottle.py3251
-rw-r--r--pyload/lib/forwarder.py73
-rw-r--r--pyload/lib/hg_tool.py133
-rw-r--r--pyload/lib/mod_pywebsocket/COPYING28
-rw-r--r--pyload/lib/mod_pywebsocket/__init__.py197
-rw-r--r--pyload/lib/mod_pywebsocket/_stream_base.py165
-rw-r--r--pyload/lib/mod_pywebsocket/_stream_hixie75.py229
-rw-r--r--pyload/lib/mod_pywebsocket/_stream_hybi.py915
-rw-r--r--pyload/lib/mod_pywebsocket/common.py307
-rw-r--r--pyload/lib/mod_pywebsocket/dispatch.py387
-rw-r--r--pyload/lib/mod_pywebsocket/extensions.py727
-rw-r--r--pyload/lib/mod_pywebsocket/handshake/__init__.py110
-rw-r--r--pyload/lib/mod_pywebsocket/handshake/_base.py226
-rw-r--r--pyload/lib/mod_pywebsocket/handshake/hybi.py404
-rw-r--r--pyload/lib/mod_pywebsocket/handshake/hybi00.py242
-rw-r--r--pyload/lib/mod_pywebsocket/headerparserhandler.py244
-rw-r--r--pyload/lib/mod_pywebsocket/http_header_util.py263
-rw-r--r--pyload/lib/mod_pywebsocket/memorizingfile.py99
-rw-r--r--pyload/lib/mod_pywebsocket/msgutil.py219
-rw-r--r--pyload/lib/mod_pywebsocket/mux.py1636
-rwxr-xr-xpyload/lib/mod_pywebsocket/standalone.py998
-rw-r--r--pyload/lib/mod_pywebsocket/stream.py57
-rw-r--r--pyload/lib/mod_pywebsocket/util.py515
-rw-r--r--pyload/lib/new_collections.py375
-rw-r--r--pyload/lib/rename_process.py14
-rw-r--r--pyload/lib/simplejson/__init__.py466
-rw-r--r--pyload/lib/simplejson/decoder.py421
-rw-r--r--pyload/lib/simplejson/encoder.py534
-rw-r--r--pyload/lib/simplejson/ordered_dict.py119
-rw-r--r--pyload/lib/simplejson/scanner.py77
-rw-r--r--pyload/lib/simplejson/tool.py39
-rw-r--r--pyload/lib/wsgiserver/LICENSE.txt25
-rw-r--r--pyload/lib/wsgiserver/__init__.py1794
-rw-r--r--pyload/network/Browser.py153
-rw-r--r--pyload/network/Bucket.py63
-rw-r--r--pyload/network/CookieJar.py19
-rw-r--r--pyload/network/HTTPChunk.py298
-rw-r--r--pyload/network/HTTPDownload.py338
-rw-r--r--pyload/network/HTTPRequest.py324
-rw-r--r--pyload/network/RequestFactory.py108
-rw-r--r--pyload/network/XDCCRequest.py162
-rw-r--r--pyload/network/__init__.py1
-rw-r--r--pyload/plugins/Account.py295
-rw-r--r--pyload/plugins/Addon.py171
-rw-r--r--pyload/plugins/Base.py348
-rw-r--r--pyload/plugins/Crypter.py271
-rw-r--r--pyload/plugins/Download.py48
-rw-r--r--pyload/plugins/Hoster.py416
-rw-r--r--pyload/plugins/MultiHoster.py74
-rw-r--r--pyload/plugins/Plugin.py8
-rw-r--r--pyload/plugins/ReCaptcha.py22
-rw-r--r--pyload/plugins/Request.py82
-rw-r--r--pyload/plugins/__init__.py0
-rw-r--r--pyload/plugins/accounts/AlldebridCom.py49
-rw-r--r--pyload/plugins/accounts/BayfilesCom.py51
-rw-r--r--pyload/plugins/accounts/BitshareCom.py44
-rw-r--r--pyload/plugins/accounts/BoltsharingCom.py12
-rw-r--r--pyload/plugins/accounts/CramitIn.py12
-rw-r--r--pyload/plugins/accounts/CyberlockerCh.py31
-rw-r--r--pyload/plugins/accounts/CzshareCom.py57
-rw-r--r--pyload/plugins/accounts/DdlstorageCom.py12
-rw-r--r--pyload/plugins/accounts/DebridItaliaCom.py50
-rw-r--r--pyload/plugins/accounts/DepositfilesCom.py47
-rw-r--r--pyload/plugins/accounts/EasybytezCom.py73
-rw-r--r--pyload/plugins/accounts/EgoFilesCom.py40
-rw-r--r--pyload/plugins/accounts/EuroshareEu.py55
-rw-r--r--pyload/plugins/accounts/FastshareCz.py54
-rw-r--r--pyload/plugins/accounts/FilebeerInfo.py57
-rw-r--r--pyload/plugins/accounts/FilecloudIo.py49
-rw-r--r--pyload/plugins/accounts/FilefactoryCom.py54
-rw-r--r--pyload/plugins/accounts/FilejungleCom.py60
-rw-r--r--pyload/plugins/accounts/FilerNet.py63
-rw-r--r--pyload/plugins/accounts/FilerioCom.py12
-rw-r--r--pyload/plugins/accounts/FilesMailRu.py41
-rw-r--r--pyload/plugins/accounts/FileserveCom.py58
-rw-r--r--pyload/plugins/accounts/FourSharedCom.py48
-rw-r--r--pyload/plugins/accounts/FreakshareCom.py51
-rw-r--r--pyload/plugins/accounts/FshareVn.py62
-rw-r--r--pyload/plugins/accounts/Ftp.py13
-rw-r--r--pyload/plugins/accounts/HellshareCz.py87
-rw-r--r--pyload/plugins/accounts/HellspyCz.py70
-rw-r--r--pyload/plugins/accounts/HotfileCom.py86
-rw-r--r--pyload/plugins/accounts/Http.py13
-rw-r--r--pyload/plugins/accounts/MegasharesCom.py42
-rw-r--r--pyload/plugins/accounts/MultiDebridCom.py47
-rw-r--r--pyload/plugins/accounts/MultishareCz.py58
-rwxr-xr-xpyload/plugins/accounts/NetloadIn.py49
-rw-r--r--pyload/plugins/accounts/Premium4Me.py28
-rw-r--r--pyload/plugins/accounts/PremiumizeMe.py61
-rw-r--r--pyload/plugins/accounts/QuickshareCz.py53
-rw-r--r--pyload/plugins/accounts/RapidgatorNet.py74
-rw-r--r--pyload/plugins/accounts/RapidshareCom.py68
-rw-r--r--pyload/plugins/accounts/RarefileNet.py12
-rw-r--r--pyload/plugins/accounts/RealdebridCom.py35
-rw-r--r--pyload/plugins/accounts/RehostTo.py37
-rw-r--r--pyload/plugins/accounts/ReloadCc.py73
-rw-r--r--pyload/plugins/accounts/RyushareCom.py18
-rw-r--r--pyload/plugins/accounts/Share76Com.py11
-rw-r--r--pyload/plugins/accounts/ShareFilesCo.py12
-rw-r--r--pyload/plugins/accounts/ShareRapidCom.py47
-rw-r--r--pyload/plugins/accounts/ShareonlineBiz.py56
-rw-r--r--pyload/plugins/accounts/SpeedLoadOrg.py12
-rw-r--r--pyload/plugins/accounts/StahnuTo.py49
-rw-r--r--pyload/plugins/accounts/TurbobitNet.py56
-rw-r--r--pyload/plugins/accounts/UlozTo.py36
-rw-r--r--pyload/plugins/accounts/UnrestrictLi.py54
-rw-r--r--pyload/plugins/accounts/UploadedTo.py65
-rw-r--r--pyload/plugins/accounts/UploadheroCom.py35
-rw-r--r--pyload/plugins/accounts/UploadingCom.py52
-rw-r--r--pyload/plugins/accounts/UploadstationCom.py13
-rw-r--r--pyload/plugins/accounts/UptoboxCom.py12
-rw-r--r--pyload/plugins/accounts/WarserverCz.py70
-rw-r--r--pyload/plugins/accounts/WuploadCom.py47
-rw-r--r--pyload/plugins/accounts/X7To.py66
-rw-r--r--pyload/plugins/accounts/YibaishiwuCom.py51
-rw-r--r--pyload/plugins/accounts/ZeveraCom.py49
-rw-r--r--pyload/plugins/accounts/__init__.py0
-rw-r--r--pyload/plugins/addons/AlldebridCom.py28
-rw-r--r--pyload/plugins/addons/BypassCaptcha.py143
-rw-r--r--pyload/plugins/addons/CaptchaBrotherhood.py169
-rw-r--r--pyload/plugins/addons/CaptchaTrader.py159
-rw-r--r--pyload/plugins/addons/Checksum.py169
-rw-r--r--pyload/plugins/addons/ClickAndLoad.py89
-rw-r--r--pyload/plugins/addons/DeathByCaptcha.py210
-rw-r--r--pyload/plugins/addons/DebridItaliaCom.py42
-rw-r--r--pyload/plugins/addons/DeleteFinished.py84
-rw-r--r--pyload/plugins/addons/DownloadScheduler.py86
-rw-r--r--pyload/plugins/addons/EasybytezCom.py31
-rw-r--r--pyload/plugins/addons/Ev0InFetcher.py87
-rw-r--r--pyload/plugins/addons/ExpertDecoders.py112
-rw-r--r--pyload/plugins/addons/ExternalScripts.py118
-rw-r--r--pyload/plugins/addons/ExtractArchive.py312
-rw-r--r--pyload/plugins/addons/HotFolder.py85
-rw-r--r--pyload/plugins/addons/IRCInterface.py431
-rw-r--r--pyload/plugins/addons/ImageTyperz.py160
-rw-r--r--pyload/plugins/addons/LinkdecrypterCom.py59
-rw-r--r--pyload/plugins/addons/MergeFiles.py94
-rw-r--r--pyload/plugins/addons/MultiDebridCom.py42
-rw-r--r--pyload/plugins/addons/MultiHome.py82
-rw-r--r--pyload/plugins/addons/MultiHoster.py106
-rw-r--r--pyload/plugins/addons/MultishareCz.py23
-rw-r--r--pyload/plugins/addons/Premium4Me.py33
-rw-r--r--pyload/plugins/addons/PremiumizeMe.py50
-rw-r--r--pyload/plugins/addons/RealdebridCom.py25
-rw-r--r--pyload/plugins/addons/RehostTo.py41
-rw-r--r--pyload/plugins/addons/RestartFailed.py31
-rw-r--r--pyload/plugins/addons/SkipRev.py46
-rw-r--r--pyload/plugins/addons/UnSkipOnFail.py97
-rw-r--r--pyload/plugins/addons/UnrestrictLi.py44
-rw-r--r--pyload/plugins/addons/UpdateManager.py201
-rw-r--r--pyload/plugins/addons/XFileSharingPro.py70
-rw-r--r--pyload/plugins/addons/XMPPInterface.py276
-rw-r--r--pyload/plugins/addons/ZeveraCom.py19
-rw-r--r--pyload/plugins/addons/__init__.py0
-rw-r--r--pyload/plugins/container/DLC_25.pycbin0 -> 8340 bytes
-rw-r--r--pyload/plugins/container/DLC_26.pycbin0 -> 8313 bytes
-rw-r--r--pyload/plugins/container/DLC_27.pycbin0 -> 8237 bytes
-rw-r--r--pyload/plugins/crypter/BitshareComFolder.py31
-rw-r--r--pyload/plugins/crypter/C1neonCom.py133
-rw-r--r--pyload/plugins/crypter/CCF.py42
-rw-r--r--pyload/plugins/crypter/CrockoComFolder.py14
-rw-r--r--pyload/plugins/crypter/CryptItCom.py38
-rw-r--r--pyload/plugins/crypter/CzshareComFolder.py30
-rw-r--r--pyload/plugins/crypter/DDLMusicOrg.py42
-rw-r--r--pyload/plugins/crypter/DataHuFolder.py55
-rw-r--r--pyload/plugins/crypter/DdlstorageComFolder.py30
-rw-r--r--pyload/plugins/crypter/DepositfilesComFolder.py14
-rw-r--r--pyload/plugins/crypter/Dereferer.py34
-rw-r--r--pyload/plugins/crypter/DontKnowMe.py21
-rw-r--r--pyload/plugins/crypter/DownloadVimeoCom.py30
-rw-r--r--pyload/plugins/crypter/DuckCryptInfo.py58
-rw-r--r--pyload/plugins/crypter/EasybytezComFolder.py35
-rw-r--r--pyload/plugins/crypter/EmbeduploadCom.py54
-rw-r--r--pyload/plugins/crypter/FilebeerInfoFolder.py35
-rw-r--r--pyload/plugins/crypter/FilefactoryComFolder.py44
-rw-r--r--pyload/plugins/crypter/FileserveComFolder.py32
-rw-r--r--pyload/plugins/crypter/FourChanOrg.py25
-rw-r--r--pyload/plugins/crypter/FshareVnFolder.py14
-rw-r--r--pyload/plugins/crypter/GooGl.py41
-rw-r--r--pyload/plugins/crypter/HoerbuchIn.py55
-rw-r--r--pyload/plugins/crypter/HotfileFolderCom.py29
-rw-r--r--pyload/plugins/crypter/ILoadTo.py62
-rw-r--r--pyload/plugins/crypter/LetitbitNetFolder.py33
-rw-r--r--pyload/plugins/crypter/LinkList.py55
-rw-r--r--pyload/plugins/crypter/LinkSaveIn.py227
-rw-r--r--pyload/plugins/crypter/LinkdecrypterCom.py100
-rw-r--r--pyload/plugins/crypter/LixIn.py60
-rw-r--r--pyload/plugins/crypter/LofCc.py49
-rw-r--r--pyload/plugins/crypter/MBLinkInfo.py27
-rw-r--r--pyload/plugins/crypter/MediafireComFolder.py55
-rw-r--r--pyload/plugins/crypter/Movie2kTo.py151
-rw-r--r--pyload/plugins/crypter/MultiloadCz.py41
-rw-r--r--pyload/plugins/crypter/MultiuploadCom.py58
-rw-r--r--pyload/plugins/crypter/NCryptIn.py251
-rw-r--r--pyload/plugins/crypter/NetfolderIn.py71
-rw-r--r--pyload/plugins/crypter/OneKhDe.py36
-rwxr-xr-xpyload/plugins/crypter/OronComFolder.py46
-rw-r--r--pyload/plugins/crypter/QuickshareCzFolder.py30
-rw-r--r--pyload/plugins/crypter/RSDF.py49
-rw-r--r--pyload/plugins/crypter/RSLayerCom.py49
-rw-r--r--pyload/plugins/crypter/RelinkUs.py264
-rw-r--r--pyload/plugins/crypter/SecuredIn.py334
-rw-r--r--pyload/plugins/crypter/SerienjunkiesOrg.py323
-rw-r--r--pyload/plugins/crypter/ShareLinksBiz.py269
-rw-r--r--pyload/plugins/crypter/ShareRapidComFolder.py14
-rw-r--r--pyload/plugins/crypter/SpeedLoadOrgFolder.py30
-rw-r--r--pyload/plugins/crypter/StealthTo.py45
-rw-r--r--pyload/plugins/crypter/TrailerzoneInfo.py45
-rw-r--r--pyload/plugins/crypter/UlozToFolder.py40
-rw-r--r--pyload/plugins/crypter/UploadedToFolder.py50
-rw-r--r--pyload/plugins/crypter/WiiReloadedOrg.py52
-rw-r--r--pyload/plugins/crypter/XfilesharingProFolder.py34
-rw-r--r--pyload/plugins/crypter/YoutubeBatch.py42
-rw-r--r--pyload/plugins/crypter/__init__.py0
-rwxr-xr-xpyload/plugins/hooks/Captcha9kw.py163
-rw-r--r--pyload/plugins/hooks/ReloadCc.py65
-rw-r--r--pyload/plugins/hoster/ARD.py80
-rw-r--r--pyload/plugins/hoster/AlldebridCom.py84
-rw-r--r--pyload/plugins/hoster/BasePlugin.py104
-rw-r--r--pyload/plugins/hoster/BayfilesCom.py92
-rw-r--r--pyload/plugins/hoster/BezvadataCz.py94
-rw-r--r--pyload/plugins/hoster/BillionuploadsCom.py17
-rw-r--r--pyload/plugins/hoster/BitshareCom.py147
-rw-r--r--pyload/plugins/hoster/BoltsharingCom.py15
-rw-r--r--pyload/plugins/hoster/CatShareNet.py38
-rw-r--r--pyload/plugins/hoster/ChipDe.py24
-rw-r--r--pyload/plugins/hoster/CloudzerNet.py72
-rw-r--r--pyload/plugins/hoster/CramitIn.py20
-rw-r--r--pyload/plugins/hoster/CrockoCom.py71
-rw-r--r--pyload/plugins/hoster/CyberlockerCh.py15
-rw-r--r--pyload/plugins/hoster/CzshareCom.py160
-rw-r--r--pyload/plugins/hoster/DailymotionCom.py47
-rw-r--r--pyload/plugins/hoster/DataHu.py53
-rw-r--r--pyload/plugins/hoster/DataportCz.py68
-rw-r--r--pyload/plugins/hoster/DateiTo.py94
-rw-r--r--pyload/plugins/hoster/DdlstorageCom.py19
-rw-r--r--pyload/plugins/hoster/DebridItaliaCom.py61
-rw-r--r--pyload/plugins/hoster/DepositfilesCom.py112
-rw-r--r--pyload/plugins/hoster/DlFreeFr.py183
-rw-r--r--pyload/plugins/hoster/EasybytezCom.py45
-rw-r--r--pyload/plugins/hoster/EdiskCz.py62
-rw-r--r--pyload/plugins/hoster/EgoFilesCom.py103
-rw-r--r--pyload/plugins/hoster/EuroshareEu.py74
-rw-r--r--pyload/plugins/hoster/ExtabitCom.py87
-rw-r--r--pyload/plugins/hoster/FastshareCz.py96
-rw-r--r--pyload/plugins/hoster/FileApeCom.py62
-rw-r--r--pyload/plugins/hoster/FilebeerInfo.py15
-rw-r--r--pyload/plugins/hoster/FilecloudIo.py112
-rw-r--r--pyload/plugins/hoster/FilefactoryCom.py133
-rw-r--r--pyload/plugins/hoster/FilejungleCom.py38
-rw-r--r--pyload/plugins/hoster/FilepostCom.py135
-rw-r--r--pyload/plugins/hoster/FilerNet.py119
-rw-r--r--pyload/plugins/hoster/FilerioCom.py20
-rw-r--r--pyload/plugins/hoster/FilesMailRu.py99
-rw-r--r--pyload/plugins/hoster/FileserveCom.py211
-rw-r--r--pyload/plugins/hoster/FileshareInUa.py78
-rw-r--r--pyload/plugins/hoster/FilezyNet.py33
-rw-r--r--pyload/plugins/hoster/FlyFilesNet.py41
-rw-r--r--pyload/plugins/hoster/FourSharedCom.py53
-rw-r--r--pyload/plugins/hoster/FreakshareCom.py167
-rw-r--r--pyload/plugins/hoster/FreevideoCz.py64
-rw-r--r--pyload/plugins/hoster/FshareVn.py111
-rw-r--r--pyload/plugins/hoster/Ftp.py91
-rw-r--r--pyload/plugins/hoster/GamefrontCom.py80
-rw-r--r--pyload/plugins/hoster/GigapetaCom.py73
-rw-r--r--pyload/plugins/hoster/HellshareCz.py56
-rw-r--r--pyload/plugins/hoster/HellspyCz.py70
-rw-r--r--pyload/plugins/hoster/HotfileCom.py137
-rw-r--r--pyload/plugins/hoster/HundredEightyUploadCom.py36
-rw-r--r--pyload/plugins/hoster/IFileWs.py20
-rw-r--r--pyload/plugins/hoster/IcyFilesCom.py112
-rw-r--r--pyload/plugins/hoster/IfileIt.py74
-rw-r--r--pyload/plugins/hoster/IfolderRu.py90
-rw-r--r--pyload/plugins/hoster/JumbofilesCom.py31
-rw-r--r--pyload/plugins/hoster/LetitbitNet.py125
-rw-r--r--pyload/plugins/hoster/LoadTo.py61
-rw-r--r--pyload/plugins/hoster/LuckyShareNet.py72
-rw-r--r--pyload/plugins/hoster/MediafireCom.py135
-rw-r--r--pyload/plugins/hoster/MegaNz.py125
-rw-r--r--pyload/plugins/hoster/MegacrypterCom.py59
-rw-r--r--pyload/plugins/hoster/MegasharesCom.py108
-rw-r--r--pyload/plugins/hoster/MovReelCom.py106
-rw-r--r--pyload/plugins/hoster/MultiDebridCom.py57
-rw-r--r--pyload/plugins/hoster/MultishareCz.py76
-rw-r--r--pyload/plugins/hoster/MyvideoDe.py43
-rw-r--r--pyload/plugins/hoster/NarodRu.py66
-rw-r--r--pyload/plugins/hoster/NetloadIn.py252
-rw-r--r--pyload/plugins/hoster/NovafileCom.py24
-rw-r--r--pyload/plugins/hoster/NowDownloadEu.py66
-rw-r--r--pyload/plugins/hoster/OneFichierCom.py58
-rw-r--r--pyload/plugins/hoster/PornhostCom.py76
-rw-r--r--pyload/plugins/hoster/PornhubCom.py83
-rw-r--r--pyload/plugins/hoster/Premium4Me.py69
-rw-r--r--pyload/plugins/hoster/PremiumizeMe.py47
-rw-r--r--pyload/plugins/hoster/PutlockerCom.py78
-rw-r--r--pyload/plugins/hoster/QuickshareCz.py99
-rw-r--r--pyload/plugins/hoster/RapidgatorNet.py196
-rw-r--r--pyload/plugins/hoster/RapidshareCom.py225
-rw-r--r--pyload/plugins/hoster/RarefileNet.py34
-rw-r--r--pyload/plugins/hoster/RealdebridCom.py88
-rw-r--r--pyload/plugins/hoster/RedtubeCom.py56
-rw-r--r--pyload/plugins/hoster/RehostTo.py37
-rw-r--r--pyload/plugins/hoster/ReloadCc.py103
-rw-r--r--pyload/plugins/hoster/RyushareCom.py55
-rw-r--r--pyload/plugins/hoster/SecureUploadEu.py18
-rw-r--r--pyload/plugins/hoster/SendmywayCom.py18
-rw-r--r--pyload/plugins/hoster/SendspaceCom.py67
-rw-r--r--pyload/plugins/hoster/Share4webCom.py16
-rw-r--r--pyload/plugins/hoster/Share76Com.py19
-rw-r--r--pyload/plugins/hoster/ShareFilesCo.py24
-rw-r--r--pyload/plugins/hoster/ShareRapidCom.py104
-rw-r--r--pyload/plugins/hoster/SharebeesCom.py19
-rw-r--r--pyload/plugins/hoster/ShareonlineBiz.py203
-rw-r--r--pyload/plugins/hoster/ShareplaceCom.py84
-rw-r--r--pyload/plugins/hoster/ShragleCom.py106
-rw-r--r--pyload/plugins/hoster/SpeedLoadOrg.py21
-rw-r--r--pyload/plugins/hoster/SpeedfileCz.py65
-rw-r--r--pyload/plugins/hoster/StreamCz.py76
-rw-r--r--pyload/plugins/hoster/StreamcloudEu.py111
-rw-r--r--pyload/plugins/hoster/TurbobitNet.py170
-rw-r--r--pyload/plugins/hoster/TurbouploadCom.py45
-rw-r--r--pyload/plugins/hoster/TusfilesNet.py18
-rw-r--r--pyload/plugins/hoster/TwoSharedCom.py33
-rw-r--r--pyload/plugins/hoster/UlozTo.py156
-rw-r--r--pyload/plugins/hoster/UloziskoSk.py75
-rw-r--r--pyload/plugins/hoster/UnibytesCom.py80
-rw-r--r--pyload/plugins/hoster/UploadStationCom.py21
-rw-r--r--pyload/plugins/hoster/UploadedTo.py239
-rw-r--r--pyload/plugins/hoster/UploadheroCom.py87
-rw-r--r--pyload/plugins/hoster/UploadingCom.py110
-rw-r--r--pyload/plugins/hoster/UptoboxCom.py21
-rw-r--r--pyload/plugins/hoster/VeehdCom.py80
-rw-r--r--pyload/plugins/hoster/WarserverCz.py70
-rw-r--r--pyload/plugins/hoster/WebshareCz.py48
-rw-r--r--pyload/plugins/hoster/WrzucTo.py58
-rw-r--r--pyload/plugins/hoster/WuploadCom.py241
-rw-r--r--pyload/plugins/hoster/X7To.py93
-rw-r--r--pyload/plugins/hoster/XFileSharingPro.py318
-rw-r--r--pyload/plugins/hoster/XHamsterCom.py120
-rw-r--r--pyload/plugins/hoster/XVideosCom.py19
-rw-r--r--pyload/plugins/hoster/Xdcc.py229
-rw-r--r--pyload/plugins/hoster/YibaishiwuCom.py54
-rw-r--r--pyload/plugins/hoster/YoupornCom.py56
-rw-r--r--pyload/plugins/hoster/YourfilesTo.py83
-rw-r--r--pyload/plugins/hoster/YoutubeCom.py164
-rw-r--r--pyload/plugins/hoster/ZDF.py46
-rw-r--r--pyload/plugins/hoster/ZeveraCom.py108
-rw-r--r--pyload/plugins/hoster/ZippyshareCom.py192
-rw-r--r--pyload/plugins/hoster/__init__.py0
-rw-r--r--pyload/plugins/internal/AbstractExtractor.py93
-rw-r--r--pyload/plugins/internal/CaptchaService.py77
-rw-r--r--pyload/plugins/internal/DeadHoster.py18
-rw-r--r--pyload/plugins/internal/NetloadInOCR.py27
-rw-r--r--pyload/plugins/internal/OCR.py314
-rw-r--r--pyload/plugins/internal/ShareonlineBizOCR.py53
-rw-r--r--pyload/plugins/internal/SimpleCrypter.py92
-rw-r--r--pyload/plugins/internal/SimpleHoster.py251
-rw-r--r--pyload/plugins/internal/UnRar.py212
-rw-r--r--pyload/plugins/internal/UnZip.py49
-rw-r--r--pyload/plugins/internal/XFSPAccount.py79
-rw-r--r--pyload/plugins/internal/__init__.py0
-rw-r--r--pyload/plugins/network/CurlChunk.py299
-rw-r--r--pyload/plugins/network/CurlDownload.py323
-rw-r--r--pyload/plugins/network/CurlRequest.py314
-rw-r--r--pyload/plugins/network/DefaultRequest.py9
-rw-r--r--pyload/plugins/network/__init__.py1
-rw-r--r--pyload/remote/ClickAndLoadBackend.py170
-rw-r--r--pyload/remote/JSONClient.py56
-rw-r--r--pyload/remote/RemoteManager.py89
-rw-r--r--pyload/remote/WSClient.py59
-rw-r--r--pyload/remote/WebSocketBackend.py49
-rw-r--r--pyload/remote/__init__.py0
-rw-r--r--pyload/remote/apitypes.py537
-rw-r--r--pyload/remote/apitypes_debug.py135
-rw-r--r--pyload/remote/create_apitypes.py180
-rw-r--r--pyload/remote/create_jstypes.py36
-rw-r--r--pyload/remote/json_converter.py64
-rw-r--r--pyload/remote/pyload.thrift539
-rw-r--r--pyload/remote/ttypes.py534
-rw-r--r--pyload/remote/wsbackend/AbstractHandler.py133
-rw-r--r--pyload/remote/wsbackend/ApiHandler.py81
-rw-r--r--pyload/remote/wsbackend/AsyncHandler.py167
-rw-r--r--pyload/remote/wsbackend/Dispatcher.py31
-rw-r--r--pyload/remote/wsbackend/Server.py733
-rw-r--r--pyload/remote/wsbackend/__init__.py0
-rw-r--r--pyload/setup/System_Checks.py126
-rw-r--r--pyload/threads/AddonThread.py65
-rw-r--r--pyload/threads/BaseThread.py142
-rw-r--r--pyload/threads/DecrypterThread.py81
-rw-r--r--pyload/threads/DownloadThread.py231
-rw-r--r--pyload/threads/InfoThread.py168
-rw-r--r--pyload/threads/ThreadManager.py313
-rw-r--r--pyload/threads/__init__.py0
-rw-r--r--pyload/utils/ImportDebugger.py19
-rw-r--r--pyload/utils/JsEngine.py195
-rw-r--r--pyload/utils/__init__.py232
-rw-r--r--pyload/utils/fs.py78
-rw-r--r--pyload/utils/json_layer.py15
-rw-r--r--pyload/utils/packagetools.py155
-rw-r--r--pyload/utils/pylgettext.py61
-rw-r--r--pyload/web/.bowerrc3
-rw-r--r--pyload/web/Gruntfile.js425
-rw-r--r--pyload/web/ServerThread.py150
-rw-r--r--pyload/web/__init__.py0
-rw-r--r--pyload/web/api_app.py112
-rwxr-xr-xpyload/web/app/fonts/Abel-Regular.ttfbin0 -> 36400 bytes
-rw-r--r--pyload/web/app/fonts/Abel-Regular.woffbin0 -> 16284 bytes
-rw-r--r--pyload/web/app/images/default/checks_sheet.pngbin0 -> 1145 bytes
-rw-r--r--pyload/web/app/images/icon.pngbin0 -> 1912 bytes
-rw-r--r--pyload/web/app/index.html110
-rw-r--r--pyload/web/app/scripts/app.js104
-rw-r--r--pyload/web/app/scripts/collections/AccountList.js24
-rw-r--r--pyload/web/app/scripts/collections/FileList.js28
-rw-r--r--pyload/web/app/scripts/collections/InteractionList.js49
-rw-r--r--pyload/web/app/scripts/collections/PackageList.js16
-rw-r--r--pyload/web/app/scripts/collections/ProgressList.js18
-rw-r--r--pyload/web/app/scripts/config.js75
-rw-r--r--pyload/web/app/scripts/controller.js72
-rw-r--r--pyload/web/app/scripts/default.js30
-rw-r--r--pyload/web/app/scripts/helpers/fileHelper.js55
-rw-r--r--pyload/web/app/scripts/helpers/formatSize.js15
-rw-r--r--pyload/web/app/scripts/helpers/formatTime.js17
-rw-r--r--pyload/web/app/scripts/helpers/gettext.js16
-rw-r--r--pyload/web/app/scripts/helpers/pluginIcon.js14
-rw-r--r--pyload/web/app/scripts/helpers/truncate.js25
-rw-r--r--pyload/web/app/scripts/models/Account.js51
-rw-r--r--pyload/web/app/scripts/models/ConfigHolder.js68
-rw-r--r--pyload/web/app/scripts/models/ConfigItem.js40
-rw-r--r--pyload/web/app/scripts/models/File.js97
-rw-r--r--pyload/web/app/scripts/models/InteractionTask.js41
-rw-r--r--pyload/web/app/scripts/models/Package.js119
-rw-r--r--pyload/web/app/scripts/models/Progress.js50
-rw-r--r--pyload/web/app/scripts/models/ServerStatus.js47
-rw-r--r--pyload/web/app/scripts/models/TreeCollection.js50
-rw-r--r--pyload/web/app/scripts/models/UserSession.js20
-rw-r--r--pyload/web/app/scripts/router.js29
-rw-r--r--pyload/web/app/scripts/routers/defaultRouter.js30
-rw-r--r--pyload/web/app/scripts/routers/mobileRouter.js56
-rw-r--r--pyload/web/app/scripts/utils/animations.js129
-rw-r--r--pyload/web/app/scripts/utils/apitypes.js16
-rw-r--r--pyload/web/app/scripts/utils/dialogs.js15
-rw-r--r--pyload/web/app/scripts/utils/i18n.js5
-rw-r--r--pyload/web/app/scripts/utils/lazyRequire.js97
-rw-r--r--pyload/web/app/scripts/vendor/jquery.omniwindow.js141
-rw-r--r--pyload/web/app/scripts/vendor/remaining.js149
-rw-r--r--pyload/web/app/scripts/views/abstract/itemView.js47
-rw-r--r--pyload/web/app/scripts/views/abstract/modalView.js124
-rw-r--r--pyload/web/app/scripts/views/accounts/accountListView.js52
-rw-r--r--pyload/web/app/scripts/views/accounts/accountModal.js72
-rw-r--r--pyload/web/app/scripts/views/accounts/accountView.js18
-rw-r--r--pyload/web/app/scripts/views/dashboard/dashboardView.js172
-rw-r--r--pyload/web/app/scripts/views/dashboard/fileView.js103
-rw-r--r--pyload/web/app/scripts/views/dashboard/filterView.js147
-rw-r--r--pyload/web/app/scripts/views/dashboard/packageView.js75
-rw-r--r--pyload/web/app/scripts/views/dashboard/selectionView.js154
-rw-r--r--pyload/web/app/scripts/views/headerView.js252
-rw-r--r--pyload/web/app/scripts/views/input/inputLoader.js8
-rw-r--r--pyload/web/app/scripts/views/input/inputView.js86
-rw-r--r--pyload/web/app/scripts/views/input/textInput.js36
-rw-r--r--pyload/web/app/scripts/views/linkGrabberModal.js49
-rw-r--r--pyload/web/app/scripts/views/loginView.js37
-rw-r--r--pyload/web/app/scripts/views/notificationView.js85
-rw-r--r--pyload/web/app/scripts/views/progressView.js46
-rw-r--r--pyload/web/app/scripts/views/queryModal.js69
-rw-r--r--pyload/web/app/scripts/views/settings/configSectionView.js99
-rw-r--r--pyload/web/app/scripts/views/settings/pluginChooserModal.js72
-rw-r--r--pyload/web/app/scripts/views/settings/settingsView.js184
-rw-r--r--pyload/web/app/styles/default/accounts.less6
-rw-r--r--pyload/web/app/styles/default/admin.less17
-rw-r--r--pyload/web/app/styles/default/dashboard.less330
-rw-r--r--pyload/web/app/styles/default/main.less21
-rw-r--r--pyload/web/app/styles/default/settings.less121
-rw-r--r--pyload/web/app/styles/default/style.less297
-rw-r--r--pyload/web/app/styles/font.css13
-rw-r--r--pyload/web/app/templates/default/accounts/account.html10
-rw-r--r--pyload/web/app/templates/default/accounts/actionbar.html5
-rw-r--r--pyload/web/app/templates/default/accounts/layout.html19
-rw-r--r--pyload/web/app/templates/default/admin.html223
-rw-r--r--pyload/web/app/templates/default/dashboard/actionbar.html54
-rw-r--r--pyload/web/app/templates/default/dashboard/file.html34
-rw-r--r--pyload/web/app/templates/default/dashboard/layout.html32
-rw-r--r--pyload/web/app/templates/default/dashboard/package.html50
-rw-r--r--pyload/web/app/templates/default/dashboard/select.html11
-rwxr-xr-xpyload/web/app/templates/default/dialogs/addAccount.html42
-rwxr-xr-xpyload/web/app/templates/default/dialogs/addPluginConfig.html26
-rw-r--r--pyload/web/app/templates/default/dialogs/confirmDelete.html11
-rwxr-xr-xpyload/web/app/templates/default/dialogs/interactionTask.html37
-rwxr-xr-xpyload/web/app/templates/default/dialogs/linkgrabber.html49
-rwxr-xr-xpyload/web/app/templates/default/dialogs/modal.html10
-rw-r--r--pyload/web/app/templates/default/header/layout.html61
-rw-r--r--pyload/web/app/templates/default/header/progress.html10
-rw-r--r--pyload/web/app/templates/default/header/progressStatus.html8
-rw-r--r--pyload/web/app/templates/default/header/progressSub.html6
-rw-r--r--pyload/web/app/templates/default/header/progressSup.html10
-rw-r--r--pyload/web/app/templates/default/header/progressbar.html16
-rw-r--r--pyload/web/app/templates/default/header/status.html3
-rw-r--r--pyload/web/app/templates/default/login.html28
-rw-r--r--pyload/web/app/templates/default/notification.html10
-rw-r--r--pyload/web/app/templates/default/settings/actionbar.html5
-rw-r--r--pyload/web/app/templates/default/settings/config.html17
-rw-r--r--pyload/web/app/templates/default/settings/configItem.html7
-rw-r--r--pyload/web/app/templates/default/settings/layout.html11
-rw-r--r--pyload/web/app/templates/default/settings/menu.html40
-rw-r--r--pyload/web/app/templates/default/setup.html16
-rw-r--r--pyload/web/app/unavailable.html18
-rw-r--r--pyload/web/bower.json22
-rw-r--r--pyload/web/cnl_app.py166
-rw-r--r--pyload/web/middlewares.py134
-rw-r--r--pyload/web/package.json36
-rw-r--r--pyload/web/pyload_app.py72
-rw-r--r--pyload/web/servers.py162
-rw-r--r--pyload/web/setup_app.py21
-rw-r--r--pyload/web/utils.py85
-rw-r--r--pyload/web/webinterface.py100
585 files changed, 66533 insertions, 0 deletions
diff --git a/pyload/AccountManager.py b/pyload/AccountManager.py
new file mode 100644
index 000000000..a476c75c1
--- /dev/null
+++ b/pyload/AccountManager.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN, mkaay
+###############################################################################
+
+from threading import Lock
+from random import choice
+
+from pyload.utils import lock, json
+
+class AccountManager:
+ """manages all accounts"""
+
+ def __init__(self, core):
+ """Constructor"""
+
+ self.core = core
+ self.lock = Lock()
+
+ self.loadAccounts()
+
+ def loadAccounts(self):
+ """loads all accounts available"""
+
+ self.accounts = {}
+
+ for plugin, loginname, activated, password, options in self.core.db.loadAccounts():
+ # put into options as used in other context
+ options = json.loads(options) if options else {}
+ options["activated"] = activated
+
+ self.createAccount(plugin, loginname, password, options)
+
+
+ def iterAccounts(self):
+ """ yields login, account for all accounts"""
+ for name, data in self.accounts.iteritems():
+ for login, account in data.iteritems():
+ yield login, account
+
+ def saveAccounts(self):
+ """save all account information"""
+ # TODO: multi user
+ # TODO: activated
+
+ data = []
+ for name, plugin in self.accounts.iteritems():
+ data.extend(
+ [(name, acc.loginname, 1 if acc.activated else 0, acc.password, json.dumps(acc.options)) for acc in
+ plugin.itervalues()])
+ self.core.db.saveAccounts(data)
+
+ def createAccount(self, plugin, loginname, password, options):
+ klass = self.core.pluginManager.loadClass("accounts", plugin)
+ if not klass:
+ self.core.log.warning(_("Unknown account plugin %s") % plugin)
+ return
+
+ if plugin not in self.accounts:
+ self.accounts[plugin] = {}
+
+ self.core.log.debug("Create account %s:%s" % (plugin, loginname))
+
+ self.accounts[plugin][loginname] = klass(self, loginname, password, options)
+
+
+ def getAccount(self, plugin, user):
+ return self.accounts[plugin].get(user, None)
+
+ @lock
+ def updateAccount(self, plugin, user, password=None, options={}):
+ """add or update account"""
+ if plugin in self.accounts and user in self.accounts[plugin]:
+ acc = self.accounts[plugin][user]
+ updated = acc.update(password, options)
+
+ self.saveAccounts()
+ if updated: acc.scheduleRefresh(force=True)
+ else:
+ self.createAccount(plugin, user, password, options)
+ self.saveAccounts()
+
+ self.sendChange(plugin, user)
+
+ @lock
+ def removeAccount(self, plugin, user):
+ """remove account"""
+ if plugin in self.accounts and user in self.accounts[plugin]:
+ del self.accounts[plugin][user]
+ self.core.db.removeAccount(plugin, user)
+ self.core.eventManager.dispatchEvent("account:deleted", plugin, user)
+ else:
+ self.core.log.debug("Remove non existent account %s %s" % (plugin, user))
+
+
+ @lock
+ def getAccountForPlugin(self, plugin):
+ if plugin in self.accounts:
+ accs = [x for x in self.accounts[plugin].values() if x.isUsable()]
+ if accs: return choice(accs)
+
+ return None
+
+ @lock
+ def getAllAccounts(self, refresh=False):
+ """ Return account info, refresh afterwards if needed
+
+ :param refresh:
+ :return:
+ """
+ if refresh:
+ self.core.scheduler.addJob(0, self.core.accountManager.getAllAccounts)
+
+ # load unavailable account info
+ for p_dict in self.accounts.itervalues():
+ for acc in p_dict.itervalues():
+ acc.getAccountInfo()
+
+ return self.accounts
+
+ def refreshAllAccounts(self):
+ """ Force a refresh of every account """
+ for p in self.accounts.itervalues():
+ for acc in p.itervalues():
+ acc.getAccountInfo(True)
+
+ def sendChange(self, plugin, name):
+ self.core.eventManager.dispatchEvent("account:updated", plugin, name) \ No newline at end of file
diff --git a/pyload/AddonManager.py b/pyload/AddonManager.py
new file mode 100644
index 000000000..75ff4ebc9
--- /dev/null
+++ b/pyload/AddonManager.py
@@ -0,0 +1,224 @@
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2013 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+import __builtin__
+
+from thread import start_new_thread
+from threading import RLock
+
+from types import MethodType
+
+from pyload.threads.AddonThread import AddonThread
+from pyload.PluginManager import literal_eval
+from utils import lock, to_string
+
+class AddonManager:
+ """ Manages addons, loading, unloading. """
+
+ def __init__(self, core):
+ self.core = core
+ self.config = self.core.config
+
+ __builtin__.addonManager = self #needed to let addons register themselves
+
+ self.log = self.core.log
+ # TODO: multiuser, addons can store the user itself, probably not needed here
+ self.plugins = {}
+ self.methods = {} # dict of names and list of methods usable by rpc
+ self.events = {} # Contains event that will be registered
+
+ self.lock = RLock()
+ self.createIndex()
+
+ # manage addons on config change
+ self.listenTo("config:changed", self.manageAddon)
+
+ @lock
+ def callInHooks(self, event, eventName, *args):
+ """ Calls a method in all addons and catch / log errors"""
+ for plugin in self.plugins.itervalues():
+ self.call(plugin, event, *args)
+ self.dispatchEvent(eventName, *args)
+
+ def call(self, addon, f, *args):
+ try:
+ func = getattr(addon, f)
+ return func(*args)
+ except Exception, e:
+ addon.logError(_("Error when executing %s" % f), e)
+ self.core.print_exc()
+
+ @lock
+ def createIndex(self):
+ active = []
+ deactive = []
+
+ for pluginname in self.core.pluginManager.getPlugins("addons"):
+ try:
+ # check first for builtin plugin
+ attrs = self.core.pluginManager.loadAttributes("addons", pluginname)
+ internal = attrs.get("internal", False)
+
+ if internal or self.core.config.get(pluginname, "activated"):
+ pluginClass = self.core.pluginManager.loadClass("addons", pluginname)
+
+ if not pluginClass: continue
+
+ plugin = pluginClass(self.core, self)
+ self.plugins[pluginClass.__name__] = plugin
+
+ # hide internals from printing
+ if not internal and plugin.isActivated():
+ active.append(pluginClass.__name__)
+ else:
+ self.log.debug("Loaded internal plugin: %s" % pluginClass.__name__)
+ else:
+ deactive.append(pluginname)
+
+ except:
+ self.log.warning(_("Failed activating %(name)s") % {"name": pluginname})
+ self.core.print_exc()
+
+ self.log.info(_("Activated addons: %s") % ", ".join(sorted(active)))
+ self.log.info(_("Deactivated addons: %s") % ", ".join(sorted(deactive)))
+
+ def manageAddon(self, plugin, name, value):
+ # TODO: user
+
+ # check if section was a plugin
+ if plugin not in self.core.pluginManager.getPlugins("addons"):
+ return
+
+ if name == "activated" and value:
+ self.activateAddon(plugin)
+ elif name == "activated" and not value:
+ self.deactivateAddon(plugin)
+
+ @lock
+ def activateAddon(self, plugin):
+ #check if already loaded
+ if plugin in self.plugins:
+ return
+
+ pluginClass = self.core.pluginManager.loadClass("addons", plugin)
+
+ if not pluginClass: return
+
+ self.log.debug("Plugin loaded: %s" % plugin)
+
+ plugin = pluginClass(self.core, self)
+ self.plugins[pluginClass.__name__] = plugin
+
+        # activate the addon in a new thread
+ start_new_thread(plugin.activate, tuple())
+ self.registerEvents() # TODO: BUG: events will be destroyed and not re-registered
+
+ @lock
+ def deactivateAddon(self, plugin):
+ if plugin not in self.plugins:
+ return
+ else:
+ addon = self.plugins[plugin]
+
+ if addon.__internal__: return
+
+ self.call(addon, "deactivate")
+ self.log.debug("Plugin deactivated: %s" % plugin)
+
+ #remove periodic call
+ self.log.debug("Removed callback %s" % self.core.scheduler.removeJob(addon.cb))
+ del self.plugins[addon.__name__]
+
+ #remove event listener
+ for f in dir(addon):
+ if f.startswith("__") or type(getattr(addon, f)) != MethodType:
+ continue
+ self.core.eventManager.removeFromEvents(getattr(addon, f))
+
+ def activateAddons(self):
+ self.log.info(_("Activating Plugins..."))
+ for plugin in self.plugins.itervalues():
+ if plugin.isActivated():
+ self.call(plugin, "activate")
+
+ self.registerEvents()
+
+ def deactivateAddons(self):
+ """ Called when core is shutting down """
+ self.log.info(_("Deactivating Plugins..."))
+ for plugin in self.plugins.itervalues():
+ self.call(plugin, "deactivate")
+
+ def downloadPreparing(self, pyfile):
+ self.callInHooks("downloadPreparing", "download:preparing", pyfile)
+
+ def downloadFinished(self, pyfile):
+ self.callInHooks("downloadFinished", "download:finished", pyfile)
+
+ def downloadFailed(self, pyfile):
+ self.callInHooks("downloadFailed", "download:failed", pyfile)
+
+ def packageFinished(self, package):
+ self.callInHooks("packageFinished", "package:finished", package)
+
+ @lock
+ def startThread(self, function, *args, **kwargs):
+ AddonThread(self.core.threadManager, function, args, kwargs)
+
+ def activePlugins(self):
+ """ returns all active plugins """
+ return [x for x in self.plugins.itervalues() if x.isActivated()]
+
+ def getAllInfo(self):
+ """returns info stored by addon plugins"""
+ info = {}
+ for name, plugin in self.plugins.iteritems():
+ if plugin.info:
+                #copy and convert to str
+ info[name] = dict(
+ [(x, to_string(y)) for x, y in plugin.info.iteritems()])
+ return info
+
+ def getInfo(self, plugin):
+ info = {}
+ if plugin in self.plugins and self.plugins[plugin].info:
+ info = dict([(x, to_string(y))
+ for x, y in self.plugins[plugin].info.iteritems()])
+
+ return info
+
+ def addEventListener(self, plugin, func, event):
+ """ add the event to the list """
+ if plugin not in self.events:
+ self.events[plugin] = []
+ self.events[plugin].append((func, event))
+
+ def registerEvents(self):
+ """ actually register all saved events """
+ for name, plugin in self.plugins.iteritems():
+ if name in self.events:
+ for func, event in self.events[name]:
+ self.listenTo(event, getattr(plugin, func))
+ # clean up
+ del self.events[name]
+
+ def listenTo(self, *args):
+ self.core.eventManager.listenTo(*args)
+
+ def dispatchEvent(self, *args):
+ self.core.eventManager.dispatchEvent(*args)
+
diff --git a/pyload/Api.py b/pyload/Api.py
new file mode 100644
index 000000000..32a077c08
--- /dev/null
+++ b/pyload/Api.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2013 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+import re
+from types import MethodType
+
+from remote.apitypes import *
+
+# contains function names mapped to their permissions
+# unlisted functions are for admins only
+perm_map = {}
+
+# decorator only called on init, never initialized, so has no effect on runtime
+def RequirePerm(bits):
+ class _Dec(object):
+ def __new__(cls, func, *args, **kwargs):
+ perm_map[func.__name__] = bits
+ return func
+
+ return _Dec
+
+urlmatcher = re.compile(r"((https?|ftps?|xdcc|sftp):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+\-=\\\.&]*)", re.IGNORECASE)
+
+stateMap = {
+ DownloadState.All: frozenset(getattr(DownloadStatus, x) for x in dir(DownloadStatus) if not x.startswith("_")),
+ DownloadState.Finished: frozenset((DownloadStatus.Finished, DownloadStatus.Skipped)),
+ DownloadState.Unfinished: None, # set below
+ DownloadState.Failed: frozenset((DownloadStatus.Failed, DownloadStatus.TempOffline, DownloadStatus.Aborted)),
+ DownloadState.Unmanaged: None, #TODO
+}
+
+stateMap[DownloadState.Unfinished] = frozenset(stateMap[DownloadState.All].difference(stateMap[DownloadState.Finished]))
+
+def state_string(state):
+ return ",".join(str(x) for x in stateMap[state])
+
+from datatypes.User import User
+
+class Api(Iface):
+ """
+ **pyLoads API**
+
+ This is accessible either internal via core.api, websocket backend or json api.
+
+ see Thrift specification file remote/thriftbackend/pyload.thrift\
+ for information about data structures and what methods are usable with rpc.
+
+ Most methods requires specific permissions, please look at the source code if you need to know.\
+ These can be configured via web interface.
+ Admin user have all permissions, and are the only ones who can access the methods with no specific permission.
+ """
+
+ EXTERNAL = Iface # let the json api know which methods are external
+    EXTEND = False # only extendable when set to true
+
+ def __init__(self, core):
+ self.core = core
+ self.user_apis = {}
+
+ @property
+ def user(self):
+ return None #TODO return default user?
+
+ @property
+ def primaryUID(self):
+ return self.user.primary if self.user else None
+
+ @classmethod
+ def initComponents(cls):
+ # Allow extending the api
+ # This prevents unintentionally registering of the components,
+ # but will only work once when they are imported
+ cls.EXTEND = True
+ # Import all Api modules, they register themselves.
+ import pyload.api
+ # they will vanish from the namespace afterwards
+
+
+ @classmethod
+ def extend(cls, api):
+ """Takes all params from api and extends cls with it.
+ api class can be removed afterwards
+
+ :param api: Class with methods to extend
+ """
+ if cls.EXTEND:
+ for name, func in api.__dict__.iteritems():
+ if name.startswith("_"): continue
+ setattr(cls, name, MethodType(func, None, cls))
+
+ return cls.EXTEND
+
+ def withUserContext(self, uid):
+ """ Returns a proxy version of the api, to call method in user context
+
+ :param uid: user or userData instance or uid
+ :return: :class:`UserApi`
+ """
+ if isinstance(uid, User):
+ uid = uid.uid
+
+ if uid not in self.user_apis:
+ user = self.core.db.getUserData(uid=uid)
+ if not user: #TODO: anonymous user?
+ return None
+
+ self.user_apis[uid] = UserApi(self.core, User.fromUserData(self, user))
+
+ return self.user_apis[uid]
+
+
+ #############################
+ # Auth+User Information
+ #############################
+
+ # TODO
+
+ @RequirePerm(Permission.All)
+ def login(self, username, password, remoteip=None):
+ """Login into pyLoad, this **must** be called when using rpc before any methods can be used.
+
+ :param username:
+ :param password:
+ :param remoteip: Omit this argument, its only used internal
+ :return: bool indicating login was successful
+ """
+ return True if self.checkAuth(username, password, remoteip) else False
+
+ def checkAuth(self, username, password, remoteip=None):
+ """Check authentication and returns details
+
+ :param username:
+ :param password:
+ :param remoteip:
+ :return: dict with info, empty when login is incorrect
+ """
+ self.core.log.info(_("User '%s' tries to log in") % username)
+
+ return self.core.db.checkAuth(username, password)
+
+ def isAuthorized(self, func, user):
+ """checks if the user is authorized for specific method
+
+ :param func: function name
+ :param user: `User`
+ :return: boolean
+ """
+ if user.isAdmin():
+ return True
+ elif func in perm_map and user.hasPermission(perm_map[func]):
+ return True
+ else:
+ return False
+
+ # TODO
+ @RequirePerm(Permission.All)
+ def getUserData(self, username, password):
+ """similar to `checkAuth` but returns UserData thrift type """
+ user = self.checkAuth(username, password)
+ if not user:
+ raise UserDoesNotExists(username)
+
+ return user.toUserData()
+
+ def getAllUserData(self):
+ """returns all known user and info"""
+ return self.core.db.getAllUserData()
+
+ def changePassword(self, username, oldpw, newpw):
+ """ changes password for specific user """
+ return self.core.db.changePassword(username, oldpw, newpw)
+
+ def setUserPermission(self, user, permission, role):
+ self.core.db.setPermission(user, permission)
+ self.core.db.setRole(user, role)
+
+
+class UserApi(Api):
+ """ Proxy object for api that provides all methods in user context """
+
+ def __init__(self, core, user):
+ # No need to init super class
+ self.core = core
+ self._user = user
+
+ def withUserContext(self, uid):
+ raise Exception("Not allowed")
+
+ @property
+ def user(self):
+ return self._user \ No newline at end of file
diff --git a/pyload/Core.py b/pyload/Core.py
new file mode 100644
index 000000000..15b036c7a
--- /dev/null
+++ b/pyload/Core.py
@@ -0,0 +1,667 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2013 pyLoad Team
+# http://www.pyload.org
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: spoob
+# @author: sebnapi
+# @author: RaNaN
+# @author: mkaay
+# @version: v0.5.0
+###############################################################################
+
+from pyload import __version__ as CURRENT_VERSION
+
+import __builtin__
+
+from getopt import getopt, GetoptError
+import logging
+import logging.handlers
+import os
+from os import _exit, execl, getcwd, remove, walk, chdir, close
+import signal
+import sys
+from sys import argv, executable, exit
+from time import time, sleep
+from traceback import print_exc
+
+import locale
+locale.locale_alias = locale.windows_locale = {} #save ~100kb ram, no known side effects for now
+
+import subprocess
+subprocess.__doc__ = None # the module with the largest doc we are using
+
+import InitHomeDir
+
+from AccountManager import AccountManager
+from config.ConfigParser import ConfigParser
+from config.ConfigManager import ConfigManager
+from PluginManager import PluginManager
+from interaction.EventManager import EventManager
+from network.RequestFactory import RequestFactory
+from web.ServerThread import WebServer
+from Scheduler import Scheduler
+from remote.RemoteManager import RemoteManager
+from utils.JsEngine import JsEngine
+
+import utils.pylgettext as gettext
+from utils import formatSize, get_console_encoding
+from utils.fs import free_space, exists, makedirs, join, chmod
+
+from codecs import getwriter
+
+# test runner overwrites sys.stdout
+if hasattr(sys.stdout, "encoding"): enc = get_console_encoding(sys.stdout.encoding)
+else: enc = "utf8"
+
+sys._stdout = sys.stdout
+sys.stdout = getwriter(enc)(sys.stdout, errors="replace")
+
+# TODO List
+# - configurable auth system ldap/mysql
+# - cron-job-like scheduler
+# - plugin stack / multi decrypter
+# - media plugin type
+# - general progress info
+# - content attribute for files / sync status
+# - sync with disk content / file manager / nested packages
+# - sync between pyload cores
+# - new attributes (date|sync status)
+# - embedded packages
+# - would require new/modified link collector concept
+# - pausable links/packages
+# - toggable accounts
+# - interaction manager
+# - improve external scripts
+# - make pyload indestructible against failing plugins -> see ConfigParser first
+
+class Core(object):
+ """pyLoad Core, one tool to rule them all... (the filehosters) :D"""
+
+ def __init__(self):
+ self.doDebug = False
+ self.running = False
+ self.daemon = False
+ self.remote = True
+ self.pdb = None
+ self.arg_links = []
+ self.pidfile = "pyload.pid"
+ self.deleteLinks = False # will delete links on startup
+
+ if len(argv) > 1:
+ try:
+ options, args = getopt(argv[1:], 'vchdusqp:',
+ ["version", "clear", "clean", "help", "debug", "user",
+ "setup", "configdir=", "changedir", "daemon",
+ "quit", "status", "no-remote","pidfile="])
+
+ for option, argument in options:
+ if option in ("-v", "--version"):
+ print "pyLoad", CURRENT_VERSION
+ exit()
+ elif option in ("-p", "--pidfile"):
+ self.pidfile = argument
+ elif option == "--daemon":
+ self.daemon = True
+ elif option in ("-c", "--clear"):
+ self.deleteLinks = True
+ elif option in ("-h", "--help"):
+ self.print_help()
+ exit()
+ elif option in ("-d", "--debug"):
+ self.doDebug = True
+ elif option in ("-u", "--user"):
+ from Setup import Setup
+
+ self.config = ConfigParser()
+ s = Setup(pypath, self.config)
+ s.set_user()
+ exit()
+ elif option in ("-s", "--setup"):
+ from Setup import Setup
+
+ self.config = ConfigParser()
+ s = Setup(pypath, self.config)
+ s.start()
+ exit()
+ elif option == "--changedir":
+ from Setup import Setup
+
+ self.config = ConfigParser()
+ s = Setup(pypath, self.config)
+ s.conf_path(True)
+ exit()
+ elif option in ("-q", "--quit"):
+ self.quitInstance()
+ exit()
+ elif option == "--status":
+ pid = self.isAlreadyRunning()
+ if self.isAlreadyRunning():
+ print pid
+ exit(0)
+ else:
+ print "false"
+ exit(1)
+ elif option == "--clean":
+ self.cleanTree()
+ exit()
+ elif option == "--no-remote":
+ self.remote = False
+
+ except GetoptError:
+ print 'Unknown Argument(s) "%s"' % " ".join(argv[1:])
+ self.print_help()
+ exit()
+
+ def print_help(self):
+ print ""
+ print "pyLoad v%s 2008-2013 the pyLoad Team" % CURRENT_VERSION
+ print ""
+ if sys.argv[0].endswith(".py"):
+ print "Usage: python pyload.py [options]"
+ else:
+ print "Usage: pyload [options]"
+ print ""
+ print "<Options>"
+ print " -v, --version", " " * 10, "Print version to terminal"
+ print " -c, --clear", " " * 12, "Delete all saved packages/links"
+ #print " -a, --add=<link/list>", " " * 2, "Add the specified links"
+ print " -u, --user", " " * 13, "Manages users"
+ print " -d, --debug", " " * 12, "Enable debug mode"
+ print " -s, --setup", " " * 12, "Run setup assistant"
+ print " --configdir=<dir>", " " * 6, "Run with <dir> as configuration directory"
+ print " -p, --pidfile=<file>", " " * 3, "Set pidfile to <file>"
+ print " --changedir", " " * 12, "Change configuration directory permanently"
+ print " --daemon", " " * 15, "Daemonize after startup"
+ print " --no-remote", " " * 12, "Disable remote access"
+ print " --status", " " * 15, "Display pid if running or False"
+ print " --clean", " " * 16, "Remove .pyc/.pyo files"
+ print " -q, --quit", " " * 13, "Quit a running pyLoad instance"
+ print " -h, --help", " " * 13, "Display this help screen"
+ print ""
+
+
+ def quit(self, a, b):
+ self.shutdown()
+ self.log.info(_("Received Quit signal"))
+ _exit(1)
+
+ def writePidFile(self):
+ self.deletePidFile()
+ pid = os.getpid()
+ f = open(self.pidfile, "wb")
+ f.write(str(pid))
+ f.close()
+ chmod(self.pidfile, 0660)
+
+ def deletePidFile(self):
+ if self.checkPidFile():
+ self.log.debug("Deleting old pidfile %s" % self.pidfile)
+ os.remove(self.pidfile)
+
+ def checkPidFile(self):
+ """ return pid as int or 0"""
+ if os.path.isfile(self.pidfile):
+ f = open(self.pidfile, "rb")
+ pid = f.read().strip()
+ f.close()
+ if pid:
+ pid = int(pid)
+ return pid
+
+ return 0
+
+ def isAlreadyRunning(self):
+ pid = self.checkPidFile()
+ if not pid or os.name == "nt": return False
+ try:
+ os.kill(pid, 0) # 0 - default signal (does nothing)
+ except:
+ return 0
+
+ return pid
+
+ def quitInstance(self):
+ if os.name == "nt":
+ print "Not supported on windows."
+ return
+
+ pid = self.isAlreadyRunning()
+ if not pid:
+ print "No pyLoad running."
+ return
+
+ try:
+            os.kill(pid, 3) #SIGQUIT
+
+ t = time()
+ print "waiting for pyLoad to quit"
+
+ while exists(self.pidfile) and t + 10 > time():
+ sleep(0.25)
+
+ if not exists(self.pidfile):
+ print "pyLoad successfully stopped"
+ else:
+ os.kill(pid, 9) #SIGKILL
+ print "pyLoad did not respond"
+ print "Kill signal was send to process with id %s" % pid
+
+ except:
+ print "Error quitting pyLoad"
+
+
+ def cleanTree(self):
+ for path, dirs, files in walk(self.path("")):
+ for f in files:
+ if not f.endswith(".pyo") and not f.endswith(".pyc"):
+ continue
+
+ if "_25" in f or "_26" in f or "_27" in f:
+ continue
+
+ print join(path, f)
+ remove(join(path, f))
+
+ def start(self, rpc=True, web=True, tests=False):
+ """ starts the fun :D """
+
+ self.version = CURRENT_VERSION
+
+ # TODO: Re-enable when its working again
+ # TODO: Don't forget it
+ if False and not exists("pyload.conf") and not tests:
+ from Setup import Setup
+
+ print "This is your first start, running configuration assistant now."
+ self.config = ConfigParser()
+ s = Setup(pypath, self.config)
+ res = False
+ try:
+ res = s.start()
+ except SystemExit:
+ pass
+ except KeyboardInterrupt:
+ print "\nSetup interrupted"
+ except:
+ res = False
+ print_exc()
+ print "Setup failed"
+ if not res:
+ remove("pyload.conf")
+
+ exit()
+
+ try: signal.signal(signal.SIGQUIT, self.quit)
+ except: pass
+
+ self.config = ConfigParser()
+
+ gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
+ translation = gettext.translation("pyLoad", self.path("locale"),
+ languages=[self.config['general']['language'],"en"],fallback=True)
+ translation.install(True)
+
+ # load again so translations are propagated
+ self.config.loadDefault()
+
+ self.debug = self.doDebug or self.config['general']['debug_mode']
+ self.remote &= self.config['remote']['activated']
+
+ pid = self.isAlreadyRunning()
+ # don't exit when in test runner
+ if pid and not tests:
+ print _("pyLoad already running with pid %s") % pid
+ exit()
+
+ if os.name != "nt" and self.config["general"]["renice"]:
+ os.system("renice %d %d" % (self.config["general"]["renice"], os.getpid()))
+
+ if self.config["permission"]["change_group"]:
+ if os.name != "nt":
+ try:
+ from grp import getgrnam
+
+ group = getgrnam(self.config["permission"]["group"])
+ os.setgid(group[2])
+ except Exception, e:
+ print _("Failed changing group: %s") % e
+
+ if self.config["permission"]["change_user"]:
+ if os.name != "nt":
+ try:
+ from pwd import getpwnam
+
+ user = getpwnam(self.config["permission"]["user"])
+ os.setuid(user[2])
+ except Exception, e:
+ print _("Failed changing user: %s") % e
+
+ if self.debug:
+ self.init_logger(logging.DEBUG) # logging level
+ else:
+ self.init_logger(logging.INFO) # logging level
+
+ self.do_kill = False
+ self.do_restart = False
+ self.shuttedDown = False
+
+ self.log.info(_("Starting") + " pyLoad %s" % CURRENT_VERSION)
+ self.log.info(_("Using home directory: %s") % getcwd())
+
+ if not tests:
+ self.writePidFile()
+
+ self.captcha = True # checks seems to fail, although tesseract is available
+
+ self.eventManager = self.evm = EventManager(self)
+ self.setupDB()
+
+ # Upgrade to configManager
+ self.config = ConfigManager(self, self.config)
+
+ if self.deleteLinks:
+ self.log.info(_("All links removed"))
+ self.db.purgeLinks()
+
+ self.requestFactory = RequestFactory(self)
+ __builtin__.pyreq = self.requestFactory
+
+ # deferred import, could improve start-up time
+ from Api import Api
+ from AddonManager import AddonManager
+ from interaction.InteractionManager import InteractionManager
+ from threads.ThreadManager import ThreadManager
+
+ Api.initComponents()
+ self.api = Api(self)
+
+ self.scheduler = Scheduler(self)
+
+ #hell yeah, so many important managers :D
+ self.pluginManager = PluginManager(self)
+ self.interactionManager = self.im = InteractionManager(self)
+ self.accountManager = AccountManager(self)
+ self.threadManager = ThreadManager(self)
+ self.addonManager = AddonManager(self)
+ self.remoteManager = RemoteManager(self)
+
+ self.js = JsEngine()
+
+ # enough initialization for test cases
+ if tests: return
+
+ self.log.info(_("Download time: %s") % self.api.isTimeDownload())
+
+ if rpc:
+ self.remoteManager.startBackends()
+
+ if web:
+ self.init_webserver()
+
+ dl_folder = self.config["general"]["download_folder"]
+
+ if not exists(dl_folder):
+ makedirs(dl_folder)
+
+ spaceLeft = free_space(dl_folder)
+
+ self.log.info(_("Free space: %s") % formatSize(spaceLeft))
+
+        self.config.save() #save so config file gets filled
+
+ link_file = join(pypath, "links.txt")
+
+ if exists(link_file):
+ f = open(link_file, "rb")
+ if f.read().strip():
+ self.api.addPackage("links.txt", [link_file], 1)
+ f.close()
+
+ link_file = "links.txt"
+ if exists(link_file):
+ f = open(link_file, "rb")
+ if f.read().strip():
+ self.api.addPackage("links.txt", [link_file], 1)
+ f.close()
+
+ #self.scheduler.addJob(0, self.accountManager.getAccountInfos)
+ self.log.info(_("Activating Accounts..."))
+ self.accountManager.refreshAllAccounts()
+
+ #restart failed
+ if self.config["download"]["restart_failed"]:
+ self.log.info(_("Restarting failed downloads..."))
+ self.api.restartFailed()
+
+ self.threadManager.pause = False
+ self.running = True
+
+ self.addonManager.activateAddons()
+
+ self.log.info(_("pyLoad is up and running"))
+ self.eventManager.dispatchEvent("core:ready")
+
+ #test api
+# from pyload.common.APIExerciser import startApiExerciser
+# startApiExerciser(self, 3)
+
+ #some memory stats
+# from guppy import hpy
+# hp=hpy()
+# print hp.heap()
+# import objgraph
+# objgraph.show_most_common_types(limit=30)
+# import memdebug
+# memdebug.start(8002)
+# from meliae import scanner
+# scanner.dump_all_objects(self.path('objs.json'))
+
+ locals().clear()
+
+ while True:
+ sleep(1.5)
+ if self.do_restart:
+ self.log.info(_("restarting pyLoad"))
+ self.restart()
+ if self.do_kill:
+ self.shutdown()
+ self.log.info(_("pyLoad quits"))
+ self.removeLogger()
+ _exit(0)
+ # TODO check exits codes, clean exit is still blocked
+
+ self.threadManager.work()
+ self.interactionManager.work()
+ self.scheduler.work()
+
+ def setupDB(self):
+ from database import DatabaseBackend
+ from FileManager import FileManager
+
+ self.db = DatabaseBackend(self) # the backend
+ self.db.setup()
+
+ self.files = FileManager(self)
+ self.db.manager = self.files #ugly?
+
+ def init_webserver(self):
+ if self.config['webinterface']['activated']:
+ self.webserver = WebServer(self)
+ self.webserver.start()
+
+ def init_logger(self, level):
+ console = logging.StreamHandler(sys.stdout)
+
+ # try to get a time formatting depending on system locale
+ tfrm = None
+ try: # change current locale to default if it is not set
+ current_locale = locale.getlocale()
+ if current_locale == (None, None):
+ current_locale = locale.setlocale(locale.LC_ALL, '')
+
+ # We use timeformat provided by locale when available
+ if current_locale != (None, None):
+ tfrm = locale.nl_langinfo(locale.D_FMT) + " " + locale.nl_langinfo(locale.T_FMT)
+ except: # something did go wrong, locale is heavily platform dependant
+ pass
+
+ # default formatting when no one was obtained
+ if not tfrm:
+ tfrm = "%d.%m.%Y %H:%M:%S"
+
+ frm = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s", tfrm)
+ console.setFormatter(frm)
+        self.log = logging.getLogger("log") # settable in config
+
+ if not exists(self.config['log']['log_folder']):
+ makedirs(self.config['log']['log_folder'], 0700)
+
+ if self.config['log']['file_log']:
+ if self.config['log']['log_rotate']:
+ file_handler = logging.handlers.RotatingFileHandler(join(self.config['log']['log_folder'], 'log.txt'),
+ maxBytes=self.config['log']['log_size'] * 1024,
+ backupCount=int(self.config['log']['log_count']),
+ encoding="utf8")
+ else:
+ file_handler = logging.FileHandler(join(self.config['log']['log_folder'], 'log.txt'), encoding="utf8")
+
+ file_handler.setFormatter(frm)
+ self.log.addHandler(file_handler)
+
+ self.log.addHandler(console) #if console logging
+ self.log.setLevel(level)
+
+ def removeLogger(self):
+ for h in list(self.log.handlers):
+ self.log.removeHandler(h)
+ h.close()
+
+
+ def restart(self):
+ self.shutdown()
+ chdir(owd)
+ # close some open fds
+ for i in range(3,50):
+ try:
+ close(i)
+ except :
+ pass
+
+ execl(executable, executable, *sys.argv)
+ _exit(0)
+
+ def shutdown(self):
+ self.log.info(_("shutting down..."))
+ self.eventManager.dispatchEvent("coreShutdown")
+ try:
+ if self.config['webinterface']['activated'] and hasattr(self, "webserver"):
+ pass # TODO: quit webserver?
+# self.webserver.quit()
+
+ for thread in self.threadManager.threads:
+ thread.put("quit")
+
+ self.api.stopAllDownloads()
+ self.addonManager.deactivateAddons()
+
+ except:
+ self.print_exc()
+ self.log.info(_("error while shutting down"))
+
+ finally:
+ self.files.syncSave()
+ self.db.shutdown()
+ self.shuttedDown = True
+
+ self.deletePidFile()
+
+ def shell(self):
+ """ stop and open an ipython shell inplace"""
+ if self.debug:
+ from IPython import embed
+ sys.stdout = sys._stdout
+ embed()
+
+ def breakpoint(self):
+ if self.debug:
+ from IPython.core.debugger import Pdb
+ sys.stdout = sys._stdout
+ if not self.pdb: self.pdb = Pdb()
+ self.pdb.set_trace()
+
+ def print_exc(self):
+ if self.debug:
+ print_exc()
+
+ def path(self, *args):
+ return join(pypath, *args)
+
+
+def deamon():
+ try:
+ pid = os.fork()
+ if pid > 0:
+ sys.exit(0)
+ except OSError, e:
+ print >> sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
+ sys.exit(1)
+
+ # decouple from parent environment
+ os.setsid()
+ os.umask(0)
+
+ # do second fork
+ try:
+ pid = os.fork()
+ if pid > 0:
+ # exit from second parent, print eventual PID before
+ print "Daemon PID %d" % pid
+ sys.exit(0)
+ except OSError, e:
+ print >> sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
+ sys.exit(1)
+
+ # Iterate through and close some file descriptors.
+ for fd in range(0, 3):
+ try:
+ os.close(fd)
+ except OSError: # ERROR, fd wasn't open to begin with (ignored)
+ pass
+
+ os.open(os.devnull, os.O_RDWR) # standard input (0)
+ os.dup2(0, 1) # standard output (1)
+ os.dup2(0, 2)
+
+ pyload_core = Core()
+ pyload_core.start()
+
+# And so it begins...
+def main():
+ #change name to 'pyLoadCore'
+ #from module.lib.rename_process import renameProcess
+ #renameProcess('pyLoadCore')
+ if "--daemon" in sys.argv:
+ deamon()
+ else:
+ pyload_core = Core()
+ try:
+ pyload_core.start()
+ except KeyboardInterrupt:
+ pyload_core.shutdown()
+ pyload_core.log.info(_("killed pyLoad from terminal"))
+ pyload_core.removeLogger()
+ _exit(1)
+
+
+if __name__ == "__main__":
+ print "This file can not be started directly." \ No newline at end of file
diff --git a/pyload/FileManager.py b/pyload/FileManager.py
new file mode 100644
index 000000000..b1d3891e9
--- /dev/null
+++ b/pyload/FileManager.py
@@ -0,0 +1,582 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from time import time
+from ReadWriteLock import ReadWriteLock
+
+from pyload.utils import lock, read_lock
+
+from Api import PackageStatus, DownloadStatus as DS, TreeCollection, PackageDoesNotExists
+from datatypes.PyFile import PyFile
+from datatypes.PyPackage import PyPackage, RootPackage
+
+# invalidates the cache
+def invalidate(func):
+ def new(*args):
+ args[0].downloadstats = {}
+ args[0].queuestats = {}
+ args[0].jobCache = {}
+ return func(*args)
+
+ return new
+
+# TODO: needs to be replaced later
+OWNER = 0
+
+class FileManager:
+ """Handles all request made to obtain information,
+ modify status or other request for links or packages"""
+
+ ROOT_PACKAGE = -1
+
    def __init__(self, core):
        """Constructor"""
        self.core = core
        self.evm = core.eventManager

        # translations
        self.statusMsg = [_("none"), _("offline"), _("online"), _("queued"), _("paused"),
                          _("finished"), _("skipped"), _("failed"), _("starting"),
                          _("waiting"), _("downloading"), _("temp. offline"), _("aborted"),
                          _("decrypting"), _("processing"), _("custom"), _("unknown")]

        self.files = {} # holds instances for files
        self.packages = {} # same for packages

        # occ -> list of ready file ids; "empty" marks an exhausted queue (see getJob)
        self.jobCache = {}

        # locking the caches, db is already locked implicit
        self.lock = ReadWriteLock()
        #self.lock._Verbose__verbose = True

        self.downloadstats = {} # cached dl stats
        self.queuestats = {} # cached queue stats

        self.db = self.core.db
+
    def save(self):
        """saves all data to backend"""
        # delegates to the database layer's commit
        self.db.commit()
+
    @read_lock
    def syncSave(self):
        """saves all data to backend and waits until all data are written"""
        # push every cached instance's state back to the database first
        for pyfile in self.files.values():
            pyfile.sync()

        for pypack in self.packages.values():
            pypack.sync()

        self.db.syncSave()
+
    def cachedFiles(self):
        # list of PyFile instances currently held in the cache
        return self.files.values()
+
    def cachedPackages(self):
        # list of PyPackage instances currently held in the cache
        return self.packages.values()
+
    def getCollector(self):
        # TODO: not implemented yet
        pass
+
    @invalidate
    def addLinks(self, data, package):
        """Add links, data = (plugin, url) tuple. Internal method should use API."""
        self.db.addLinks(data, package, OWNER)
        self.evm.dispatchEvent("package:updated", package)
+
+
    @invalidate
    def addPackage(self, name, folder, root, password, site, comment, paused):
        """Adds a package to database

        :return: the new package id"""
        pid = self.db.addPackage(name, folder, root, password, site, comment,
                                 PackageStatus.Paused if paused else PackageStatus.Ok, OWNER)
        p = self.db.getPackageInfo(pid)

        self.evm.dispatchEvent("package:inserted", pid, p.root, p.packageorder)
        return pid
+
+
    @lock
    def getPackage(self, pid):
        """return package instance"""
        if pid == self.ROOT_PACKAGE:
            # the virtual root is never stored; build it fresh
            return RootPackage(self, OWNER)
        elif pid in self.packages:
            pack = self.packages[pid]
            # refresh access time so cache eviction can be based on it
            pack.timestamp = time()
            return pack
        else:
            # cache miss: load from db and keep the instance cached
            info = self.db.getPackageInfo(pid, False)
            if not info: return None

            pack = PyPackage.fromInfoData(self, info)
            self.packages[pid] = pack

            return pack
+
    @read_lock
    def getPackageInfo(self, pid):
        """returns dict with package information"""
        if pid == self.ROOT_PACKAGE:
            pack = RootPackage(self, OWNER).toInfoData()
        elif pid in self.packages:
            # cached instance carries no stats; fetch them separately
            pack = self.packages[pid].toInfoData()
            pack.stats = self.db.getStatsForPackage(pid)
        else:
            pack = self.db.getPackageInfo(pid)

        if not pack: return None

        # todo: what does this todo mean?!
        #todo: fill child packs and files
        packs = self.db.getAllPackages(root=pid)
        if pid in packs: del packs[pid]
        pack.pids = packs.keys()

        files = self.db.getAllFiles(package=pid)
        pack.fids = files.keys()

        return pack
+
    @lock
    def getFile(self, fid):
        """returns pyfile instance"""
        if fid in self.files:
            return self.files[fid]
        else:
            # cache miss: load from db and keep the instance cached
            info = self.db.getFileInfo(fid)
            if not info: return None

            f = PyFile.fromInfoData(self, info)
            self.files[fid] = f
            return f
+
    @read_lock
    def getFileInfo(self, fid):
        """returns dict with file information"""
        # prefer the live cached instance over possibly stale db data
        if fid in self.files:
            return self.files[fid].toInfoData()

        return self.db.getFileInfo(fid)
+
    @read_lock
    def getTree(self, pid, full, state, search=None):
        """ return a TreeCollection and fill the info data of containing packages.
        optional filter only unfinished files
        """
        view = TreeCollection(pid)

        # for depth=1, we don't need to retrieve all files/packages
        root = pid if not full else None

        packs = self.db.getAllPackages(root)
        files = self.db.getAllFiles(package=root, state=state, search=search)

        # updating from cache
        for fid, f in self.files.iteritems():
            if fid in files:
                files[fid] = f.toInfoData()

        # foreign pid, don't overwrite local pid !
        for fpid, p in self.packages.iteritems():
            if fpid in packs:
                # copy the stats data
                stats = packs[fpid].stats
                packs[fpid] = p.toInfoData()
                packs[fpid].stats = stats

        # root package is not in database, create an instance
        if pid == self.ROOT_PACKAGE:
            view.root = RootPackage(self, OWNER).toInfoData()
            packs[self.ROOT_PACKAGE] = view.root
        elif pid in packs:
            view.root = packs[pid]
        else: # package does not exists
            return view

        # linear traversal over all data
        for fpid, p in packs.iteritems():
            if p.fids is None: p.fids = []
            if p.pids is None: p.pids = []

            root = packs.get(p.root, None)
            if root:
                if root.pids is None: root.pids = []
                root.pids.append(fpid)

        for fid, f in files.iteritems():
            p = packs.get(f.package, None)
            if p: p.fids.append(fid)


        # cutting of tree is not good in runtime, only saves bandwidth
        # need to remove some entries
        if full and pid > -1:
            keep = []
            queue = [pid]
            # BFS over the child-package links collected above
            while queue:
                fpid = queue.pop()
                keep.append(fpid)
                queue.extend(packs[fpid].pids)

            # now remove unneeded data
            for fpid in packs.keys():
                if fpid not in keep:
                    del packs[fpid]

            for fid, f in files.items():
                if f.package not in keep:
                    del files[fid]

        #remove root
        del packs[pid]
        view.files = files
        view.packages = packs

        return view
+
+
    @lock
    def getJob(self, occ):
        """get suitable job

        :param occ: occupied plugins the job must not belong to
        :return: a PyFile ready to download, or None"""

        #TODO only accessed by one thread, should not need a lock
        #TODO needs to be approved for new database
        #TODO clean mess
        #TODO improve selection of valid jobs

        if occ in self.jobCache:
            if self.jobCache[occ]:
                id = self.jobCache[occ].pop()
                if id == "empty":
                    # sentinel: db had no jobs last time; keep the marker
                    pyfile = None
                    self.jobCache[occ].append("empty")
                else:
                    pyfile = self.getFile(id)
            else:
                # cache exhausted; refill from the database
                jobs = self.db.getJob(occ)
                jobs.reverse()
                if not jobs:
                    self.jobCache[occ].append("empty")
                    pyfile = None
                else:
                    self.jobCache[occ].extend(jobs)
                    pyfile = self.getFile(self.jobCache[occ].pop())

        else:
            self.jobCache = {} #better not caching to much
            jobs = self.db.getJob(occ)
            jobs.reverse()
            self.jobCache[occ] = jobs

            if not jobs:
                self.jobCache[occ].append("empty")
                pyfile = None
            else:
                pyfile = self.getFile(self.jobCache[occ].pop())


        return pyfile
+
    def getDownloadStats(self, user=None):
        """ return number of downloads """
        # cached per user; invalidated by the @invalidate decorator elsewhere
        if user not in self.downloadstats:
            self.downloadstats[user] = self.db.downloadstats(user)

        return self.downloadstats[user]
+
    def getQueueStats(self, user=None, force=False):
        """number of files that have to be processed, failed files will not be included"""
        # cached per user; force=True bypasses the cache
        if user not in self.queuestats or force:
            self.queuestats[user] = self.db.queuestats(user)

        return self.queuestats[user]
+
    def scanDownloadFolder(self):
        # TODO: not implemented yet
        pass
+
    @lock
    @invalidate
    def deletePackage(self, pid):
        """delete package and all contained links"""

        p = self.getPackage(pid)
        if not p: return

        oldorder = p.packageorder
        root = p.root

        # stop running downloads belonging to this package first
        for pyfile in self.cachedFiles():
            if pyfile.packageid == pid:
                pyfile.abortDownload()

        # TODO: delete child packages
        # TODO: delete folder

        self.db.deletePackage(pid)
        self.releasePackage(pid)

        # close the ordering gap left by the removed package
        for pack in self.cachedPackages():
            if pack.root == root and pack.packageorder > oldorder:
                pack.packageorder -= 1

        self.evm.dispatchEvent("package:deleted", pid)
+
    @lock
    @invalidate
    def deleteFile(self, fid):
        """deletes links"""

        f = self.getFile(fid)
        if not f: return

        pid = f.packageid
        order = f.fileorder

        # abort if the file is currently being downloaded
        if fid in self.core.threadManager.processingIds():
            f.abortDownload()

        # TODO: delete real file

        self.db.deleteFile(fid, f.fileorder, f.packageid)
        self.releaseFile(fid)

        # close the ordering gap left by the removed file
        for pyfile in self.files.itervalues():
            if pyfile.packageid == pid and pyfile.fileorder > order:
                pyfile.fileorder -= 1

        self.evm.dispatchEvent("file:deleted", fid, pid)
+
    @lock
    def releaseFile(self, fid):
        """removes pyfile from cache"""
        if fid in self.files:
            del self.files[fid]
+
    @lock
    def releasePackage(self, pid):
        """removes package from cache"""
        if pid in self.packages:
            del self.packages[pid]
+
    def updateFile(self, pyfile):
        """updates file"""
        self.db.updateFile(pyfile)

        # This event is thrown with pyfile or only fid
        self.evm.dispatchEvent("file:updated", pyfile)
+
    def updatePackage(self, pypack):
        """updates a package"""
        self.db.updatePackage(pypack)
        self.evm.dispatchEvent("package:updated", pypack.pid)
+
    @invalidate
    def updateFileInfo(self, data, pid):
        """ updates file info (name, size, status,[ hash,] url)"""
        self.db.updateLinkInfo(data)
        self.evm.dispatchEvent("package:updated", pid)
+
    def checkAllLinksFinished(self):
        """checks if all files are finished and dispatch event

        :return: True when nothing is left in the queue"""

        # TODO: user context?
        if not self.db.queuestats()[0]:
            self.core.addonManager.dispatchEvent("download:allFinished")
            self.core.log.debug("All downloads finished")
            return True

        return False
+
    def checkAllLinksProcessed(self, fid=-1):
        """checks if all files was processed and pyload would idle now, needs fid which will be ignored when counting"""

        # reset count so statistic will update (this is called when dl was processed)
        self.resetCount()

        # TODO: user context?
        if not self.db.processcount(fid):
            self.core.addonManager.dispatchEvent("download:allProcessed")
            self.core.log.debug("All downloads processed")
            return True

        return False
+
    def checkPackageFinished(self, pyfile):
        """ checks if package is finished and calls addonmanager """

        ids = self.db.getUnfinished(pyfile.packageid)
        # finished when nothing unfinished remains, or only this file does
        if not ids or (pyfile.id in ids and len(ids) == 1):
            # setFinished flag guards against firing the event twice
            if not pyfile.package().setFinished:
                self.core.log.info(_("Package finished: %s") % pyfile.package().name)
                self.core.addonManager.packageFinished(pyfile.package())
                pyfile.package().setFinished = True
+
    def resetCount(self):
        # NOTE(review): queuecount is not initialised in __init__ and not read
        # in this class -- presumably consumed elsewhere; confirm.
        self.queuecount = -1
+
    @read_lock
    @invalidate
    def restartPackage(self, pid):
        """restart package"""
        # restart cached files first so in-memory state matches the db update
        for pyfile in self.cachedFiles():
            if pyfile.packageid == pid:
                self.restartFile(pyfile.id)

        self.db.restartPackage(pid)

        # allow the "package finished" event to fire again
        if pid in self.packages:
            self.packages[pid].setFinished = False

        self.evm.dispatchEvent("package:updated", pid)
+
    @read_lock
    @invalidate
    def restartFile(self, fid):
        """ restart file"""
        if fid in self.files:
            f = self.files[fid]
            f.status = DS.Queued
            # reset display name back to the url until re-checked
            f.name = f.url
            f.error = ""
            f.abortDownload()

        self.db.restartFile(fid)
        self.evm.dispatchEvent("file:updated", fid)
+
+
+ @lock
+ @invalidate
+ def orderPackage(self, pid, position):
+
+ p = self.getPackageInfo(pid)
+ self.db.orderPackage(pid, p.root, p.packageorder, position)
+
+ for pack in self.packages.itervalues():
+ if pack.root != p.root or pack.packageorder < 0: continue
+ if pack.pid == pid:
+ pack.packageorder = position
+ if p.packageorder > position:
+ if position <= pack.packageorder < p.packageorder:
+ pack.packageorder += 1
+ elif p.order < position:
+ if position >= pack.packageorder > p.packageorder:
+ pack.packageorder -= 1
+
+ self.db.commit()
+
+ self.evm.dispatchEvent("package:reordered", pid, position, p.root)
+
    @lock
    @invalidate
    def orderFiles(self, fids, pid, position):
        """Reorder a contiguous block of files inside package *pid*."""

        files = [self.getFileInfo(fid) for fid in fids]
        orders = [f.fileorder for f in files]
        # the fids must form one gapless run of fileorders
        if min(orders) + len(files) != max(orders) + 1:
            raise Exception("Tried to reorder non continous block of files")

        # minimum fileorder
        f = reduce(lambda x,y: x if x.fileorder < y.fileorder else y, files)
        order = f.fileorder

        self.db.orderFiles(pid, fids, order, position)
        diff = len(fids)

        if f.fileorder > position:
            # block moved towards the front: files in between shift back
            for pyfile in self.files.itervalues():
                if pyfile.packageid != f.package or pyfile.fileorder < 0: continue
                if position <= pyfile.fileorder < f.fileorder:
                    pyfile.fileorder += diff

            for i, fid in enumerate(fids):
                if fid in self.files:
                    self.files[fid].fileorder = position + i

        elif f.fileorder < position:
            # block moved towards the back: files in between shift forward
            for pyfile in self.files.itervalues():
                if pyfile.packageid != f.package or pyfile.fileorder < 0: continue
                if position >= pyfile.fileorder >= f.fileorder+diff:
                    pyfile.fileorder -= diff

            for i, fid in enumerate(fids):
                if fid in self.files:
                    self.files[fid].fileorder = position -diff + i + 1

        self.db.commit()

        self.evm.dispatchEvent("file:reordered", pid)
+
    @read_lock
    @invalidate
    def movePackage(self, pid, root):
        """ move pid - root

        :raises PackageDoesNotExists: when pid or root is unknown
        :return: True when the package was moved"""

        p = self.getPackageInfo(pid)
        dest = self.getPackageInfo(root)
        if not p: raise PackageDoesNotExists(pid)
        if not dest: raise PackageDoesNotExists(root)

        # cantor won't be happy if we put the package in itself
        if pid == root or p.root == root: return False

        # TODO move real folders

        # we assume pack is not in use anyway, so we can release it
        self.releasePackage(pid)
        self.db.movePackage(p.root, p.packageorder, pid, root)

        return True
+
    @read_lock
    @invalidate
    def moveFiles(self, fids, pid):
        """ move all fids to pid

        :raises PackageDoesNotExists: when the target package is unknown
        :return: True when the files were moved"""

        # all fids are assumed to belong to the same source package
        f = self.getFileInfo(fids[0])
        if not f or f.package == pid:
            return False
        if not self.getPackageInfo(pid):
            raise PackageDoesNotExists(pid)

        # TODO move real files

        self.db.moveFiles(f.package, fids, pid)

        return True
+
+
    @invalidate
    def reCheckPackage(self, pid):
        """ recheck links in package """
        data = self.db.getPackageData(pid)

        urls = []

        # only re-check links that are not already done/skipped
        for pyfile in data.itervalues():
            if pyfile.status not in (DS.NA, DS.Finished, DS.Skipped):
                urls.append((pyfile.url, pyfile.pluginname))

        self.core.threadManager.createInfoThread(urls, pid)
+
+
    @invalidate
    def restartFailed(self):
        """ restart all failed links """
        # failed should not be in cache anymore, so working on db is sufficient
        self.db.restartFailed()
diff --git a/pyload/InitHomeDir.py b/pyload/InitHomeDir.py
new file mode 100644
index 000000000..d24837d83
--- /dev/null
+++ b/pyload/InitHomeDir.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+
+ This modules inits working directories and global variables, pydir and homedir
+"""
+
+from os import makedirs, path, chdir
+from os.path import join
+import sys
+from sys import argv, platform
+
+import __builtin__
+
+__builtin__.owd = path.abspath("") #original working directory
+__builtin__.pypath = path.abspath(path.join(__file__, "..", ".."))
+
+# Before changing the cwd, the abspath of the module must be manifested
+if 'pyload' in sys.modules:
+ rel_pyload = sys.modules['pyload'].__path__[0]
+ abs_pyload = path.abspath(rel_pyload)
+ if abs_pyload != rel_pyload:
+ sys.modules['pyload'].__path__.insert(0, abs_pyload)
+
+sys.path.append(join(pypath, "pyload", "lib"))
+
+homedir = ""
+
+if platform == 'nt':
+ homedir = path.expanduser("~")
+ if homedir == "~":
+ import ctypes
+
+ CSIDL_APPDATA = 26
+ _SHGetFolderPath = ctypes.windll.shell32.SHGetFolderPathW
+ _SHGetFolderPath.argtypes = [ctypes.wintypes.HWND,
+ ctypes.c_int,
+ ctypes.wintypes.HANDLE,
+ ctypes.wintypes.DWORD, ctypes.wintypes.LPCWSTR]
+
+ path_buf = ctypes.wintypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
+ result = _SHGetFolderPath(0, CSIDL_APPDATA, 0, 0, path_buf)
+ homedir = path_buf.value
+else:
+ homedir = path.expanduser("~")
+
+__builtin__.homedir = homedir
+
+configdir = None
+args = " ".join(argv)
+# dirty method to set configdir from commandline arguments
+if "--configdir=" in args:
+ for arg in argv:
+ if arg.startswith("--configdir="):
+ configdir = arg.replace('--configdir=', '').strip()
+
+elif "nosetests" in args:
+ print "Running in test mode"
+ configdir = join(pypath, "tests", "config")
+
+elif path.exists(path.join(pypath, "pyload", "config", "configdir")):
+ f = open(path.join(pypath, "pyload", "config", "configdir"), "rb")
+ c = f.read().strip()
+ f.close()
+ configdir = path.join(pypath, c)
+
+# default config dir
+if not configdir:
+ if platform in ("posix", "linux2", "darwin"):
+ configdir = path.join(homedir, ".pyload")
+ else:
+ configdir = path.join(homedir, "pyload")
+
+if not path.exists(configdir):
+ makedirs(configdir, 0700)
+
+__builtin__.configdir = configdir
+chdir(configdir)
+
+#print "Using %s as working directory." % configdir
diff --git a/pyload/PluginManager.py b/pyload/PluginManager.py
new file mode 100644
index 000000000..182768689
--- /dev/null
+++ b/pyload/PluginManager.py
@@ -0,0 +1,438 @@
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2013 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN, mkaay
+###############################################################################
+
+import re
+import sys
+
+from os import listdir, makedirs
+from os.path import isfile, join, exists, abspath, basename
+from sys import version_info
+from time import time
+from collections import defaultdict
+
+from pyload.lib.SafeEval import const_eval as literal_eval
+from pyload.plugins.Base import Base
+
+from new_collections import namedtuple
+
+#TODO: ignores not updatable
+
+# ignore these plugin configs, mainly because plugins were wiped out
+IGNORE = (
+ "FreakshareNet", "SpeedManager", "ArchiveTo", "ShareCx", ('addons', 'UnRar'),
+ 'EasyShareCom', 'FlyshareCz'
+)
+
+PluginTuple = namedtuple("PluginTuple", "version re deps category user path")
+
class BaseAttributes(defaultdict):
    """Dictionary that falls back to ``Base.__<key>__`` for missing keys."""

    def __missing__(self, key):
        dunder = "__%s__" % key
        if hasattr(Base, dunder):
            return getattr(Base, dunder)
        return defaultdict.__missing__(self, key)
+
+
+class PluginManager:
+ ROOT = "pyload.plugins."
+ LOCALROOT = "localplugins."
+ TYPES = ("crypter", "hoster", "accounts", "addons", "network", "internal")
+
+ BUILTIN = re.compile(r'__(?P<attr>[a-z0-9_]+)__\s*=\s?(True|False|None|[0-9x.]+)', re.I)
+ SINGLE = re.compile(r'__(?P<attr>[a-z0-9_]+)__\s*=\s*(?:r|u|_)?((?:(?<!")"(?!")|\'|\().*(?:(?<!")"(?!")|\'|\)))',
+ re.I)
+ # note the nongreedy character: that means we can not embed list and dicts
+ MULTI = re.compile(r'__(?P<attr>[a-z0-9_]+)__\s*=\s*((?:\{|\[|"{3}).*?(?:"""|\}|\]))', re.DOTALL | re.M | re.I)
+
+ NO_MATCH = re.compile(r'^no_match$')
+
    def __init__(self, core):
        self.core = core

        #self.config = self.core.config
        self.log = core.log

        self.plugins = {}   # type -> {name -> PluginTuple}
        self.modules = {} # cached modules
        self.history = [] # match history to speedup parsing (type, name)
        self.user_context = {} # plugins working with user context
        self.createIndex()

        #register for import addon
        sys.meta_path.append(self)
+
+ def logDebug(self, type, plugin, msg):
+ self.log.debug("Plugin %s | %s: %s" % (type, plugin, msg))
+
    def createIndex(self):
        """create information for all plugins available"""
        # add to path, so we can import from userplugins
        sys.path.append(abspath(""))

        # ensure the userplugins package exists and is importable
        if not exists("userplugins"):
            makedirs("userplugins")
        if not exists(join("userplugins", "__init__.py")):
            f = open(join("userplugins", "__init__.py"), "wb")
            f.close()

        a = time()
        for type in self.TYPES:
            self.plugins[type] = self.parse(type)

        self.log.debug("Created index of plugins in %.2f ms", (time() - a) * 1000)
+
    def parse(self, folder, home=None):
        """ Analyze and parses all plugins in folder

        :param folder: plugin type, also the subfolder name
        :param home: dict of already-parsed core plugins; when given, the
            userplugins folder is scanned and matched against them
        """
        plugins = {}
        if home:
            pfolder = join("userplugins", folder)
            # ensure user plugin folder is an importable package
            if not exists(pfolder):
                makedirs(pfolder)
            if not exists(join(pfolder, "__init__.py")):
                f = open(join(pfolder, "__init__.py"), "wb")
                f.close()

        else:
            pfolder = join(pypath, "pyload", "plugins", folder)

        for f in listdir(pfolder):
            # accept .py files and version-tagged bytecode (_25/_26/_27.pyc)
            if (isfile(join(pfolder, f)) and f.endswith(".py") or f.endswith("_25.pyc") or f.endswith(
                "_26.pyc") or f.endswith("_27.pyc")) and not f.startswith("_"):
                if f.endswith("_25.pyc") and version_info[0:2] != (2, 5):
                    continue
                elif f.endswith("_26.pyc") and version_info[0:2] != (2, 6):
                    continue
                elif f.endswith("_27.pyc") and version_info[0:2] != (2, 7):
                    continue

                # replace suffix and version tag
                name = f[:-3]
                if name[-1] == ".": name = name[:-4]

                plugin = self.parsePlugin(join(pfolder, f), folder, name, home)
                if plugin:
                    plugins[name] = plugin

        if not home:
            # second pass: user plugins may override the core ones just parsed
            temp = self.parse(folder, plugins)
            plugins.update(temp)

        return plugins
+
    def parseAttributes(self, filename, name, folder=""):
        """ Parse attribute dict from plugin"""
        data = open(filename, "rb")
        content = data.read()
        data.close()

        attrs = BaseAttributes()
        # scan for __attr__ = value assignments via the three regex flavours
        for m in self.BUILTIN.findall(content) + self.SINGLE.findall(content) + self.MULTI.findall(content):
            #replace gettext function and eval result
            try:
                attrs[m[0]] = literal_eval(m[-1].replace("_(", "("))
            except:
                self.logDebug(folder, name, "Error when parsing: %s" % m[-1])
                self.core.print_exc()

            if not hasattr(Base, "__%s__" % m[0]):
                if m[0] != "type": #TODO remove type from all plugins, its not needed
                    self.logDebug(folder, name, "Unknown attribute '%s'" % m[0])

        return attrs
+
    def parsePlugin(self, filename, folder, name, home=None):
        """ Parses a plugin from disk, folder means plugin type in this context. Also sets config.

        :arg home: dict with plugins, of which the found one will be matched against (according version)
        :returns PluginTuple"""

        attrs = self.parseAttributes(filename, name, folder)
        if not attrs: return

        version = 0

        if "version" in attrs:
            try:
                version = float(attrs["version"])
            except ValueError:
                self.logDebug(folder, name, "Invalid version %s" % attrs["version"])
                version = 9 #TODO remove when plugins are fixed, causing update loops
        else:
            self.logDebug(folder, name, "No version attribute")

        # home contains plugins from pyload root
        if home and name in home:
            # user plugin only wins when strictly newer than the core one
            if home[name].version >= version:
                return

        if name in IGNORE or (folder, name) in IGNORE:
            return

        if "pattern" in attrs and attrs["pattern"]:
            try:
                plugin_re = re.compile(attrs["pattern"], re.I)
            except:
                self.logDebug(folder, name, "Invalid regexp pattern '%s'" % attrs["pattern"])
                plugin_re = self.NO_MATCH
        else:
            plugin_re = self.NO_MATCH

        deps = attrs["dependencies"]
        category = attrs["category"] if folder == "addons" else ""

        # create plugin tuple
        plugin = PluginTuple(version, plugin_re, deps, category, bool(home), filename)

        # internals have no config
        if folder == "internal":
            return plugin

        # non-internal addons always get at least an "activated" switch
        if folder == "addons" and "config" not in attrs and not attrs["internal"]:
            attrs["config"] = (["activated", "bool", "Activated", False],)

        if "config" in attrs and attrs["config"] is not None:
            config = attrs["config"]
            desc = attrs["description"]
            expl = attrs["explanation"]

            # Convert tuples to list
            config = [list(x) for x in config]

            if folder == "addons" and not attrs["internal"]:
                for item in config:
                    if item[0] == "activated": break
                else: # activated flag missing
                    config.insert(0, ("activated", "bool", "Activated", False))

            # Everything that is no addon and user_context=True, is added to dict
            if folder != "addons" or attrs["user_context"]:
                self.user_context[name] = True

            try:
                self.core.config.addConfigSection(name, name, desc, expl, config)
            except:
                self.logDebug(folder, name, "Invalid config %s" % config)

        return plugin
+
+
    def parseUrls(self, urls):
        """parse plugins for given list of urls, separate to crypter and hoster

        :return: (hoster list, crypter list) of (url, plugin name) tuples"""

        res = {"hoster": [], "crypter": []} # tupels of (url, plugin)

        for url in urls:
            if type(url) not in (str, unicode, buffer):
                self.log.debug("Parsing invalid type %s" % type(url))
                continue

            found = False

            # fast path: try recently matched plugins first
            for ptype, name in self.history:
                if self.plugins[ptype][name].re.match(url):
                    res[ptype].append((url, name))
                    found = (ptype, name)
                    break # need to exit this loop first

            if found: # found match
                if self.history[0] != found: #update history
                    self.history.remove(found)
                    self.history.insert(0, found)
                continue

            # slow path: scan every crypter/hoster pattern
            for ptype in ("crypter", "hoster"):
                for name, plugin in self.plugins[ptype].iteritems():
                    if plugin.re.match(url):
                        res[ptype].append((url, name))
                        self.history.insert(0, (ptype, name))
                        del self.history[10:] # cut down to size of 10
                        found = True
                        break

            # nothing matched: fall back to the generic BasePlugin hoster
            if not found:
                res["hoster"].append((url, "BasePlugin"))

        return res["hoster"], res["crypter"]
+
+ def getPlugins(self, type):
+ return self.plugins.get(type, None)
+
+ def findPlugin(self, name, pluginlist=("hoster", "crypter")):
+ for ptype in pluginlist:
+ if name in self.plugins[ptype]:
+ return ptype, self.plugins[ptype][name]
+ return None, None
+
    def getPluginModule(self, name):
        """ Decprecated: return plugin module from hoster|crypter"""
        self.log.debug("Deprecated method: .getPluginModule()")
        type, plugin = self.findPlugin(name)
        return self.loadModule(type, name)
+
    def getPluginClass(self, name):
        """ return plugin class from hoster|crypter, always the not overwritten one """
        type, plugin = self.findPlugin(name)
        return self.loadClass(type, name)

    # MultiHoster will overwrite this
    getPlugin = getPluginClass
+
    def loadAttributes(self, type, name):
        """Re-parse the attribute dict of an already indexed plugin."""
        plugin = self.plugins[type][name]
        return self.parseAttributes(plugin.path, name, type)
+
    def loadModule(self, type, name):
        """ Returns loaded module for plugin

        :param type: plugin type, subfolder of module.plugins
        :param name:
        """
        plugins = self.plugins[type]
        if name in plugins:
            if (type, name) in self.modules: return self.modules[(type, name)]
            try:
                # convert path to python recognizable import
                path = basename(plugins[name].path).replace(".pyc", "").replace(".py", "")
                module = __import__(self.ROOT + "%s.%s" % (type, path), globals(), locals(), path)
                self.modules[(type, name)] = module # cache import, maybe unneeded
                return module
            except Exception, e:
                self.log.error(_("Error importing %(name)s: %(msg)s") % {"name": name, "msg": str(e)})
                self.core.print_exc()
+
    def loadClass(self, type, name):
        """Returns the class of a plugin with the same name"""
        module = self.loadModule(type, name)
        if module: return getattr(module, name)
+
    def find_module(self, fullname, path=None):
        """PEP 302 import-hook entry: claim imports that must be redirected
        between core plugins (ROOT) and user plugins (LOCALROOT)."""
        #redirecting imports if necesarry
        if fullname.startswith(self.ROOT) or fullname.startswith(self.LOCALROOT): #separate pyload plugins
            if fullname.startswith(self.LOCALROOT):
                user = 1
            else:
                user = 0 #used as bool and int

            split = fullname.split(".")
            if len(split) != 4 - user: return
            type, name = split[2 - user:4 - user]

            if type in self.plugins and name in self.plugins[type]:
                #userplugin is a newer version
                if not user and self.plugins[type][name].user:
                    return self
                #imported from userdir, but pyloads is newer
                if user and not self.plugins[type][name].user:
                    return self

        # TODO: Remove when all plugin imports are adapted
        if "module" in fullname:
            return self
+
+
    def load_module(self, name, replace=True):
        """PEP 302 import-hook loader: import the redirected module and
        register it under both the requested and the redirected name."""
        if name not in sys.modules: #could be already in modules

            # TODO: only temporary
            # legacy "module" package names are rewritten to "pyload"
            if name.endswith("module"):
                # name = "pyload."
                name = name.replace(".module", "")
                self.log.debug("Old import reference detected, use %s" % name)
                replace = False
                return __import__("pyload")
            if name.startswith("module"):
                name = name.replace("module", "pyload")
                self.log.debug("Old import reference detected, use %s" % name)
                replace = False

            if replace:
                # flip between core plugin namespace and user plugin namespace
                if self.ROOT in name:
                    newname = name.replace(self.ROOT, self.LOCALROOT)
                else:
                    newname = name.replace(self.LOCALROOT, self.ROOT)
            else:
                newname = name

            base, plugin = newname.rsplit(".", 1)

            self.log.debug("Redirected import %s -> %s" % (name, newname))

            module = __import__(newname, globals(), locals(), [plugin])
            #inject under new an old name
            sys.modules[name] = module
            sys.modules[newname] = module

        return sys.modules[name]
+
    def reloadPlugins(self, type_plugins):
        """ reloads and reindexes plugins

        :param type_plugins: iterable of (type, name) pairs
        :return: True on success, False when nothing to do or not reloadable"""
        if not type_plugins: return False

        self.log.debug("Request reload of plugins: %s" % type_plugins)

        # group requested plugin names by type
        as_dict = {}
        for t, n in type_plugins:
            if t in as_dict:
                as_dict[t].append(n)
            else:
                as_dict[t] = [n]

        # we do not reload addons or internals, would cause to much side effects
        if "addons" in as_dict or "internal" in as_dict:
            return False

        for type in as_dict.iterkeys():
            for plugin in as_dict[type]:
                if plugin in self.plugins[type]:
                    if (type, plugin) in self.modules:
                        self.log.debug("Reloading %s" % plugin)
                        reload(self.modules[(type, plugin)])

        # index re-creation
        # NOTE(review): this list includes "container" and "captcha", which are
        # not in self.TYPES and so were never indexed initially -- confirm.
        for type in ("crypter", "container", "hoster", "captcha", "accounts"):
            self.plugins[type] = self.parse(type)

        if "accounts" in as_dict: #accounts needs to be reloaded
            self.core.accountManager.initPlugins()
            self.core.scheduler.addJob(0, self.core.accountManager.getAccountInfos)

        return True
+
+ def isUserPlugin(self, plugin):
+ """ A plugin suitable for multiple user """
+ return plugin in self.user_context
+
+ def isPluginType(self, plugin, type):
+ return plugin in self.plugins[type]
+
+ def getCategory(self, plugin):
+ if plugin in self.plugins["addons"]:
+ return self.plugins["addons"][plugin].category or "addon"
+
    def loadIcon(self, name):
        """ load icon for single plugin, base64 encoded"""
        # TODO: not implemented yet
        pass
+
    def checkDependencies(self, type, name):
        """ Check deps for given plugin

        :return: List of unfullfilled dependencies
        """
        # TODO: not implemented yet
        pass
+
diff --git a/pyload/Scheduler.py b/pyload/Scheduler.py
new file mode 100644
index 000000000..0bc396b69
--- /dev/null
+++ b/pyload/Scheduler.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
from heapq import heapify, heappop, heappush
from thread import start_new_thread
from threading import Lock
from time import time
+
class AlreadyCalled(Exception):
    """ Raised when the result of a Deferred is set more than once. """
    pass
+
+
class Deferred():
    """ Minimal deferred result: callbacks registered with extra bound
    arguments are invoked once `callback` fires with the result. """

    def __init__(self):
        self.call = []     # list of (function, bound args, bound kwargs)
        self.result = ()   # (args, kwargs) once fired, empty before

    def addCallback(self, f, *cargs, **ckwargs):
        """ Register `f`; it will be called with the result arguments
        followed by `cargs`/`ckwargs`. """
        self.call.append((f, cargs, ckwargs))

    def callback(self, *args, **kwargs):
        """ Fire the deferred and run all callbacks.

        :raises AlreadyCalled: when a result was already set
        """
        if self.result:
            raise AlreadyCalled
        self.result = (args, kwargs)
        for f, cargs, ckwargs in self.call:
            # build per-callback argument lists instead of mutating args/kwargs,
            # so one callback's bound arguments do not leak into the next;
            # also fixes the broken invocation `f(*args ** kwargs)`
            merged_args = args + tuple(cargs)
            merged_kwargs = dict(kwargs)
            merged_kwargs.update(ckwargs)
            f(*merged_args, **merged_kwargs)
+
+
class Scheduler():
    """ Runs callables after a delay, ordered by due time in a priority queue. """

    def __init__(self, core):
        self.core = core

        self.queue = PriorityQueue()

    def addJob(self, t, call, args=None, kwargs=None, threaded=True):
        """ Schedule `call` to run in `t` seconds.

        :param t: delay in seconds relative to now
        :param call: callable to execute
        :param args: positional arguments for the call
        :param kwargs: keyword arguments for the call
        :param threaded: execute the job in a new thread
        :return: :class:`Deferred` that fires with the call's result
        """
        # avoid mutable default arguments shared between invocations
        if args is None: args = []
        if kwargs is None: kwargs = {}
        d = Deferred()
        t += time()
        j = Job(t, call, args, kwargs, d, threaded)
        self.queue.put((t, j))
        return d

    def removeJob(self, d):
        """
        :param d: deferred object returned by addJob
        :return: True if the job was deleted
        """
        index = -1

        # entries are (time, Job) tuples; match on the job's deferred
        for i, j in enumerate(self.queue):
            if j[1].deferred == d:
                index = i

        if index >= 0:
            del self.queue[index]
            return True

        return False

    def work(self):
        """ Start all due jobs; called periodically by the core loop. """
        while True:
            t, j = self.queue.get()
            if not j:
                # queue empty
                break
            else:
                if t <= time():
                    j.start()
                else:
                    # not due yet: put it back and stop, later entries are even later
                    self.queue.put((t, j))
                    break
+
+
class Job():
    """ A single scheduled call together with its due time and result deferred. """

    def __init__(self, time, call, args=None, kwargs=None, deferred=None, threaded=True):
        # avoid mutable default arguments shared between instances
        self.time = float(time)
        self.call = call
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.deferred = deferred
        self.threaded = threaded

    def run(self):
        """ Execute the call; fire the deferred with the result if one is set. """
        ret = self.call(*self.args, **self.kwargs)
        if self.deferred is None:
            return
        else:
            self.deferred.callback(ret)

    def start(self):
        """ Run the job, in a new thread when `threaded` is set. """
        if self.threaded:
            start_new_thread(self.run, ())
        else:
            self.run()
+
+
class PriorityQueue():
    """ A thread-safe, non-blocking priority queue backed by a binary heap.

    Elements are comparable tuples; the smallest element is returned first.
    `get` never blocks - it returns (None, None) when the queue is empty.
    """

    def __init__(self):
        self.queue = []
        self.lock = Lock()

    def __iter__(self):
        # NOTE: iterates in heap order, not in sorted order
        return iter(self.queue)

    def __delitem__(self, key):
        # deleting from the middle of the backing list breaks the heap
        # invariant, so it must be restored; also hold the lock like the
        # other mutating operations do
        with self.lock:
            del self.queue[key]
            heapify(self.queue)

    def put(self, element):
        with self.lock:
            heappush(self.queue, element)

    def get(self):
        """ Return the smallest element, or (None, None) when empty """
        with self.lock:
            try:
                return heappop(self.queue)
            except IndexError:
                return None, None
diff --git a/pyload/Setup.py b/pyload/Setup.py
new file mode 100644
index 000000000..d2ec3731f
--- /dev/null
+++ b/pyload/Setup.py
@@ -0,0 +1,418 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+import pyload.utils.pylgettext as gettext
+import os
+import sys
+import socket
+import webbrowser
+
+from getpass import getpass
+from time import time
+from sys import exit
+
+from pyload.utils.fs import abspath, dirname, exists, join, makedirs
+from pyload.utils import get_console_encoding
+from pyload.web.ServerThread import WebServer
+
+
+class Setup():
+ """
+ pyLoads initial setup configuration assistant
+ """
+
    def __init__(self, path, config):
        """
        :param path: pyLoad installation path, used to locate translations
        :param config: config parser instance the chosen settings are written to
        """
        self.path = path
        self.config = config
        self.stdin_encoding = get_console_encoding(sys.stdin.encoding)
        self.lang = None
        # We will create a timestamp so that the setup will be completed in a specific interval
        self.timestamp = time()

        # TODO: probably unneeded
        self.yes = "yes"
        self.no = "no"
+
+
    def start(self):
        """ Entry point: starts the setup webserver, then asks whether to
        continue via the webinterface or on the command line.

        NOTE(review): the web server is started unconditionally before the
        user is asked whether to configure via web - confirm this is intended.
        """
        web = WebServer(pysetup=self)
        web.start()

        error = web.check_error()
        if error: #todo errno 44 port already in use
            print error

        url = "http://%s:%d/" % (socket.gethostbyname(socket.gethostname()), web.port)

        print "Setup is started"

        opened = webbrowser.open_new_tab(url)
        if not opened:
            print "Please point your browser to %s" % url

        self.ask_lang()

        print ""
        print _("Would you like to configure pyLoad via Webinterface?")
        print _("You need a Browser and a connection to this PC for it.")
        print _("Url would be: http://hostname:8000/")
        viaweb = self.ask(_("Start initial webinterface for configuration?"), self.yes, bool=True)
        if viaweb:
            self.start_web()
        else:
            self.start_cli()
+
+
+
    def start_cli(self):
        """ Run the interactive command line setup dialog.

        NOTE(review): `avail`, `ssl`, `captcha`, `js` and `web` are used below
        but never defined in this method - the system/dependency check that
        should provide them appears to be missing. Confirm before relying on
        the dependency report.
        """

        print _("Welcome to the pyLoad Configuration Assistent.")
        print _("It will check your system and make a basic setup in order to run pyLoad.")
        print ""
        print _("The value in brackets [] always is the default value,")
        print _("in case you don't want to change it or you are unsure what to choose, just hit enter.")
        print _(
            "Don't forget: You can always rerun this assistent with --setup or -s parameter, when you start pyLoadCore.")
        print _("If you have any problems with this assistent hit CTRL+C,")
        print _("to abort and don't let him start with pyLoadCore automatically anymore.")
        print ""
        print _("When you are ready for system check, hit enter.")
        raw_input()

        #self.get_page_next()

        # report missing optional features, then let the user bail out
        if len(avail) < 5:
            print _("Features missing: ")
            print

            if not self.check_module("Crypto"):
                print _("no py-crypto available")
                print _("You need this if you want to decrypt container files.")
                print ""

            if not ssl:
                print _("no SSL available")
                print _("This is needed if you want to establish a secure connection to core or webinterface.")
                print _("If you only want to access locally to pyLoad ssl is not useful.")
                print ""

            if not captcha:
                print _("no Captcha Recognition available")
                print _("Only needed for some hosters and as freeuser.")
                print ""

            if not js:
                print _("no JavaScript engine found")
                print _("You will need this for some Click'N'Load links. Install Spidermonkey, ossp-js, pyv8 or rhino")

            print _("You can abort the setup now and fix some dependencies if you want.")

        con = self.ask(_("Continue with setup?"), self.yes, bool=True)

        if not con:
            return False

        print ""
        print _("Do you want to change the config path? Current is %s") % abspath("")
        print _(
            "If you use pyLoad on a server or the home partition lives on an internal flash it may be a good idea to change it.")
        path = self.ask(_("Change config path?"), self.no, bool=True)
        if path:
            self.conf_path()
            #calls exit when changed

        print ""
        print _("Do you want to configure login data and basic settings?")
        print _("This is recommend for first run.")
        con = self.ask(_("Make basic setup?"), self.yes, bool=True)

        if con:
            self.conf_basic()

        if ssl:
            print ""
            print _("Do you want to configure ssl?")
            ssl = self.ask(_("Configure ssl?"), self.no, bool=True)
            if ssl:
                self.conf_ssl()

        if web:
            print ""
            print _("Do you want to configure webinterface?")
            web = self.ask(_("Configure webinterface?"), self.yes, bool=True)
            if web:
                self.conf_web()

        print ""
        print _("Setup finished successfully.")
        print _("Hit enter to exit and restart pyLoad")
        raw_input()
        return True
+
+
    def start_web(self):
        """ Run the setup via the webinterface; falls back to CLI on failure. """
        print ""
        print _("Webinterface running for setup.")
        # TODO start browser?
        try:
            from pyload.web import ServerThread
            # the webinterface picks the setup instance up from this global
            ServerThread.setup = self
            from pyload.web import webinterface
            webinterface.run_simple()
            self.web = True
            return True
        except Exception, e:
            print "Webinterface failed with this error: ", e
            print "Falling back to commandline setup."
            self.start_cli()
+
+
    def conf_basic(self):
        """ Ask for the initial user account and the most important settings. """
        print ""
        print _("## Basic Setup ##")

        print ""
        print _("The following logindata is valid for CLI, GUI and webinterface.")

        from pyload.database import DatabaseBackend

        # write directly to the user database, the core is not running yet
        db = DatabaseBackend(None)
        db.setup()
        username = self.ask(_("Username"), "User")
        password = self.ask("", "", password=True)
        db.addUser(username, password)
        db.shutdown()

        print ""
        print _("External clients (GUI, CLI or other) need remote access to work over the network.")
        print _("However, if you only want to use the webinterface you may disable it to save ram.")
        self.config["remote"]["activated"] = self.ask(_("Enable remote access"), self.yes, bool=True)

        print ""
        # offer only the languages declared in the config meta data
        langs = self.config.getMetaData("general", "language")
        self.config["general"]["language"] = self.ask(_("Language"), "en", langs.type.split(";"))

        self.config["general"]["download_folder"] = self.ask(_("Download folder"), "Downloads")
        self.config["download"]["max_downloads"] = self.ask(_("Max parallel downloads"), "3")
        #print _("You should disable checksum proofing, if you have low hardware requirements.")
        #self.config["general"]["checksum"] = self.ask(_("Proof checksum?"), "y", bool=True)

        reconnect = self.ask(_("Use Reconnect?"), self.no, bool=True)
        self.config["reconnect"]["activated"] = reconnect
        if reconnect:
            self.config["reconnect"]["method"] = self.ask(_("Reconnect script location"), "./reconnect.sh")
+
+
    def conf_web(self):
        """ Ask for webinterface settings: activation, address, port, backend. """
        print ""
        print _("## Webinterface Setup ##")

        print ""
        self.config["webinterface"]["activated"] = self.ask(_("Activate webinterface?"), self.yes, bool=True)
        print ""
        print _("Listen address, if you use 127.0.0.1 or localhost, the webinterface will only accessible locally.")
        self.config["webinterface"]["host"] = self.ask(_("Address"), "0.0.0.0")
        self.config["webinterface"]["port"] = self.ask(_("Port"), "8000")
        print ""
        print _("pyLoad offers several server backends, now following a short explanation.")
        print "threaded:", _("Default server, this server offers SSL and is a good alternative to builtin.")
        print "fastcgi:", _(
            "Can be used by apache, lighttpd, requires you to configure them, which is not too easy job.")
        print "lightweight:", _("Very fast alternative written in C, requires libev and linux knowledge.")
        print "\t", _("Get it from here: https://github.com/jonashaag/bjoern, compile it")
        print "\t", _("and copy bjoern.so to pyload/lib")

        print
        print _(
            "Attention: In some rare cases the builtin server is not working, if you notice problems with the webinterface")
        print _("come back here and change the builtin server to the threaded one here.")

        self.config["webinterface"]["server"] = self.ask(_("Server"), "threaded",
                                                         ["builtin", "threaded", "fastcgi", "lightweight"])
+
    def conf_ssl(self):
        """ Print the openssl commands for creating certificates and toggle ssl. """
        print ""
        print _("## SSL Setup ##")
        print ""
        print _("Execute these commands from pyLoad config folder to make ssl certificates:")
        print ""
        print "openssl genrsa -out ssl.key 1024"
        print "openssl req -new -key ssl.key -out ssl.csr"
        print "openssl req -days 36500 -x509 -key ssl.key -in ssl.csr > ssl.crt "
        print ""
        print _("If you're done and everything went fine, you can activate ssl now.")
        self.config["ssl"]["activated"] = self.ask(_("Activate SSL?"), self.yes, bool=True)
+
    def set_user(self):
        """ Interactive command line menu for creating, listing and removing users. """
        # install translations so the prompts use the configured language
        gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
        translation = gettext.translation("setup", join(self.path, "locale"),
                                          languages=[self.config["general"]["language"], "en"], fallback=True)
        translation.install(True)

        from pyload.database import DatabaseBackend

        db = DatabaseBackend(None)
        db.setup()

        # tracks whether any change was made, used for the shutdown below
        noaction = True
        try:
            while True:
                print _("Select action")
                print _("1 - Create/Edit user")
                print _("2 - List users")
                print _("3 - Remove user")
                print _("4 - Quit")
                action = raw_input("[1]/2/3/4: ")
                if not action in ("1", "2", "3", "4"):
                    continue
                elif action == "1":
                    print ""
                    username = self.ask(_("Username"), "User")
                    password = self.ask("", "", password=True)
                    db.addUser(username, password)
                    noaction = False
                elif action == "2":
                    print ""
                    print _("Users")
                    print "-----"
                    users = db.getAllUserData()
                    noaction = False
                    for user in users.itervalues():
                        print user.name
                    print "-----"
                    print ""
                elif action == "3":
                    print ""
                    username = self.ask(_("Username"), "")
                    if username:
                        db.removeUserByName(username)
                        noaction = False
                elif action == "4":
                    db.syncSave()
                    break
        finally:
            # NOTE(review): the database is only shut down when an action was
            # performed - confirm whether skipping shutdown otherwise is intended
            if not noaction:
                db.shutdown()
+
    def conf_path(self, trans=False):
        """ Ask for a new config path and persist it to the configdir file.

        Exits the process on success; the caller must restart pyLoad.

        :param trans: install translations first
        """
        if trans:
            gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
            translation = gettext.translation("setup", join(self.path, "locale"),
                                              languages=[self.config["general"]["language"], "en"], fallback=True)
            translation.install(True)

        print _("Setting new configpath, current configuration will not be transferred!")
        path = self.ask(_("Config path"), abspath(""))
        try:
            # pypath is a global installed by the pyLoad bootstrap code
            path = join(pypath, path)
            if not exists(path):
                makedirs(path)
            f = open(join(pypath, "pyload", "config", "configdir"), "wb")
            f.write(path)
            f.close()
            print _("Config path changed, setup will now close, please restart to go on.")
            print _("Press Enter to exit.")
            raw_input()
            exit()
        except Exception, e:
            print _("Setting config path failed: %s") % str(e)
+
+
+ def ask_lang(self):
+ langs = self.config.getMetaData("general", "language").type.split(";")
+ self.lang = self.ask(u"Choose your Language / WÀhle deine Sprache", "en", langs)
+ gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
+ translation = gettext.translation("setup", join(self.path, "locale"), languages=[self.lang, "en"], fallback=True)
+ translation.install(True)
+
+ #l10n Input shorthand for yes
+ self.yes = _("y")
+ #l10n Input shorthand for no
+ self.no = _("n")
+
    def ask(self, qst, default, answers=[], bool=False, password=False):
        """ Generate dialog on command line

        :param qst: question text
        :param default: default answer, returned on empty input
        :param answers: list of allowed answers, empty for free input
        :param bool: interpret the answer as a localized yes/no value
        :param password: ask for a password twice with hidden input
        """

        # build the hint shown in brackets behind the question
        if answers:
            info = "("
            for i, answer in enumerate(answers):
                info += (", " if i != 0 else "") + str((answer == default and "[%s]" % answer) or answer)

            info += ")"
        elif bool:
            if default == self.yes:
                info = "([%s]/%s)" % (self.yes, self.no)
            else:
                info = "(%s/[%s])" % (self.yes, self.no)
        else:
            info = "[%s]" % default

        if password:
            # loop until both entries match and are long enough
            p1 = True
            p2 = False
            while p1 != p2:
                # getpass(_("Password: ")) will crash on systems with broken locales (Win, NAS)
                sys.stdout.write(_("Password: "))
                p1 = getpass("")

                if len(p1) < 4:
                    print _("Password too short. Use at least 4 symbols.")
                    continue

                sys.stdout.write(_("Password (again): "))
                p2 = getpass("")

                if p1 == p2:
                    return p1
                else:
                    print _("Passwords did not match.")

        # repeat until a valid answer was given
        while True:
            input = raw_input(qst + " %s: " % info)
            input = input.decode(self.stdin_encoding)

            if input.strip() == "":
                input = default

            if bool:
                #l10n yes, true,t are inputs for booleans with value true
                if input.lower().strip() in [self.yes, _("yes"), _("true"), _("t"), "yes"]:
                    return True
                #l10n no, false,f are inputs for booleans with value false
                elif input.lower().strip() in [self.no, _("no"), _("false"), _("f"), "no"]:
                    return False
                else:
                    print _("Invalid Input")
                    continue

            if not answers:
                return input

            else:
                if input in answers:
                    return input
                else:
                    print _("Invalid Input")
+
+
# manual test entry point: run the setup assistant standalone
if __name__ == "__main__":
    test = Setup(join(abspath(dirname(__file__)), ".."), None)
    test.start()
diff --git a/pyload/__init__.py b/pyload/__init__.py
new file mode 100644
index 000000000..b72ac12e9
--- /dev/null
+++ b/pyload/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+
+__version_info__ = ('0', '4', '9', '9')
+__version__ = '.'.join(__version_info__) + "-dev" \ No newline at end of file
diff --git a/pyload/api/AccountApi.py b/pyload/api/AccountApi.py
new file mode 100644
index 000000000..999484974
--- /dev/null
+++ b/pyload/api/AccountApi.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.Api import Api, RequirePerm, Permission
+
+from ApiComponent import ApiComponent
+
+
class AccountApi(ApiComponent):
    """ All methods to control accounts """

    @RequirePerm(Permission.Accounts)
    def getAccounts(self, refresh):
        """Get information about all entered accounts.

        :param refresh: reload account info
        :return: list of `AccountInfo`
        """
        accs = self.core.accountManager.getAllAccounts(refresh)
        accounts = []
        # accs maps plugin name -> {login: account}; flatten into one list
        for plugin in accs.itervalues():
            accounts.extend([acc.toInfoData() for acc in plugin.values()])

        return accounts

    @RequirePerm(Permission.All)
    def getAccountTypes(self):
        """All available account types.

        :return: string list
        """
        return self.core.pluginManager.getPlugins("accounts").keys()

    @RequirePerm(Permission.Accounts)
    def updateAccount(self, plugin, login, password):
        """Changes pw/options for specific account."""
        # TODO: options
        self.core.accountManager.updateAccount(plugin, login, password, {})

    def updateAccountInfo(self, account):
        """ Update account from :class:`AccountInfo` (not implemented yet) """
        #TODO

    @RequirePerm(Permission.Accounts)
    def removeAccount(self, account):
        """Remove account from pyload.

        :param account: :class:`AccountInfo` instance
        """
        self.core.accountManager.removeAccount(account.plugin, account.loginname)


# merge the component's methods into the main Api class; the class object
# itself is not needed afterwards
if Api.extend(AccountApi):
    del AccountApi
diff --git a/pyload/api/AddonApi.py b/pyload/api/AddonApi.py
new file mode 100644
index 000000000..4ae686d2d
--- /dev/null
+++ b/pyload/api/AddonApi.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.Api import Api, RequirePerm, Permission
+
+from ApiComponent import ApiComponent
+
class AddonApi(ApiComponent):
    """ Methods to interact with addons """

    def getAllInfo(self):
        """Returns all information stored by addon plugins. Values are always strings

        :return: {"plugin": {"name": value } }
        """
        return self.core.addonManager.getAllInfo()

    def getInfoByPlugin(self, plugin):
        """Returns information stored by a specific plugin.

        :param plugin: pluginname
        :return: dict of attr names mapped to value {"name": value}
        """
        return self.core.addonManager.getInfo(plugin)


# merge the component's methods into the main Api class
if Api.extend(AddonApi):
    del AddonApi
diff --git a/pyload/api/ApiComponent.py b/pyload/api/ApiComponent.py
new file mode 100644
index 000000000..bb333c259
--- /dev/null
+++ b/pyload/api/ApiComponent.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.remote.apitypes import Iface
+
+# Workaround to let code-completion think, this is subclass of Iface
+Iface = object
class ApiComponent(Iface):
    """ Base class for all Api components.

    Never instantiated: the constructor exists only so IDEs know about the
    `core`, `user` and `primaryUID` attributes the real Api provides.
    """

    __slots__ = []

    def __init__(self, core, user):
        # Only for auto completion, this class can not be instantiated
        from pyload import Core
        from pyload.datatypes.User import User
        assert isinstance(core, Core)
        assert issubclass(ApiComponent, Iface)
        self.core = core
        assert isinstance(user, User)
        self.user = user
        self.primaryUID = 0
        # No instantiating!
        raise Exception()
diff --git a/pyload/api/CollectorApi.py b/pyload/api/CollectorApi.py
new file mode 100644
index 000000000..49340285e
--- /dev/null
+++ b/pyload/api/CollectorApi.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.Api import Api, RequirePerm, Permission
+
+from ApiComponent import ApiComponent
+
class CollectorApi(ApiComponent):
    """ Link collector """

    # NOTE: all methods below are unimplemented stubs

    @RequirePerm(Permission.All)
    def getCollector(self):
        pass

    @RequirePerm(Permission.Add)
    def addToCollector(self, links):
        pass

    @RequirePerm(Permission.Add)
    def addFromCollector(self, name, new_name):
        pass

    @RequirePerm(Permission.Delete)
    def deleteCollPack(self, name):
        pass

    @RequirePerm(Permission.Add)
    def renameCollPack(self, name, new_name):
        pass

    @RequirePerm(Permission.Delete)
    def deleteCollLink(self, url):
        pass


# merge the component's methods into the main Api class
if Api.extend(CollectorApi):
    del CollectorApi
diff --git a/pyload/api/ConfigApi.py b/pyload/api/ConfigApi.py
new file mode 100644
index 000000000..2adc0c565
--- /dev/null
+++ b/pyload/api/ConfigApi.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.Api import Api, RequirePerm, Permission, ConfigHolder, ConfigItem, ConfigInfo
+from pyload.utils import to_string
+
+from ApiComponent import ApiComponent
+
# helper function to create a ConfigHolder
def toConfigHolder(section, config, values):
    """ Builds a ConfigHolder for `section`, taking each option's current
    value from `values` and falling back to the declared default. """
    holder = ConfigHolder(section, config.label, config.description, config.explanation)
    items = []
    for option, x in config.config.iteritems():
        current = values.get(option, x.input.default_value)
        items.append(ConfigItem(option, x.label, x.description, x.input, to_string(current)))
    holder.items = items
    return holder
+
+
class ConfigApi(ApiComponent):
    """ Everything related to configuration """

    def getConfigValue(self, section, option):
        """Retrieve config value.

        :param section: name of category, or plugin
        :param option: config option
        :rtype: str
        :return: config value as string
        """
        value = self.core.config.get(section, option, self.primaryUID)
        return to_string(value)

    def setConfigValue(self, section, option, value):
        """Set new config value.

        :param section: name of category, or plugin
        :param option: config option
        :param value: new config value
        """
        if option in ("limit_speed", "max_speed"): #not so nice to update the limit
            self.core.requestFactory.updateBucket()

        self.core.config.set(section, option, value, self.primaryUID)

    def getConfig(self):
        """Retrieves complete config of core.

        :rtype: dict of section -> ConfigHolder
        """
        data = {}
        for section, config, values in self.core.config.iterCoreSections():
            data[section] = toConfigHolder(section, config, values)
        return data

    def getCoreConfig(self):
        """ Retrieves core config sections

        :rtype: list of PluginInfo
        """
        return [ConfigInfo(section, config.label, config.description, False, False)
                for section, config, values in self.core.config.iterCoreSections()]

    @RequirePerm(Permission.Plugins)
    def getPluginConfig(self):
        """All plugins and addons the current user has configured

        :rtype: list of PluginInfo
        """
        # TODO: include addons that are activated by default
        # TODO: multi user
        # TODO: better plugin / addon activated config
        data = []
        active = [x.getName() for x in self.core.addonManager.activePlugins()]
        for name, config, values in self.core.config.iterSections(self.primaryUID):
            # skip unmodified and inactive addons
            if not values and name not in active: continue

            item = ConfigInfo(name, config.label, config.description,
                              self.core.pluginManager.getCategory(name),
                              self.core.pluginManager.isUserPlugin(name),
                              # TODO: won't work probably
                              values.get("activated", None if "activated" not in config.config else config.config[
                                  "activated"].input.default_value))
            data.append(item)

        return data

    @RequirePerm(Permission.Plugins)
    def getAvailablePlugins(self):
        """List of all available plugins, that are configurable

        :rtype: list of PluginInfo
        """
        # TODO: filter user_context / addons when not allowed
        plugins = [ConfigInfo(name, config.label, config.description,
                              self.core.pluginManager.getCategory(name),
                              self.core.pluginManager.isUserPlugin(name))
                   for name, config, values in self.core.config.iterSections(self.primaryUID)]

        return plugins

    @RequirePerm(Permission.Plugins)
    def loadConfig(self, name):
        """Get complete config options for desired section

        :param name: Name of plugin or config section
        :rtype: ConfigHolder
        """
        # requires at least plugin permissions, but only admin can load core config
        config, values = self.core.config.getSection(name, self.primaryUID)
        return toConfigHolder(name, config, values)

    @RequirePerm(Permission.Plugins)
    def saveConfig(self, config):
        """Used to save a configuration, core config can only be saved by admins

        :param config: :class:`ConfigHolder`
        """
        for item in config.items:
            self.core.config.set(config.name, item.name, item.value, sync=False, user=self.primaryUID)
        # save the changes
        self.core.config.saveValues(self.primaryUID, config.name)

    @RequirePerm(Permission.Plugins)
    def deleteConfig(self, plugin):
        """Deletes modified config

        :param plugin: plugin name
        """
        #TODO: delete should deactivate addons?
        self.core.config.delete(plugin, self.primaryUID)


# merge the component's methods into the main Api class
if Api.extend(ConfigApi):
    del ConfigApi
diff --git a/pyload/api/CoreApi.py b/pyload/api/CoreApi.py
new file mode 100644
index 000000000..ebb194134
--- /dev/null
+++ b/pyload/api/CoreApi.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.Api import Api, RequirePerm, Permission, ServerStatus, Interaction
+from pyload.utils.fs import join, free_space
+from pyload.utils import compare_time
+
+from ApiComponent import ApiComponent
+
class CoreApi(ApiComponent):
    """ This module provides methods for general interaction with the core, like status or progress retrieval """

    @RequirePerm(Permission.All)
    def getServerVersion(self):
        """pyLoad Core version """
        return self.core.version

    @RequirePerm(Permission.All)
    def getWSAddress(self):
        """Gets an address for the websocket based on configuration"""
        # TODO SSL (wss)
        # NOTE(review): the doubled %% leaves a literal "%s" placeholder in the
        # result, presumably for the client to fill in the host - confirm
        return "ws://%%s:%d" % self.core.config['remote']['port']

    @RequirePerm(Permission.All)
    def getServerStatus(self):
        """Some general information about the current status of pyLoad.

        :return: `ServerStatus`
        """
        # queue/total are tuples: index 0 file count, index 1 size
        queue = self.core.files.getQueueStats(self.primaryUID)
        total = self.core.files.getDownloadStats(self.primaryUID)

        serverStatus = ServerStatus(0,
                                    total[0], queue[0],
                                    total[1], queue[1],
                                    self.isInteractionWaiting(Interaction.All),
                                    not self.core.threadManager.pause and self.isTimeDownload(),
                                    self.core.threadManager.pause,
                                    self.core.config['reconnect']['activated'] and self.isTimeReconnect())

        # total speed is the sum over all active downloads
        for pyfile in self.core.threadManager.getActiveDownloads(self.primaryUID):
            serverStatus.speed += pyfile.getSpeed() #bytes/s

        return serverStatus

    @RequirePerm(Permission.All)
    def getProgressInfo(self):
        """ Status of all currently running tasks

        :rtype: list of :class:`ProgressInfo`
        """
        return self.core.threadManager.getProgressList(self.primaryUID)

    def pauseServer(self):
        """Pause server: It won't start any new downloads, but nothing gets aborted."""
        self.core.threadManager.pause = True

    def unpauseServer(self):
        """Unpause server: New Downloads will be started."""
        self.core.threadManager.pause = False

    def togglePause(self):
        """Toggle pause state.

        :return: new pause state
        """
        self.core.threadManager.pause ^= True
        return self.core.threadManager.pause

    def toggleReconnect(self):
        """Toggle reconnect activation.

        :return: new reconnect state
        """
        self.core.config["reconnect"]["activated"] ^= True
        return self.core.config["reconnect"]["activated"]

    def freeSpace(self):
        """Available free space at download directory in bytes"""
        return free_space(self.core.config["general"]["download_folder"])

    def quit(self):
        """Clean way to quit pyLoad"""
        self.core.do_kill = True

    def restart(self):
        """Restart pyload core"""
        self.core.do_restart = True

    def getLog(self, offset=0):
        """Returns most recent log entries.

        :param offset: line offset
        :return: List of log entries
        """
        filename = join(self.core.config['log']['log_folder'], 'log.txt')
        try:
            # only catch file errors instead of a bare except, so real bugs
            # (and SystemExit/KeyboardInterrupt) are not silently swallowed;
            # `with` guarantees the handle is closed even on read errors
            with open(filename, "r") as fh:
                lines = fh.readlines()
            if offset >= len(lines):
                return []
            return lines[offset:]
        except (IOError, OSError):
            return ['No log available']

    @RequirePerm(Permission.All)
    def isTimeDownload(self):
        """Checks if pyload will start new downloads according to time in config.

        :return: bool
        """
        start = self.core.config['downloadTime']['start'].split(":")
        end = self.core.config['downloadTime']['end'].split(":")
        return compare_time(start, end)

    @RequirePerm(Permission.All)
    def isTimeReconnect(self):
        """Checks if pyload will try to make a reconnect

        :return: bool
        """
        start = self.core.config['reconnect']['startTime'].split(":")
        end = self.core.config['reconnect']['endTime'].split(":")
        return compare_time(start, end) and self.core.config["reconnect"]["activated"]


# merge the component's methods into the main Api class
if Api.extend(CoreApi):
    del CoreApi
diff --git a/pyload/api/DownloadApi.py b/pyload/api/DownloadApi.py
new file mode 100644
index 000000000..0a01007b5
--- /dev/null
+++ b/pyload/api/DownloadApi.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from os.path import isabs
+
+from pyload.Api import Api, RequirePerm, Permission
+from pyload.utils.fs import join
+
+from ApiComponent import ApiComponent
+
class DownloadApi(ApiComponent):
    """ Component to create, add, delete or modify downloads."""

    @RequirePerm(Permission.Add)
    def generateAndAddPackages(self, links, paused=False):
        """Generates and adds packages; package names are derived from the urls.

        :param links: list of urls
        :param paused: when True the new packages are created paused
        :return: list of package ids
        """
        return [self.addPackageP(name, urls, "", paused) for name, urls
                in self.generatePackages(links).iteritems()]

    @RequirePerm(Permission.Add)
    def createPackage(self, name, folder, root, password="", site="", comment="", paused=False):
        """Create a new package.

        :param name: display name of the package
        :param folder: folder name or relative path, abs path are not allowed
        :param root: package id of root package, -1 for top level package
        :param password: single pw or list of passwords separated with new line
        :param site: arbitrary url to site for more information
        :param comment: arbitrary comment
        :param paused: No downloads will be started when True
        :return: pid of newly created package
        """
        # absolute paths are not allowed: flatten them into a plain name
        if isabs(folder):
            folder = folder.replace("/", "_")

        # strip characters that are unsafe or meaningless in a folder name
        folder = folder.replace("http://", "").replace(":", "").replace("\\", "_").replace("..", "")

        self.core.log.info(_("Added package %(name)s as folder %(folder)s") % {"name": name, "folder": folder})
        pid = self.core.files.addPackage(name, folder, root, password, site, comment, paused)

        return pid

    @RequirePerm(Permission.Add)
    def addPackage(self, name, links, password=""):
        """Convenient method to add a package to the top-level and for adding links.

        :return: package id
        """
        return self.addPackageChild(name, links, password, -1, False)

    @RequirePerm(Permission.Add)
    def addPackageP(self, name, links, password, paused):
        """ Same as :meth:`addPackage` with an additional paused attribute. """
        return self.addPackageChild(name, links, password, -1, paused)

    @RequirePerm(Permission.Add)
    def addPackageChild(self, name, links, password, root, paused):
        """Adds a package, with links to desired package.

        :param root: parents package id
        :return: package id of the new package
        """
        # folder_per_package decides whether the package gets its own subfolder
        if self.core.config['general']['folder_per_package']:
            folder = name
        else:
            folder = ""

        pid = self.createPackage(name, folder, root, password)
        self.addLinks(pid, links)

        return pid

    @RequirePerm(Permission.Add)
    def addLinks(self, pid, links):
        """Adds links to specific package. Initiates online status fetching.

        :param pid: package id
        :param links: list of urls
        """
        hoster, crypter = self.core.pluginManager.parseUrls(links)

        if hoster:
            self.core.files.addLinks(hoster, pid)
            self.core.threadManager.createInfoThread(hoster, pid)

        self.core.threadManager.createDecryptThread(crypter, pid)

        # format each part separately instead of relying on the subtle
        # precedence between + and % in a single expression
        self.core.log.info(_("Added %d links to package") % len(hoster) + " #%d" % pid)
        self.core.files.save()

    @RequirePerm(Permission.Add)
    def uploadContainer(self, filename, data):
        """Uploads and adds a container file to pyLoad.

        :param filename: filename, extension is important so it can correctly decrypted
        :param data: file content
        :return: package id of the newly created package
        """
        filepath = join(self.core.config["general"]["download_folder"], "tmp_" + filename)
        th = open(filepath, "wb")
        try:
            # guarantee the handle is closed even when writing fails
            th.write(str(data))
        finally:
            th.close()

        return self.addPackage(filepath, [filepath])

    @RequirePerm(Permission.Delete)
    def deleteFiles(self, fids):
        """Deletes several file entries from pyload.

        :param fids: list of file ids
        """
        for fid in fids:
            self.core.files.deleteFile(fid)

        self.core.files.save()

    @RequirePerm(Permission.Delete)
    def deletePackages(self, pids):
        """Deletes packages and containing links.

        :param pids: list of package ids
        """
        for pid in pids:
            self.core.files.deletePackage(pid)

        self.core.files.save()

    @RequirePerm(Permission.Modify)
    def restartPackage(self, pid):
        """Restarts a package, resets every containing files.

        :param pid: package id
        """
        self.core.files.restartPackage(pid)

    @RequirePerm(Permission.Modify)
    def restartFile(self, fid):
        """Resets file status, so it will be downloaded again.

        :param fid: file id
        """
        self.core.files.restartFile(fid)

    @RequirePerm(Permission.Modify)
    def recheckPackage(self, pid):
        """Check online status of all files in a package, also a default action when package is added. """
        self.core.files.reCheckPackage(pid)

    @RequirePerm(Permission.Modify)
    def restartFailed(self):
        """Restarts all failed files."""
        self.core.files.restartFailed()

    @RequirePerm(Permission.Modify)
    def stopAllDownloads(self):
        """Aborts all running downloads."""
        pyfiles = self.core.files.cachedFiles()
        for pyfile in pyfiles:
            pyfile.abortDownload()

    @RequirePerm(Permission.Modify)
    def stopDownloads(self, fids):
        """Aborts specific downloads.

        :param fids: list of file ids
        """
        pyfiles = self.core.files.cachedFiles()
        for pyfile in pyfiles:
            if pyfile.id in fids:
                pyfile.abortDownload()
+
+
# Merge this component's methods into the central Api class; the component
# class object itself is no longer needed afterwards.
if Api.extend(DownloadApi):
    del DownloadApi
diff --git a/pyload/api/DownloadPreparingApi.py b/pyload/api/DownloadPreparingApi.py
new file mode 100644
index 000000000..0a47fe5ab
--- /dev/null
+++ b/pyload/api/DownloadPreparingApi.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from itertools import chain
+
+from pyload.Api import Api, RequirePerm, Permission, OnlineCheck, LinkStatus, urlmatcher
+from pyload.utils.fs import join
+from pyload.utils.packagetools import parseNames
+from pyload.network.RequestFactory import getURL
+
+from ApiComponent import ApiComponent
+
class DownloadPreparingApi(ApiComponent):
    """ All kind of methods to parse links or retrieve online status """

    @RequirePerm(Permission.Add)
    def parseURLs(self, html=None, url=None):
        """Parses html content or any arbitrary text for links and returns result of `checkURLs`

        :param html: html source
        :param url: url whose page content will be fetched and scanned
        :return: see :meth:`checkURLs`
        """
        urls = []

        if html:
            urls += [x[0] for x in urlmatcher.findall(html)]

        if url:
            page = getURL(url)
            urls += [x[0] for x in urlmatcher.findall(page)]

        # remove duplicates
        return self.checkURLs(set(urls))

    @RequirePerm(Permission.Add)
    def checkURLs(self, urls):
        """ Gets urls and returns pluginname mapped to list of matching urls.

        :param urls: iterable of urls
        :return: {plugin: urls}
        """
        hoster, crypter = self.core.pluginManager.parseUrls(urls)
        plugins = {}

        # group urls by the plugin that claims them
        for url, plugin in chain(hoster, crypter):
            plugins.setdefault(plugin, []).append(url)

        return plugins

    @RequirePerm(Permission.Add)
    def checkOnlineStatus(self, urls):
        """ initiates online status check, will also decrypt files.

        :param urls: list of urls
        :return: initial set of data as :class:`OnlineCheck` instance containing the result id
        """
        data, crypter = self.core.pluginManager.parseUrls(urls)

        # initial result does not contain the crypter links
        tmp = [(url, (url, LinkStatus(url, pluginname, "unknown", 3, 0))) for url, pluginname in data]
        data = parseNames(tmp)
        result = {}

        # flatten the name->entries mapping into url->status, remembering
        # which generated package each url belongs to
        for k, v in data.iteritems():
            for url, status in v:
                status.packagename = k
                result[url] = status

        data.update(crypter) # hoster and crypter will be processed
        rid = self.core.threadManager.createResultThread(data, False)

        return OnlineCheck(rid, result)

    @RequirePerm(Permission.Add)
    def checkOnlineStatusContainer(self, urls, container, data):
        """ checks online status of urls and a submitted container file

        :param urls: list of urls
        :param container: container file name
        :param data: file content
        :return: :class:`OnlineCheck`
        """
        filepath = join(self.core.config["general"]["download_folder"], "tmp_" + container)
        th = open(filepath, "wb")
        try:
            # guarantee the handle is closed even when writing fails
            th.write(str(data))
        finally:
            th.close()
        urls.append(filepath)
        return self.checkOnlineStatus(urls)

    @RequirePerm(Permission.Add)
    def pollResults(self, rid):
        """ Polls the result available for ResultID

        :param rid: `ResultID`
        :return: `OnlineCheck`, if rid is -1 then there is no more data available
        """
        result = self.core.threadManager.getInfoResult(rid)

        if "ALL_INFO_FETCHED" in result:
            # sentinel key signals that the check finished; strip it and
            # report -1 so the client stops polling
            del result["ALL_INFO_FETCHED"]
            return OnlineCheck(-1, result)
        else:
            return OnlineCheck(rid, result)

    @RequirePerm(Permission.Add)
    def generatePackages(self, links):
        """ Parses links, generates packages names from urls

        :param links: list of urls
        :return: package names mapped to urls
        """
        return parseNames((x, x) for x in links)
+
+
# Merge this component's methods into the central Api class; the component
# class object itself is no longer needed afterwards.
if Api.extend(DownloadPreparingApi):
    del DownloadPreparingApi
diff --git a/pyload/api/FileApi.py b/pyload/api/FileApi.py
new file mode 100644
index 000000000..2ca409165
--- /dev/null
+++ b/pyload/api/FileApi.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.Api import Api, RequirePerm, Permission, DownloadState, PackageDoesNotExists, FileDoesNotExists
+from pyload.utils import uniqify
+
+from ApiComponent import ApiComponent
+
+# TODO: user context
class FileApi(ApiComponent):
    """Everything related to available packages or files. Deleting, Modifying and so on."""

    @RequirePerm(Permission.All)
    def getAllFiles(self):
        """ same as `getFileTree` for toplevel root and full tree"""
        return self.getFileTree(-1, True)

    @RequirePerm(Permission.All)
    def getFilteredFiles(self, state):
        """ same as `getFilteredFileTree` for toplevel root and full tree"""
        return self.getFilteredFileTree(-1, state, True)

    @RequirePerm(Permission.All)
    def getFileTree(self, pid, full):
        """ Retrieve data for specific package. full=True will retrieve all data available
        and can result in greater delays.

        :param pid: package id
        :param full: go down the complete tree or only the first layer
        :return: :class:`TreeCollection`
        """
        return self.core.files.getTree(pid, full, DownloadState.All)

    @RequirePerm(Permission.All)
    def getFilteredFileTree(self, pid, full, state):
        """ Same as `getFileTree` but only contains files with specific download state.

        :param pid: package id
        :param full: go down the complete tree or only the first layer
        :param state: :class:`DownloadState`, the attributes used for filtering
        :return: :class:`TreeCollection`
        """
        return self.core.files.getTree(pid, full, state)

    @RequirePerm(Permission.All)
    def getPackageContent(self, pid):
        """ Only retrieve content of a specific package. see `getFileTree`"""
        return self.getFileTree(pid, False)

    @RequirePerm(Permission.All)
    def getPackageInfo(self, pid):
        """Returns information about package, without detailed information about containing files

        :param pid: package id
        :raises PackageDoesNotExists:
        :return: :class:`PackageInfo`
        """
        info = self.core.files.getPackageInfo(pid)
        if not info:
            raise PackageDoesNotExists(pid)
        return info

    @RequirePerm(Permission.All)
    def getFileInfo(self, fid):
        """ Info for specific file

        :param fid: file id
        :raises FileDoesNotExists:
        :return: :class:`FileInfo`
        """
        info = self.core.files.getFileInfo(fid)
        if not info:
            raise FileDoesNotExists(fid)
        return info

    @RequirePerm(Permission.Download)
    def getFilePath(self, fid):
        """ Internal method to get the filepath

        :return: tuple of (package path, file name)
        :raises FileDoesNotExists:
        """
        info = self.getFileInfo(fid)
        pack = self.core.files.getPackage(info.package)
        return pack.getPath(), info.name

    @RequirePerm(Permission.All)
    def findFiles(self, pattern):
        """Search files by name pattern, returns the matching part of the tree."""
        return self.core.files.getTree(-1, True, DownloadState.All, pattern)

    @RequirePerm(Permission.All)
    def searchSuggestions(self, pattern):
        """Filename suggestions for a search pattern, duplicates removed."""
        names = self.core.db.getMatchingFilenames(pattern, self.primaryUID)
        # TODO: stemming and reducing the names to provide better suggestions
        return uniqify(names)

    @RequirePerm(Permission.All)
    def findPackages(self, tags):
        # not implemented yet
        pass

    @RequirePerm(Permission.Modify)
    def updatePackage(self, pack):
        """Allows to modify several package attributes.

        :param pack: :class:`PackageInfo` carrying the pid and the new attribute values
        :raises PackageDoesNotExists:
        """
        pid = pack.pid
        p = self.core.files.getPackage(pid)
        if not p:
            raise PackageDoesNotExists(pid)

        # fix for previous NameError: derive the attribute dict from the
        # given info object instead of the undefined name `data`.
        # NOTE(review): assumes `pack` exposes its fields via __dict__ —
        # verify this holds for slotted/generated types.
        for key, value in vars(pack).iteritems():
            # identifiers are immutable
            if key in ("id", "pid"):
                continue
            setattr(p, key, value)

        p.sync()
        self.core.files.save()

    @RequirePerm(Permission.Modify)
    def setPackageFolder(self, pid, path):
        # not implemented yet
        pass

    @RequirePerm(Permission.Modify)
    def movePackage(self, pid, root):
        """ Set a new root for specific package. This will also moves the files on disk\
        and will only work when no file is currently downloading.

        :param pid: package id
        :param root: package id of new root
        :raises PackageDoesNotExists: When pid or root is missing
        :return: False if package can't be moved
        """
        return self.core.files.movePackage(pid, root)

    @RequirePerm(Permission.Modify)
    def moveFiles(self, fids, pid):
        """Move multiple files to another package. This will move the files on disk and\
        only work when files are not downloading. All files needs to be continuous ordered
        in the current package.

        :param fids: list of file ids
        :param pid: destination package
        :return: False if files can't be moved
        """
        return self.core.files.moveFiles(fids, pid)

    @RequirePerm(Permission.Modify)
    def orderPackage(self, pid, position):
        """Set new position for a package.

        :param pid: package id
        :param position: new position, 0 for very beginning
        """
        self.core.files.orderPackage(pid, position)

    @RequirePerm(Permission.Modify)
    def orderFiles(self, fids, pid, position):
        """ Set a new position for a bunch of files within a package.
        All files have to be in the same package and must be **continuous**\
        in the package. That means no gaps between them.

        :param fids: list of file ids
        :param pid: package id of parent package
        :param position: new position: 0 for very beginning
        """
        self.core.files.orderFiles(fids, pid, position)
+
+
# Merge this component's methods into the central Api class; the component
# class object itself is no longer needed afterwards.
if Api.extend(FileApi):
    del FileApi
diff --git a/pyload/api/UserInteractionApi.py b/pyload/api/UserInteractionApi.py
new file mode 100644
index 000000000..f5a9e9290
--- /dev/null
+++ b/pyload/api/UserInteractionApi.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.Api import Api, RequirePerm, Permission, Interaction
+
+from ApiComponent import ApiComponent
+
class UserInteractionApi(ApiComponent):
    """ Everything needed for user interaction """

    @RequirePerm(Permission.Interaction)
    def isInteractionWaiting(self, mode):
        """ Check if task is waiting.

        :param mode: binary or'ed output type
        :return: boolean
        """
        return self.core.interactionManager.isTaskWaiting(self.primaryUID, mode)

    @RequirePerm(Permission.Interaction)
    def getInteractionTasks(self, mode):
        """Retrieve task for specific mode.

        :param mode: binary or'ed interaction types which should be retrieved
        :rtype list of :class:`InteractionTask`
        """
        im = self.core.interactionManager
        tasks = im.getTasks(self.primaryUID, mode)
        for task in tasks:
            # delivering a task to a client counts as "seen"
            task.seen = True
            if task.type == Interaction.Notification:
                task.setWaiting(im.CLIENT_THRESHOLD)

        return tasks

    @RequirePerm(Permission.Interaction)
    def setInteractionResult(self, iid, result):
        """Set Result for a interaction task. It will be immediately removed from task queue afterwards

        :param iid: interaction id
        :param result: result as json string
        """
        task = self.core.interactionManager.getTaskByID(iid)
        # only the owning user may answer the task
        if task and task.owner == self.primaryUID:
            task.setResult(result)

    @RequirePerm(Permission.Interaction)
    def getAddonHandler(self):
        # not implemented yet
        pass

    @RequirePerm(Permission.Interaction)
    def callAddonHandler(self, plugin, func, pid_or_fid):
        # not implemented yet
        pass

    @RequirePerm(Permission.Download)
    def generateDownloadLink(self, fid, timeout):
        # not implemented yet
        pass
+
+
# Merge this component's methods into the central Api class; the component
# class object itself is no longer needed afterwards.
if Api.extend(UserInteractionApi):
    del UserInteractionApi
diff --git a/pyload/api/__init__.py b/pyload/api/__init__.py
new file mode 100644
index 000000000..1348fd26f
--- /dev/null
+++ b/pyload/api/__init__.py
@@ -0,0 +1,8 @@
# Api component modules; each one registers itself on the central Api class
# via Api.extend() when imported.
__all__ = ["CoreApi", "ConfigApi", "DownloadApi", "DownloadPreparingApi", "FileApi",
           "CollectorApi", "UserInteractionApi", "AccountApi", "AddonApi"]

# Import all components
# from .import *
# Above does not work in py 2.5
for name in __all__:
    __import__(__name__ + "." + name)
diff --git a/pyload/cli/AddPackage.py b/pyload/cli/AddPackage.py
new file mode 100644
index 000000000..a73401586
--- /dev/null
+++ b/pyload/cli/AddPackage.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+#Copyright (C) 2011 RaNaN
+#
+#This program is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3 of the License,
+#or (at your option) any later version.
+#
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#See the GNU General Public License for more details.
+#
+#You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+###
+
+from Handler import Handler
+from printer import *
+
class AddPackage(Handler):
    """ let the user add packages """

    def init(self):
        # package name typed first; collected urls afterwards
        self.name = ""
        self.urls = []

    def onEnter(self, inp):
        """Handle a submitted input line: first the name, then links until END."""
        if inp == "0":
            # back to main menu; must not fall through, otherwise "0" would
            # be recorded as package name / link
            self.cli.reset()
            return

        if not self.name:
            self.name = inp
            self.setInput()
        elif inp == "END":
            # add package (1 = paused/queue flag expected by the backend)
            self.client.addPackage(self.name, self.urls, 1)
            self.cli.reset()
        else:
            if inp.strip():
                self.urls.append(inp)
            self.setInput()

    def renderBody(self, line):
        """Draw the add-package dialog; returns the first free line below it."""
        println(line, white(_("Add Package:")))
        println(line + 1, "")
        line += 2

        if not self.name:
            println(line, _("Enter a name for the new package"))
            println(line + 1, "")
            line += 2
        else:
            println(line, _("Package: %s") % self.name)
            println(line + 1, _("Parse the links you want to add."))
            println(line + 2, _("Type %s when done.") % mag("END"))
            # the color helpers concatenate strings, so stringify the count
            println(line + 3, _("Links added: ") + mag(str(len(self.urls))))
            line += 4

        println(line, "")
        println(line + 1, mag("0.") + _(" back to main menu"))

        return line + 2
diff --git a/pyload/cli/Cli.py b/pyload/cli/Cli.py
new file mode 100644
index 000000000..9285ad3a2
--- /dev/null
+++ b/pyload/cli/Cli.py
@@ -0,0 +1,586 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+#Copyright (C) 2012 RaNaN
+#
+#This program is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3 of the License,
+#or (at your option) any later version.
+#
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#See the GNU General Public License for more details.
+#
+#You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+###
+from __future__ import with_statement
+from getopt import GetoptError, getopt
+
+import pyload.common.pylgettext as gettext
+import os
+from os import _exit
+from os.path import join, exists, abspath, basename
+import sys
+from sys import exit
+from threading import Thread, Lock
+from time import sleep
+from traceback import print_exc
+
+import ConfigParser
+
+from codecs import getwriter
+
# Windows consoles use codepage 850; everywhere else assume UTF-8.
if os.name == "nt":
    enc = "cp850"
else:
    enc = "utf8"

# Re-wrap stdout so unicode output is encoded for the terminal instead of
# raising UnicodeEncodeError; unencodable characters are replaced.
sys.stdout = getwriter(enc)(sys.stdout, errors="replace")
+
+from pyload import InitHomeDir
+from pyload.cli.printer import *
+from pyload.cli import AddPackage, ManageFiles
+
+from pyload.Api import Destination
+from pyload.utils import formatSize, decode
+from pyload.remote.thriftbackend.ThriftClient import ThriftClient, NoConnection, NoSSL, WrongLogin, ConnectionClosed
+from pyload.lib.Getch import Getch
+from pyload.lib.rename_process import renameProcess
+
class Cli:
    """Interactive terminal client (and one-shot command runner).

    With no command given, the constructor switches into interactive mode:
    it starts a background refresh thread and enters a blocking input loop.
    With a command it executes that command once and returns.
    """

    def __init__(self, client, command):
        # client: connected remote client; command: list of cli args or None
        self.client = client
        self.command = command

        if not self.command:
            renameProcess('pyLoadCli')
            self.getch = Getch()
            self.input = ""
            self.inputline = 0
            self.lastLowestLine = 0
            self.menuline = 0

            self.lock = Lock()

            #processor funcions, these will be changed dynamically depending on control flow
            self.headerHandler = self #the download status
            self.bodyHandler = self #the menu section
            self.inputHandler = self

            os.system("clear")
            println(1, blue("py") + yellow("Load") + white(_(" Command Line Interface")))
            println(2, "")

            self.thread = RefreshThread(self)
            self.thread.start()

            self.start()
        else:
            self.processCommand()

    def reset(self):
        """ reset to initial main menu """
        self.input = ""
        self.headerHandler = self.bodyHandler = self.inputHandler = self

    def start(self):
        """ main loop. handle input """
        while True:
            #inp = raw_input()
            inp = self.getch.impl()
            if ord(inp) == 3:
                os.system("clear")
                sys.exit() # ctrl + c
            elif ord(inp) == 13: #enter
                try:
                    self.lock.acquire()
                    self.inputHandler.onEnter(self.input)

                except Exception, e:
                    println(2, red(e))
                finally:
                    self.lock.release()

            elif ord(inp) == 127:
                self.input = self.input[:-1] #backspace
                try:
                    self.lock.acquire()
                    self.inputHandler.onBackSpace()
                finally:
                    self.lock.release()

            elif ord(inp) == 27: #ugly symbol
                pass
            else:
                self.input += inp
                try:
                    self.lock.acquire()
                    self.inputHandler.onChar(inp)
                finally:
                    self.lock.release()

            # redraw body + footer after every keystroke
            self.inputline = self.bodyHandler.renderBody(self.menuline)
            self.renderFooter(self.inputline)


    def refresh(self):
        """refresh screen; called periodically by RefreshThread"""

        println(1, blue("py") + yellow("Load") + white(_(" Command Line Interface")))
        println(2, "")

        # the lock serializes drawing against the input loop
        self.lock.acquire()

        self.menuline = self.headerHandler.renderHeader(3) + 1
        println(self.menuline - 1, "")
        self.inputline = self.bodyHandler.renderBody(self.menuline)
        self.renderFooter(self.inputline)

        self.lock.release()


    def setInput(self, string=""):
        """Replace the current input buffer."""
        self.input = string

    def setHandler(self, klass):
        """Switch body/input handling to a new Handler subclass instance."""
        #create new handler with reference to cli
        self.bodyHandler = self.inputHandler = klass(self)
        self.input = ""

    def renderHeader(self, line):
        """ prints download status """
        #print updated information
        # print "\033[J" #clear screen
        # self.println(1, blue("py") + yellow("Load") + white(_(" Command Line Interface")))
        # self.println(2, "")
        # self.println(3, white(_("%s Downloads:") % (len(data))))

        data = self.client.statusDownloads()
        speed = 0

        println(line, white(_("%s Downloads:") % (len(data))))
        line += 1

        for download in data:
            if download.status == 12: # downloading
                percent = download.percent
                z = percent / 4
                speed += download.speed
                println(line, cyan(download.name))
                line += 1
                println(line,
                    blue("[") + yellow(z * "#" + (25 - z) * " ") + blue("] ") + green(str(percent) + "%") + _(
                        " Speed: ") + green(formatSize(download.speed) + "/s") + _(" Size: ") + green(
                        download.format_size) + _(" Finished in: ") + green(download.format_eta) + _(
                        " ID: ") + green(download.fid))
                line += 1
            if download.status == 5:
                println(line, cyan(download.name))
                line += 1
                println(line, _("waiting: ") + green(download.format_wait))
                line += 1

        println(line, "")
        line += 1
        status = self.client.statusServer()
        if status.pause:
            paused = _("Status:") + " " + red(_("paused"))
        else:
            paused = _("Status:") + " " + red(_("running"))

        println(line,"%s %s: %s %s: %s %s: %s" % (
            paused, _("total Speed"), red(formatSize(speed) + "/s"), _("Files in queue"), red(
                status.queue), _("Total"), red(status.total)))

        return line + 1

    def renderBody(self, line):
        """ prints initial menu """
        println(line, white(_("Menu:")))
        println(line + 1, "")
        println(line + 2, mag("1.") + _(" Add Links"))
        println(line + 3, mag("2.") + _(" Manage Queue"))
        println(line + 4, mag("3.") + _(" Manage Collector"))
        println(line + 5, mag("4.") + _(" (Un)Pause Server"))
        println(line + 6, mag("5.") + _(" Kill Server"))
        println(line + 7, mag("6.") + _(" Quit"))

        return line + 8

    def renderFooter(self, line):
        """ prints out the input line with input """
        println(line, "")
        line += 1

        println(line, white(" Input: ") + decode(self.input))

        #clear old output
        if line < self.lastLowestLine:
            for i in range(line + 1, self.lastLowestLine + 1):
                println(i, "")

        self.lastLowestLine = line

        #set cursor to position
        print "\033[" + str(self.inputline) + ";0H"

    def onChar(self, char):
        """ default no special handling for single chars """
        if char == "1":
            self.setHandler(AddPackage)
        elif char == "2":
            self.setHandler(ManageFiles)
        elif char == "3":
            self.setHandler(ManageFiles)
            self.bodyHandler.target = Destination.Collector
        elif char == "4":
            self.client.togglePause()
            self.setInput()
        elif char == "5":
            self.client.kill()
            self.client.close()
            sys.exit()
        elif char == "6":
            os.system('clear')
            sys.exit()

    def onEnter(self, inp):
        # main menu has no enter handling
        pass

    def onBackSpace(self):
        # main menu has no backspace handling
        pass

    def processCommand(self):
        """Execute a single non-interactive command and print its result."""
        command = self.command[0]
        args = []
        if len(self.command) > 1:
            args = self.command[1:]

        if command == "status":
            files = self.client.statusDownloads()

            if not files:
                print "No downloads running."

            for download in files:
                if download.status == 12: # downloading
                    print print_status(download)
                    print "\tDownloading: %s @ %s/s\t %s (%s%%)" % (
                        download.format_eta, formatSize(download.speed), formatSize(download.size - download.bleft),
                        download.percent)
                elif download.status == 5:
                    print print_status(download)
                    print "\tWaiting: %s" % download.format_wait
                else:
                    print print_status(download)

        elif command == "queue":
            print_packages(self.client.getQueueData())

        elif command == "collector":
            print_packages(self.client.getCollectorData())

        elif command == "add":
            if len(args) < 2:
                print _("Please use this syntax: add <Package name> <link> <link2> ...")
                return

            self.client.addPackage(args[0], args[1:], Destination.Queue, "")

        elif command == "add_coll":
            if len(args) < 2:
                print _("Please use this syntax: add <Package name> <link> <link2> ...")
                return

            self.client.addPackage(args[0], args[1:], Destination.Collector, "")

        elif command == "del_file":
            self.client.deleteFiles([int(x) for x in args])
            print "Files deleted."

        elif command == "del_package":
            self.client.deletePackages([int(x) for x in args])
            print "Packages deleted."

        elif command == "move":
            for pid in args:
                pack = self.client.getPackageInfo(int(pid))
                self.client.movePackage((pack.dest + 1) % 2, pack.pid)

        elif command == "check":
            print _("Checking %d links:") % len(args)
            print
            rid = self.client.checkOnlineStatus(args).rid
            self.printOnlineCheck(self.client, rid)

        elif command == "check_container":
            path = args[0]
            # owd is the original working dir, injected as builtin at startup
            if not exists(join(owd, path)):
                print _("File does not exists.")
                return

            f = open(join(owd, path), "rb")
            content = f.read()
            f.close()

            rid = self.client.checkOnlineStatusContainer([], basename(f.name), content).rid
            self.printOnlineCheck(self.client, rid)

        elif command == "pause":
            self.client.pauseServer()

        elif command == "unpause":
            self.client.unpauseServer()

        elif command == "toggle":
            self.client.togglePause()

        elif command == "kill":
            self.client.kill()
        elif command == "restart_file":
            for x in args:
                self.client.restartFile(int(x))
            print "Files restarted."
        elif command == "restart_package":
            for pid in args:
                self.client.restartPackage(int(pid))
            print "Packages restarted."

        else:
            print_commands()

    def printOnlineCheck(self, client, rid):
        """Poll an online-check result id until completion, printing each url's state."""
        while True:
            sleep(1)
            result = client.pollResults(rid)
            for url, status in result.data.iteritems():
                if status.status == 2: check = "Online"
                elif status.status == 1: check = "Offline"
                else: check = "Unknown"

                print "%-45s %-12s\t %-15s\t %s" % (status.name, formatSize(status.size), status.plugin, check)

            # rid of -1 signals no more data
            if result.rid == -1: break
+
+
class RefreshThread(Thread):
    """Daemon thread that redraws the interactive screen once per second."""

    def __init__(self, cli):
        Thread.__init__(self)
        self.setDaemon(True)
        self.cli = cli

    def run(self):
        while True:
            sleep(1)
            try:
                self.cli.refresh()
            except ConnectionClosed:
                # core went away: clear the screen, inform the user, exit hard
                os.system("clear")
                print _("pyLoad was terminated")
                _exit(0)
            except Exception, e:
                # any drawing error: show it, fall back to the main menu
                println(2, red(str(e)))
                self.cli.reset()
                print_exc()
+
+
def print_help(config):
    # Usage/option overview for the command line client; keep in sync with
    # the getopt definitions in main().
    print
    print "pyLoadCli Copyright (c) 2008-2012 the pyLoad Team"
    print
    print "Usage: [python] pyLoadCli.py [options] [command]"
    print
    print "<Commands>"
    print "See pyLoadCli.py -c for a complete listing."
    print
    print "<Options>"
    print " -i, --interactive", " Start in interactive mode"
    print
    print " -u, --username=", " " * 2, "Specify user name"
    print " --pw=<password>", " " * 2, "Password"
    print " -a, --address=", " " * 3, "Use address (current=%s)" % config["addr"]
    print " -p, --port", " " * 7, "Use port (current=%s)" % config["port"]
    print
    print " -l, --language", " " * 3, "Set user interface language (current=%s)" % config["language"]
    print " -h, --help", " " * 7, "Display this help text"
    print " -c, --commands", " " * 3, "List all available commands"
    print
+
+
+def print_packages(data):
+ for pack in data:
+ print "Package %s (#%s):" % (pack.name, pack.pid)
+ for download in pack.links:
+ print "\t" + print_file(download)
+ print
+
+
def print_file(download):
    """One-line summary of a file entry: id, name, status message, plugin."""
    return "#%-6d %-30s %-10s %-8s" % (
        download.fid, download.name, download.statusmsg, download.plugin)
+
+
def print_status(download):
    """One-line status summary including the human-readable file size."""
    return "#%-6s %-40s Status: %-10s Size: %s" % (
        download.fid, download.name, download.statusmsg, download.format_size)
+
+
+def print_commands():
+ commands = [("status", _("Prints server status")),
+ ("queue", _("Prints downloads in queue")),
+ ("collector", _("Prints downloads in collector")),
+ ("add <name> <link1> <link2>...", _("Adds package to queue")),
+ ("add_coll <name> <link1> <link2>...", _("Adds package to collector")),
+ ("del_file <fid> <fid2>...", _("Delete Files from Queue/Collector")),
+ ("del_package <pid> <pid2>...", _("Delete Packages from Queue/Collector")),
+ ("move <pid> <pid2>...", _("Move Packages from Queue to Collector or vice versa")),
+ ("restart_file <fid> <fid2>...", _("Restart files")),
+ ("restart_package <pid> <pid2>...", _("Restart packages")),
+ ("check <container|url> ...", _("Check online status, works with local container")),
+ ("check_container path", _("Checks online status of a container file")),
+ ("pause", _("Pause the server")),
+ ("unpause", _("continue downloads")),
+ ("toggle", _("Toggle pause/unpause")),
+ ("kill", _("kill server")), ]
+
+ print _("List of commands:")
+ print
+ for c in commands:
+ print "%-35s %s" % c
+
+
+def writeConfig(opts):
+ try:
+ with open(join(homedir, ".pyloadcli"), "w") as cfgfile:
+ cfgfile.write("[cli]")
+ for opt in opts:
+ cfgfile.write("%s=%s\n" % (opt, opts[opt]))
+ except:
+ print _("Couldn't write user config file")
+
+
+def main():
+ config = {"addr": "127.0.0.1", "port": "7227", "language": "en"}
+ try:
+ config["language"] = os.environ["LANG"][0:2]
+ except:
+ pass
+
+ if (not exists(join(pypath, "locale", config["language"]))) or config["language"] == "":
+ config["language"] = "en"
+
+ configFile = ConfigParser.ConfigParser()
+ configFile.read(join(homedir, ".pyloadcli"))
+
+ if configFile.has_section("cli"):
+ for opt in configFile.items("cli"):
+ config[opt[0]] = opt[1]
+
+ gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
+ translation = gettext.translation("pyLoadCli", join(pypath, "locale"),
+ languages=[config["language"],"en"],fallback=True)
+ translation.install(unicode=True)
+
+ interactive = False
+ command = None
+ username = ""
+ password = ""
+
+ shortOptions = 'iu:p:a:hcl:'
+ longOptions = ['interactive', "username=", "pw=", "address=", "port=", "help", "commands", "language="]
+
+ try:
+ opts, extraparams = getopt(sys.argv[1:], shortOptions, longOptions)
+ for option, params in opts:
+ if option in ("-i", "--interactive"):
+ interactive = True
+ elif option in ("-u", "--username"):
+ username = params
+ elif option in ("-a", "--address"):
+ config["addr"] = params
+ elif option in ("-p", "--port"):
+ config["port"] = params
+ elif option in ("-l", "--language"):
+ config["language"] = params
+ gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
+ translation = gettext.translation("pyLoadCli", join(pypath, "locale"),
+ languages=[config["language"],"en"],fallback=True)
+ translation.install(unicode=True)
+ elif option in ("-h", "--help"):
+ print_help(config)
+ exit()
+ elif option in ("--pw"):
+ password = params
+ elif option in ("-c", "--comands"):
+ print_commands()
+ exit()
+
+ except GetoptError:
+ print 'Unknown Argument(s) "%s"' % " ".join(sys.argv[1:])
+ print_help(config)
+ exit()
+
+ if len(extraparams) >= 1:
+ command = extraparams
+
+ client = False
+
+ if interactive:
+ try:
+ client = ThriftClient(config["addr"], int(config["port"]), username, password)
+ except WrongLogin:
+ pass
+ except NoSSL:
+ print _("You need py-openssl to connect to this pyLoad core.")
+ exit()
+ except NoConnection:
+ config["addr"] = False
+ config["port"] = False
+
+ if not client:
+ if not config["addr"]: config["addr"] = raw_input(_("Address: "))
+ if not config["port"]: config["port"] = raw_input(_("Port: "))
+ if not username: username = raw_input(_("Username: "))
+ if not password:
+ from getpass import getpass
+
+ password = getpass(_("Password: "))
+
+ try:
+ client = ThriftClient(config["addr"], int(config["port"]), username, password)
+ except WrongLogin:
+ print _("Login data is wrong.")
+ except NoConnection:
+ print _("Could not establish connection to %(addr)s:%(port)s." % {"addr": config["addr"],
+ "port": config["port"]})
+
+ else:
+ try:
+ client = ThriftClient(config["addr"], int(config["port"]), username, password)
+ except WrongLogin:
+ print _("Login data is wrong.")
+ except NoConnection:
+ print _("Could not establish connection to %(addr)s:%(port)s." % {"addr": config["addr"],
+ "port": config["port"]})
+ except NoSSL:
+ print _("You need py-openssl to connect to this pyLoad core.")
+
+ if interactive and command: print _("Interactive mode ignored since you passed some commands.")
+
+ if client:
+ writeConfig(config)
+ cli = Cli(client, command)
diff --git a/pyload/cli/Handler.py b/pyload/cli/Handler.py
new file mode 100644
index 000000000..476d09386
--- /dev/null
+++ b/pyload/cli/Handler.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+#Copyright (C) 2011 RaNaN
+#
+#This program is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3 of the License,
+#or (at your option) any later version.
+#
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#See the GNU General Public License for more details.
+#
+#You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+###
class Handler:
    """ Base class for interactive CLI screens.

    Subclasses override the hooks (init, onChar, onBackSpace, onEnter,
    renderBody) and get convenience access to the cli's server connection
    and the line currently being typed.
    """

    def __init__(self, cli):
        self.cli = cli
        self.init()

    @property
    def client(self):
        # shortcut to the cli's server connection
        return self.cli.client

    @property
    def input(self):
        # the line the user is currently typing
        return self.cli.input

    def init(self):
        """Hook, called once from the constructor; override in subclasses."""
        pass

    def onChar(self, char):
        """Hook: a printable character was typed."""
        pass

    def onBackSpace(self):
        """Hook: backspace was pressed."""
        pass

    def onEnter(self, inp):
        """Hook: the current input line was submitted."""
        pass

    def setInput(self, inp=""):
        """Replace the cli's current input line."""
        self.cli.setInput(inp)

    def backspace(self):
        """Drop the last character of the current input line."""
        self.cli.setInput(self.input[:-1])

    def renderBody(self, line):
        """ gets the line where to render output and should return the line number below its content """
        return line + 1
diff --git a/pyload/cli/ManageFiles.py b/pyload/cli/ManageFiles.py
new file mode 100644
index 000000000..2304af355
--- /dev/null
+++ b/pyload/cli/ManageFiles.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+#Copyright (C) 2011 RaNaN
+#
+#This program is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3 of the License,
+#or (at your option) any later version.
+#
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#See the GNU General Public License for more details.
+#
+#You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+###
+
+from itertools import islice
+from time import time
+
+from Handler import Handler
+from printer import *
+
+from pyload.Api import Destination, PackageData
+
class ManageFiles(Handler):
    """ Interactive screen to manage the queue/collector: browse packages,
    inspect their links, and move/delete/restart selections. """

    def init(self):
        self.target = Destination.Queue
        self.pos = 0 #position in queue
        self.package = -1 #chosen package
        self.mode = "" # move/delete/restart

        self.cache = None  # cached package list
        self.links = None  # cached link data for the chosen package
        self.time = 0      # timestamp of the last fetch

    def onChar(self, char):
        if char in ("m", "d", "r"):
            self.mode = char
            self.setInput()
        elif char == "p":
            # page up
            self.pos = max(0, self.pos - 5)
            self.backspace()
        elif char == "n":
            # page down
            self.pos += 5
            self.backspace()

    def onBackSpace(self):
        # leaving an empty input steps back: first out of the mode,
        # then out of the chosen package
        if not self.input and self.mode:
            self.mode = ""
        if not self.input and self.package > -1:
            self.package = -1

    def onEnter(self, input):
        """Dispatch the submitted selection depending on current state."""
        if input == "0":
            self.cli.reset()
        elif self.package < 0 and self.mode:
            #mode select
            packs = self.parseInput(input)
            if self.mode == "m":
                # toggle between queue (0) and collector (1)
                for pid in packs:
                    self.client.movePackage((self.target + 1) % 2, pid)
            elif self.mode == "d":
                self.client.deletePackages(packs)
            elif self.mode == "r":
                for pid in packs:
                    self.client.restartPackage(pid)

        elif self.mode:
            #edit links
            links = self.parseInput(input, False)

            if self.mode == "d":
                self.client.deleteFiles(links)
            elif self.mode == "r":
                for fid in links:
                    self.client.restartFile(fid)

        else:
            #look into package
            try:
                self.package = int(input)
            except ValueError:
                pass # non numeric input, stay at the overview

        # invalidate caches and reset the view state
        self.cache = None
        self.links = None
        self.pos = 0
        self.mode = ""
        self.setInput()


    def renderBody(self, line):
        if self.package < 0:
            println(line, white(_("Manage Packages:")))
        else:
            println(line, white((_("Manage Links:"))))
        line += 1

        if self.mode:
            if self.mode == "m":
                println(line, _("What do you want to move?"))
            elif self.mode == "d":
                println(line, _("What do you want to delete?"))
            elif self.mode == "r":
                println(line, _("What do you want to restart?"))

            println(line + 1, "Enter a single number, comma separated numbers or ranges. e.g.: 1,2,3 or 1-3.")
            line += 2
        else:
            println(line, _("Choose what you want to do, or enter package number."))
            println(line + 1, ("%s - %%s, %s - %%s, %s - %%s" % (mag("d"), mag("m"), mag("r"))) % (
                _("delete"), _("move"), _("restart")))
            line += 2

        if self.package < 0:
            #print package info
            pack = self.getPackages()
            i = 0
            for value in islice(pack, self.pos, self.pos + 5):
                try:
                    println(line, mag(str(value.pid)) + ": " + value.name)
                    line += 1
                    i += 1
                except Exception:
                    # defective row: skip it, keep the layout intact
                    pass
            for x in range(5 - i):
                println(line, "")
                line += 1
        else:
            #print links info
            pack = self.getLinks()
            i = 0
            for value in islice(pack.links, self.pos, self.pos + 5):
                try:
                    println(line, mag(value.fid) + ": %s | %s | %s" % (
                        value.name, value.statusmsg, value.plugin))
                    line += 1
                    i += 1
                except Exception:
                    pass
            for x in range(5 - i):
                println(line, "")
                line += 1

        println(line, mag("p") + _(" - previous") + " | " + mag("n") + _(" - next"))
        println(line + 1, mag("0.") + _(" back to main menu"))

        return line + 2


    def getPackages(self):
        """Return the package overview, served from cache while it is
        younger than 2 seconds."""
        if self.cache and time() < self.time + 2:
            return self.cache

        if self.target == Destination.Queue:
            data = self.client.getQueue()
        else:
            data = self.client.getCollector()

        self.cache = data
        self.time = time()

        return data

    def getLinks(self):
        """Return data of the chosen package, cached for 1 second."""
        if self.links and time() < self.time + 1:
            return self.links

        try:
            data = self.client.getPackageData(self.package)
        except Exception:
            # package may be gone in the meantime -> render an empty one
            data = PackageData(links=[])

        self.links = data
        self.time = time()

        return data

    def parseInput(self, inp, package=True):
        """ Parse a selection: a single number, comma separated numbers,
        or a range "a-b". For ranges only ids present in the cached data
        are returned. """
        inp = inp.strip()
        if "-" in inp:
            low, _sep, high = inp.partition("-")
            valid = range(int(low), int(high) + 1)

            if package:
                return [p.pid for p in self.cache if p.pid in valid]
            # NOTE(review): links are rendered via `value.fid` above but
            # filtered on `l.lid` here - confirm the FileData field name
            return [l.lid for l in self.links.links if l.lid in valid]

        else:
            return [int(x) for x in inp.split(",")]
diff --git a/pyload/cli/__init__.py b/pyload/cli/__init__.py
new file mode 100644
index 000000000..fa8a09291
--- /dev/null
+++ b/pyload/cli/__init__.py
@@ -0,0 +1,2 @@
+from AddPackage import AddPackage
+from ManageFiles import ManageFiles \ No newline at end of file
diff --git a/pyload/cli/printer.py b/pyload/cli/printer.py
new file mode 100644
index 000000000..c62c1800e
--- /dev/null
+++ b/pyload/cli/printer.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
def blue(string):
    """Return *string* wrapped in bright-blue ANSI escape codes."""
    text = unicode(string)
    return "\033[1;34m%s\033[0m" % text
+
def green(string):
    """Return *string* wrapped in bright-green ANSI escape codes."""
    text = unicode(string)
    return "\033[1;32m%s\033[0m" % text
+
def yellow(string):
    """Return *string* wrapped in bright-yellow ANSI escape codes."""
    text = unicode(string)
    return "\033[1;33m%s\033[0m" % text
+
def red(string):
    """Return *string* wrapped in bright-red ANSI escape codes."""
    text = unicode(string)
    return "\033[1;31m%s\033[0m" % text
+
def cyan(string):
    """Return *string* wrapped in bright-cyan ANSI escape codes."""
    text = unicode(string)
    return "\033[1;36m%s\033[0m" % text
+
def mag(string):
    """Return *string* wrapped in bright-magenta ANSI escape codes."""
    text = unicode(string)
    return "\033[1;35m%s\033[0m" % text
+
def white(string):
    """Return *string* wrapped in bright-white ANSI escape codes."""
    text = unicode(string)
    return "\033[1;37m%s\033[0m" % text
+
+def println(line, content):
+ print "\033[" + str(line) + ";0H\033[2K" + content \ No newline at end of file
diff --git a/pyload/config/ConfigManager.py b/pyload/config/ConfigManager.py
new file mode 100644
index 000000000..33bd151c3
--- /dev/null
+++ b/pyload/config/ConfigManager.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from new_collections import OrderedDict
+
+from pyload.Api import InvalidConfigSection
+from pyload.utils import json
+
+from ConfigParser import ConfigParser
+
+from convert import to_input, from_string
+
def convertKeyError(func):
    """ Decorator: translate a KeyError raised by *func* into
    InvalidConfigSection. args[1] is the section name of the wrapped
    ConfigManager method. """

    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except KeyError:
            raise InvalidConfigSection(args[1])

    return wrapper
+
+
class ConfigManager(ConfigParser):
    """ Manages the core config and configs for addons and single user.
    Has similar interface to ConfigParser. Core values live in the file
    based parser, user/addon values are stored as json blobs in the db. """

    def __init__(self, core, parser):
        # No __init__ call to super class is needed!

        self.core = core
        self.db = core.db
        # The config parser, holding the core config
        self.parser = parser

        # similar to parser, separated meta data and values
        self.config = OrderedDict()

        # Value cache for multiple user configs
        # Values are populated from db on first access
        # Entries are saved as (user, section) keys
        self.values = {}
        # TODO: similar to a cache, could be deleted periodically

    def save(self):
        """Persist the core config to disk."""
        self.parser.save()

    @convertKeyError
    def get(self, section, option, user=None):
        """get config value, core config only available for admins.
        if user is not valid default value will be returned"""

        # Core config loaded from parser, when no user is given or he is admin
        if section in self.parser and user is None:
            return self.parser.get(section, option)
        else:
            # We need the id and not the instance
            # Will be None for admin user and so the same as internal access
            try:
                # Check that meta data exists for this option
                # Configs without meta data can not be loaded!
                data = self.config[section].config[option]
                return self.loadValues(user, section)[option]
            except KeyError:
                pass # Returns default value later

        return self.config[section].config[option].input.default_value

    def loadValues(self, user, section):
        """Return (and lazily populate from db) the cached value dict
        for the (user, section) pair."""
        if (user, section) not in self.values:
            conf = self.db.loadConfig(section, user)
            try:
                self.values[user, section] = json.loads(conf) if conf else {}
            except ValueError: # Something did go wrong when parsing
                self.values[user, section] = {}
                self.core.print_exc()

        return self.values[user, section]

    @convertKeyError
    def set(self, section, option, value, sync=True, user=None):
        """ set config value; returns True when the value changed """

        changed = False
        if section in self.parser and user is None:
            changed = self.parser.set(section, option, value, sync)
        else:
            data = self.config[section].config[option]
            value = from_string(value, data.input.type)
            # Compare against the value of the SAME user (previously the
            # admin/default value was fetched, which also left the cache
            # entry for (user, section) unpopulated)
            old_value = self.get(section, option, user)

            # Values will always be saved to db, sync is ignored
            if value != old_value:
                changed = True
                # loadValues guarantees the cache entry exists
                self.loadValues(user, section)[option] = value
                if sync: self.saveValues(user, section)

        if changed: self.core.evm.dispatchEvent("config:changed", section, option, value)
        return changed

    def saveValues(self, user, section):
        """Write the cached values for (user, section) to their store."""
        if section in self.parser and user is None:
            self.save()
        elif (user, section) in self.values:
            self.db.saveConfig(section, json.dumps(self.values[user, section]), user)

    def delete(self, section, user=None):
        """ Deletes values saved in db and cached values for given user, NOT meta data
        Does not trigger an error when nothing was deleted. """
        if (user, section) in self.values:
            del self.values[user, section]

        self.db.deleteConfig(section, user)
        self.core.evm.dispatchEvent("config:deleted", section, user)

    def iterCoreSections(self):
        """Iterate the sections of the file based core config."""
        return self.parser.iterSections()

    def iterSections(self, user=None):
        """ Yields: section, metadata, values """
        values = self.db.loadConfigsForUser(user)

        # Every section needs to be json decoded
        for section, data in values.items():
            try:
                values[section] = json.loads(data) if data else {}
            except ValueError:
                values[section] = {}
                self.core.print_exc()

        for name, config in self.config.iteritems():
            yield name, config, values[name] if name in values else {}

    def getSection(self, section, user=None):
        """Return (meta data, values) for a single section."""
        if section in self.parser and user is None:
            return self.parser.getSection(section)

        values = self.loadValues(user, section)
        return self.config.get(section), values
diff --git a/pyload/config/ConfigParser.py b/pyload/config/ConfigParser.py
new file mode 100644
index 000000000..bda3f7bd4
--- /dev/null
+++ b/pyload/config/ConfigParser.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+from time import sleep
+from os.path import exists
+from gettext import gettext
+from new_collections import namedtuple, OrderedDict
+
+
+from pyload.Api import Input, InputType
+from pyload.utils.fs import chmod
+
+from default import make_config
+from convert import to_input, from_string
+
# Bump to invalidate on-disk configs; see ConfigParser.checkVersion
CONF_VERSION = 2
# Section meta data: human label, short/long description, dict of options
SectionTuple = namedtuple("SectionTuple", "label description explanation config")
# Per-option meta data: human label, description, Input instance
ConfigData = namedtuple("ConfigData", "label description input")
+
+class ConfigParser:
+ """
+ Holds and manages the configuration + meta data for config read from file.
+ """
+
+ CONFIG = "pyload.conf"
+
+ def __init__(self, config=None):
+
+ if config: self.CONFIG = config
+
+ # Meta data information
+ self.config = OrderedDict()
+ # The actual config values
+ self.values = {}
+
+ self.checkVersion()
+
+ self.loadDefault()
+ self.parseValues(self.CONFIG)
+
+ def loadDefault(self):
+ make_config(self)
+
+ def checkVersion(self):
+ """Determines if config needs to be deleted"""
+ if exists(self.CONFIG):
+ f = open(self.CONFIG, "rb")
+ v = f.readline()
+ f.close()
+ v = v[v.find(":") + 1:].strip()
+
+ if not v or int(v) < CONF_VERSION:
+ f = open(self.CONFIG, "wb")
+ f.write("version: " + str(CONF_VERSION))
+ f.close()
+ print "Old version of %s deleted" % self.CONFIG
+ else:
+ f = open(self.CONFIG, "wb")
+ f.write("version:" + str(CONF_VERSION))
+ f.close()
+
+ def parseValues(self, filename):
+ """read config values from file"""
+ f = open(filename, "rb")
+ config = f.readlines()[1:]
+
+ # save the current section
+ section = ""
+
+ for line in config:
+ line = line.strip()
+
+ # comment line, different variants
+ if not line or line.startswith("#") or line.startswith("//") or line.startswith(";"): continue
+
+ if line.startswith("["):
+ section = line.replace("[", "").replace("]", "")
+
+ if section not in self.config:
+ print "Unrecognized section", section
+ section = ""
+
+ else:
+ name, non, value = line.rpartition("=")
+ name = name.strip()
+ value = value.strip()
+
+ if not section:
+ print "Value without section", name
+ continue
+
+ if name in self.config[section].config:
+ self.set(section, name, value, sync=False)
+ else:
+ print "Unrecognized option", section, name
+
+
+ def save(self):
+ """saves config to filename"""
+
+ configs = []
+ f = open(self.CONFIG, "wb")
+ configs.append(f)
+ chmod(self.CONFIG, 0600)
+ f.write("version: %i\n\n" % CONF_VERSION)
+
+ for section, data in self.config.iteritems():
+ f.write("[%s]\n" % section)
+
+ for option, data in data.config.iteritems():
+ value = self.get(section, option)
+ if type(value) == unicode: value = value.encode("utf8")
+ else: value = str(value)
+
+ f.write('%s = %s\n' % (option, value))
+
+ f.write("\n")
+
+ f.close()
+
+ def __getitem__(self, section):
+ """provides dictionary like access: c['section']['option']"""
+ return Section(self, section)
+
+ def __contains__(self, section):
+ """ checks if parser contains section """
+ return section in self.config
+
+ def get(self, section, option):
+ """get value or default"""
+ try:
+ return self.values[section][option]
+ except KeyError:
+ return self.config[section].config[option].input.default_value
+
+ def set(self, section, option, value, sync=True):
+ """set value"""
+
+ data = self.config[section].config[option]
+ value = from_string(value, data.input.type)
+ old_value = self.get(section, option)
+
+ # only save when different values
+ if value != old_value:
+ if section not in self.values: self.values[section] = {}
+ self.values[section][option] = value
+ if sync:
+ self.save()
+ return True
+
+ return False
+
+ def getMetaData(self, section, option):
+ """ get all config data for an option """
+ return self.config[section].config[option]
+
+ def iterSections(self):
+ """ Yields section, config info, values, for all sections """
+
+ for name, config in self.config.iteritems():
+ yield name, config, self.values[name] if name in self.values else {}
+
+ def getSection(self, section):
+ """ Retrieves single config as tuple (section, values) """
+ return self.config[section], self.values[section] if section in self.values else {}
+
+ def addConfigSection(self, section, label, desc, expl, config):
+ """Adds a section to the config. `config` is a list of config tuples as used in plugin api defined as:
+ The order of the config elements is preserved with OrderedDict
+ """
+ d = OrderedDict()
+
+ for entry in config:
+ if len(entry) != 4:
+ raise ValueError("Config entry must be of length 4")
+
+ # Values can have different roles depending on the two config formats
+ conf_name, type_label, label_desc, default_input = entry
+
+ # name, label, desc, input
+ if isinstance(default_input, Input):
+ input = default_input
+ conf_label = type_label
+ conf_desc = label_desc
+ # name, type, label, default
+ else:
+ input = Input(to_input(type_label))
+ input.default_value = from_string(default_input, input.type)
+ conf_label = label_desc
+ conf_desc = ""
+
+ d[conf_name] = ConfigData(gettext(conf_label), gettext(conf_desc), input)
+
+ data = SectionTuple(gettext(label), gettext(desc), gettext(expl), d)
+ self.config[section] = data
+
class Section:
    """ Dictionary-like view on a single section of a ConfigParser;
    all reads and writes are forwarded to the parser. """

    def __init__(self, parser, section):
        # keep references only, all state lives in the parser
        self.parser = parser
        self.section = section

    def __getitem__(self, item):
        """Read option *item* of this section."""
        return self.parser.get(self.section, item)

    def __setitem__(self, item, value):
        """Write option *item* of this section through to the parser."""
        self.parser.set(self.section, item, value)
diff --git a/pyload/config/__init__.py b/pyload/config/__init__.py
new file mode 100644
index 000000000..4b31e848b
--- /dev/null
+++ b/pyload/config/__init__.py
@@ -0,0 +1 @@
+__author__ = 'christian'
diff --git a/pyload/config/convert.py b/pyload/config/convert.py
new file mode 100644
index 000000000..7a110e0f3
--- /dev/null
+++ b/pyload/config/convert.py
@@ -0,0 +1,39 @@
+
+from pyload.Api import Input, InputType
+from pyload.utils import decode, to_bool
+
+# Maps old config formats to new values
input_dict = {
    "int": InputType.Int,
    "bool": InputType.Bool,
    "time": InputType.Time,
    "file": InputType.File,
    "list": InputType.List,
    "folder": InputType.Folder
    # any other legacy type string ("str", "ip", ";"-enumerations, ...)
    # falls back to InputType.Text in to_input()
}
+
+
def to_input(typ):
    """ Converts old config format to input type; unknown type strings
    fall back to InputType.Text. """
    try:
        return input_dict[typ]
    except KeyError:
        return InputType.Text
+
+
def from_string(value, typ=None):
    """ Cast a raw string to the python value for *typ*; strings come back
    as unicode, non-string input is returned untouched. """

    # value is no string -> nothing to cast
    if not isinstance(value, basestring):
        return value

    value = decode(value)

    if typ == InputType.Int:
        return int(value)
    if typ == InputType.Bool:
        return to_bool(value)
    if typ == InputType.Time:
        # normalize empty / hour-only values to "H:MM"
        if not value:
            value = "0:00"
        if ":" not in value:
            value += ":00"
        return value

    return value
diff --git a/pyload/config/default.py b/pyload/config/default.py
new file mode 100644
index 000000000..103dcdebb
--- /dev/null
+++ b/pyload/config/default.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+
+"""
+Configuration layout for default base config
+"""
+
+#TODO: write tooltips and descriptions
+#TODO: use apis config related classes
+
def make_config(config):
    # `_` is a plain identity function here: it only tags the strings so
    # gettext extraction can pick them up; actual translation happens later
    _ = lambda x: x

    config.addConfigSection("remote", _("Remote"), _("Description"), _("Long description"),
        [
            ("activated", "bool", _("Activated"), True),
            ("port", "int", _("Port"), 7227),
            # NOTE(review): "ip" is not a key of convert.input_dict, so it
            # falls back to InputType.Text - confirm that is intended
            ("listenaddr", "ip", _("Address"), "0.0.0.0"),
        ])

    config.addConfigSection("log", _("Log"), _("Description"), _("Long description"),
        [
            ("log_size", "int", _("Size in kb"), 100),
            ("log_folder", "folder", _("Folder"), "Logs"),
            ("file_log", "bool", _("File Log"), True),
            ("log_count", "int", _("Count"), 5),
            ("log_rotate", "bool", _("Log Rotate"), True),
        ])

    config.addConfigSection("permission", _("Permissions"), _("Description"), _("Long description"),
        [
            ("group", "str", _("Groupname"), "users"),
            ("change_dl", "bool", _("Change Group and User of Downloads"), False),
            ("change_file", "bool", _("Change file mode of downloads"), False),
            ("user", "str", _("Username"), "user"),
            ("file", "str", _("Filemode for Downloads"), "0644"),
            ("change_group", "bool", _("Change group of running process"), False),
            ("folder", "str", _("Folder Permission mode"), "0755"),
            ("change_user", "bool", _("Change user of running process"), False),
        ])

    config.addConfigSection("general", _("General"), _("Description"), _("Long description"),
        [
            # ";"-separated strings define a fixed choice list
            ("language", "en;de;fr;it;es;nl;sv;ru;pl;cs;sr;pt_BR", _("Language"), "en"),
            ("download_folder", "folder", _("Download Folder"), "Downloads"),
            ("checksum", "bool", _("Use Checksum"), False),
            ("folder_per_package", "bool", _("Create folder for each package"), True),
            ("debug_mode", "bool", _("Debug Mode"), False),
            ("min_free_space", "int", _("Min Free Space (MB)"), 200),
            ("renice", "int", _("CPU Priority"), 0),
        ])

    config.addConfigSection("ssl", _("SSL"), _("Description"), _("Long description"),
        [
            ("cert", "file", _("SSL Certificate"), "ssl.crt"),
            ("activated", "bool", _("Activated"), False),
            ("key", "file", _("SSL Key"), "ssl.key"),
        ])

    config.addConfigSection("webinterface", _("Webinterface"), _("Description"), _("Long description"),
        [
            ("template", "str", _("Template"), "default"),
            ("activated", "bool", _("Activated"), True),
            ("prefix", "str", _("Path Prefix"), ""),
            ("server", "auto;threaded;fallback;fastcgi", _("Server"), "auto"),
            ("force_server", "str", _("Favor specific server"), ""),
            ("host", "ip", _("IP"), "0.0.0.0"),
            ("https", "bool", _("Use HTTPS"), False),
            ("port", "int", _("Port"), 8001),
            # NOTE(review): declared "str" but the default is a bool -
            # probably should be "bool"; confirm before changing
            ("develop", "str", _("Development mode"), False),
        ])

    config.addConfigSection("proxy", _("Proxy"), _("Description"), _("Long description"),
        [
            ("username", "str", _("Username"), ""),
            ("proxy", "bool", _("Use Proxy"), False),
            ("address", "str", _("Address"), "localhost"),
            ("password", "password", _("Password"), ""),
            ("type", "http;socks4;socks5", _("Protocol"), "http"),
            ("port", "int", _("Port"), 7070),
        ])

    config.addConfigSection("reconnect", _("Reconnect"), _("Description"), _("Long description"),
        [
            ("endTime", "time", _("End"), "0:00"),
            ("activated", "bool", _("Use Reconnect"), False),
            ("method", "str", _("Method"), "./reconnect.sh"),
            ("startTime", "time", _("Start"), "0:00"),
        ])

    config.addConfigSection("download", _("Download"), _("Description"), _("Long description"),
        [
            ("max_downloads", "int", _("Max Parallel Downloads"), 3),
            ("limit_speed", "bool", _("Limit Download Speed"), False),
            ("interface", "str", _("Download interface to bind (ip or Name)"), ""),
            ("skip_existing", "bool", _("Skip already existing files"), False),
            ("max_speed", "int", _("Max Download Speed in kb/s"), -1),
            ("ipv6", "bool", _("Allow IPv6"), False),
            ("chunks", "int", _("Max connections for one download"), 3),
            ("restart_failed", "bool", _("Restart failed downloads on startup"), False),
        ])

    config.addConfigSection("downloadTime", _("Download Time"), _("Description"), _("Long description"),
        [
            ("start", "time", _("Start"), "0:00"),
            ("end", "time", _("End"), "0:00"),
        ])
diff --git a/pyload/database/AccountDatabase.py b/pyload/database/AccountDatabase.py
new file mode 100644
index 000000000..eaa1a3203
--- /dev/null
+++ b/pyload/database/AccountDatabase.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+from pyload.database import queue, async
+from pyload.database import DatabaseBackend
+
+
class AccountMethods:
    """ DB accessors for stored accounts, executed on the database thread.

    The first parameter `db` is NOT `self`: the queue/async decorators turn
    these into staticmethods dispatched through the global DB backend,
    which supplies the backend (with cursor `db.c`) as first argument.
    """

    @queue
    def loadAccounts(db):
        # returns raw rows; decoding of `options` is left to the caller
        db.c.execute('SELECT plugin, loginname, activated, password, options FROM accounts;')
        return db.c.fetchall()

    @async
    def saveAccounts(db, data):
        # TODO: owner, shared

        # `data`: iterable of (plugin, loginname, activated, password, options)
        db.c.executemany(
            'INSERT INTO accounts(plugin, loginname, activated, password, options) VALUES(?,?,?,?,?)', data)

    @async
    def removeAccount(db, plugin, loginname):
        # removes only rows matching both plugin and login name
        db.c.execute('DELETE FROM accounts WHERE plugin=? AND loginname=?', (plugin, loginname))


# make these methods available on the backend
DatabaseBackend.registerSub(AccountMethods)
diff --git a/pyload/database/ConfigDatabase.py b/pyload/database/ConfigDatabase.py
new file mode 100644
index 000000000..0c0dd72dd
--- /dev/null
+++ b/pyload/database/ConfigDatabase.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.database import DatabaseMethods, queue, async
+
+class ConfigMethods(DatabaseMethods):
+
+ @async
+ def saveConfig(self, plugin, config, user=None):
+ if user is None: user = -1
+ self.c.execute('INSERT INTO settings(plugin, config, user) VALUES(?,?,?)', (plugin, config, user))
+
+
+ @queue
+ def loadConfig(self, plugin, user=None):
+ if user is None: user = -1
+ self.c.execute('SELECT config FROM settings WHERE plugin=? AND user=?', (plugin, user))
+
+ r = self.c.fetchone()
+ return r[0] if r else ""
+
+ @async
+ def deleteConfig(self, plugin, user=None):
+ if user is None:
+ self.c.execute('DELETE FROM settings WHERE plugin=?', (plugin, ))
+ else:
+ self.c.execute('DELETE FROM settings WHERE plugin=? AND user=?', (plugin, user))
+
+ @queue
+ def loadAllConfigs(self):
+ self.c.execute('SELECT user, plugin, config FROM settings')
+ configs = {}
+ for r in self.c:
+ if r[0] in configs:
+ configs[r[0]][r[1]] = r[2]
+ else:
+ configs[r[0]] = {r[1]: r[2]}
+
+ return configs
+
+ @queue
+ def loadConfigsForUser(self, user=None):
+ if user is None: user = -1
+ self.c.execute('SELECT plugin, config FROM settings WHERE user=?', (user,))
+ configs = {}
+ for r in self.c:
+ configs[r[0]] = r[1]
+
+ return configs
+
+ @async
+ def clearAllConfigs(self):
+ self.c.execute('DELETE FROM settings')
+
+
+ConfigMethods.register() \ No newline at end of file
diff --git a/pyload/database/DatabaseBackend.py b/pyload/database/DatabaseBackend.py
new file mode 100644
index 000000000..2244a3026
--- /dev/null
+++ b/pyload/database/DatabaseBackend.py
@@ -0,0 +1,500 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN, mkaay
+###############################################################################
+
+from threading import Thread, Event
+from shutil import move
+
+from Queue import Queue
+from traceback import print_exc
+
+from pyload.utils.fs import chmod, exists, remove
+
+try:
+ from pysqlite2 import dbapi2 as sqlite3
+except:
+ import sqlite3
+
+DB = None
+DB_VERSION = 6
+
+
def set_DB(db):
    """Register *db* as the process-wide backend used by the
    queue/async/inner decorators."""
    global DB
    DB = db
+
+
def queue(f):
    """ Decorator: run `f` synchronously on the database thread.

    The resulting staticmethod forwards the call to the global DB backend
    and returns its result; it is a no-op while no backend is registered.
    """

    def proxy(*args, **kwargs):
        if not DB:
            return None
        return DB.queue(f, *args, **kwargs)

    return staticmethod(proxy)
+
+
+def async(f):
+ @staticmethod
+ def x(*args, **kwargs):
+ if DB:
+ return DB.async(f, *args, **kwargs)
+
+ return x
+
+
def inner(f):
    """ Decorator: call `f` directly with the global DB backend as first
    argument, bypassing the job queue. No-op while no backend is registered.
    """

    def proxy(*args, **kwargs):
        if not DB:
            return None
        return f(DB, *args, **kwargs)

    return staticmethod(proxy)
+
+
class DatabaseMethods:
    """ Base class for method containers registered on the backend. """

    # stubs for autocompletion; actual objects are provided at runtime
    core = None     # pyload core instance
    manager = None  # database manager
    conn = None     # sqlite connection
    c = None        # sqlite cursor

    @classmethod
    def register(cls):
        # expose this container's methods through the backend
        DatabaseBackend.registerSub(cls)
+
+
class DatabaseJob():
    """ A single unit of work for the database thread.

    Wraps a callable with its arguments; the db thread calls processJob()
    while the submitting thread may block in wait() until `done` is set.
    `exception` stays False on success, otherwise holds the raised error.
    """

    def __init__(self, f, *args, **kwargs):
        self.done = Event()

        self.f = f
        self.args = args
        self.kwargs = kwargs

        self.result = None
        self.exception = False

        # debugging aid: uncomment to record the submitting frame for __repr__
        # import inspect
        # self.frame = inspect.currentframe()

    def __repr__(self):
        from os.path import basename

        # self.frame only exists when the capture above is enabled;
        # degrade gracefully instead of raising AttributeError
        frame = getattr(self, "frame", None)
        if frame is None:
            return "DataBase Job %s:%s\nResult: %s" % (self.f.__name__, self.args[1:], self.result)

        frame = frame.f_back
        output = ""
        for i in range(5):
            output += "\t%s:%s, %s\n" % (basename(frame.f_code.co_filename), frame.f_lineno, frame.f_code.co_name)
            frame = frame.f_back
        del frame
        del self.frame

        return "DataBase Job %s:%s\n%sResult: %s" % (self.f.__name__, self.args[1:], output, self.result)

    def processJob(self):
        """Run the wrapped callable; record result or exception, then
        signal completion - `done` is set even on failure."""
        try:
            self.result = self.f(*self.args, **self.kwargs)
        except Exception as e:
            print_exc()
            try:
                # best effort diagnostics, never break the db thread on logging
                print("Database Error @ %s %s %s %s" % (self.f.__name__, self.args[1:], self.kwargs, e))
            except Exception:
                pass

            self.exception = e
        finally:
            self.done.set()

    def wait(self):
        """Block until the job has been processed."""
        self.done.wait()
+
+
+class DatabaseBackend(Thread):
+ subs = []
+
+ DB_FILE = "pyload.db"
+ VERSION_FILE = "db.version"
+
+ def __init__(self, core):
+ Thread.__init__(self)
+ self.setDaemon(True)
+ self.core = core
+ self.manager = None # set later
+ self.error = None
+ self.running = Event()
+
+ self.jobs = Queue()
+
+ set_DB(self)
+
+ def setup(self):
+ """ *MUST* be called before db can be used !"""
+ self.start()
+ self.running.wait()
+
+ def init(self):
+ """main loop, which executes commands"""
+
+ version = self._checkVersion()
+
+ self.conn = sqlite3.connect(self.DB_FILE)
+ chmod(self.DB_FILE, 0600)
+
+ self.c = self.conn.cursor()
+
+ if version is not None and version < DB_VERSION:
+ success = self._convertDB(version)
+
+ # delete database
+ if not success:
+ self.c.close()
+ self.conn.close()
+
+ try:
+ self.manager.core.log.warning(_("Database was deleted due to incompatible version."))
+ except:
+ print "Database was deleted due to incompatible version."
+
+ remove(self.VERSION_FILE)
+ move(self.DB_FILE, self.DB_FILE + ".backup")
+ f = open(self.VERSION_FILE, "wb")
+ f.write(str(DB_VERSION))
+ f.close()
+
+ self.conn = sqlite3.connect(self.DB_FILE)
+ chmod(self.DB_FILE, 0600)
+ self.c = self.conn.cursor()
+
+ self._createTables()
+ self.conn.commit()
+
+
+ def run(self):
+ try:
+ self.init()
+ except Exception, e:
+ self.error = e
+ finally:
+ self.running.set()
+
+ while True:
+ j = self.jobs.get()
+ if j == "quit":
+ self.c.close()
+ self.conn.commit()
+ self.conn.close()
+ self.closing.set()
+ break
+ j.processJob()
+
+
+ def shutdown(self):
+ self.running.clear()
+ self.closing = Event()
+ self.jobs.put("quit")
+ self.closing.wait(1)
+
+ def _checkVersion(self):
+ """ get db version"""
+ if not exists(self.VERSION_FILE):
+ f = open(self.VERSION_FILE, "wb")
+ f.write(str(DB_VERSION))
+ f.close()
+ return
+
+ f = open(self.VERSION_FILE, "rb")
+ v = int(f.read().strip())
+ f.close()
+
+ return v
+
+ def _convertDB(self, v):
+ try:
+ return getattr(self, "_convertV%i" % v)()
+ except:
+ return False
+
+ #--convert scripts start
+
+ def _convertV6(self):
+ return False
+
+ #--convert scripts end
+
+ def _createTables(self):
+ """create tables for database"""
+
+ self.c.execute(
+ 'CREATE TABLE IF NOT EXISTS "packages" ('
+ '"pid" INTEGER PRIMARY KEY AUTOINCREMENT, '
+ '"name" TEXT NOT NULL, '
+ '"folder" TEXT DEFAULT "" NOT NULL, '
+ '"site" TEXT DEFAULT "" NOT NULL, '
+ '"comment" TEXT DEFAULT "" NOT NULL, '
+ '"password" TEXT DEFAULT "" NOT NULL, '
+ '"added" INTEGER DEFAULT 0 NOT NULL,' # set by trigger
+ '"status" INTEGER DEFAULT 0 NOT NULL,'
+ '"tags" TEXT DEFAULT "" NOT NULL,'
+ '"shared" INTEGER DEFAULT 0 NOT NULL,'
+ '"packageorder" INTEGER DEFAULT -1 NOT NULL,' #incremented by trigger
+ '"root" INTEGER DEFAULT -1 NOT NULL, '
+ '"owner" INTEGER NOT NULL, '
+ 'FOREIGN KEY(owner) REFERENCES users(uid), '
+ 'CHECK (root != pid)'
+ ')'
+ )
+
+ self.c.execute(
+ 'CREATE TRIGGER IF NOT EXISTS "insert_package" AFTER INSERT ON "packages"'
+ 'BEGIN '
+ 'UPDATE packages SET added = strftime("%s", "now"), '
+ 'packageorder = (SELECT max(p.packageorder) + 1 FROM packages p WHERE p.root=new.root) '
+ 'WHERE rowid = new.rowid;'
+ 'END'
+ )
+
+ self.c.execute(
+ 'CREATE TRIGGER IF NOT EXISTS "delete_package" AFTER DELETE ON "packages"'
+ 'BEGIN '
+ 'DELETE FROM files WHERE package = old.pid;'
+ 'UPDATE packages SET packageorder=packageorder-1 WHERE packageorder > old.packageorder AND root=old.pid;'
+ 'END'
+ )
+ self.c.execute('CREATE INDEX IF NOT EXISTS "package_index" ON packages(root, owner)')
+ self.c.execute('CREATE INDEX IF NOT EXISTS "package_owner" ON packages(owner)')
+
+ self.c.execute(
+ 'CREATE TABLE IF NOT EXISTS "files" ('
+ '"fid" INTEGER PRIMARY KEY AUTOINCREMENT, '
+ '"name" TEXT NOT NULL, '
+ '"size" INTEGER DEFAULT 0 NOT NULL, '
+ '"status" INTEGER DEFAULT 0 NOT NULL, '
+ '"media" INTEGER DEFAULT 1 NOT NULL,'
+ '"added" INTEGER DEFAULT 0 NOT NULL,'
+ '"fileorder" INTEGER DEFAULT -1 NOT NULL, '
+ '"url" TEXT DEFAULT "" NOT NULL, '
+ '"plugin" TEXT DEFAULT "" NOT NULL, '
+ '"hash" TEXT DEFAULT "" NOT NULL, '
+ '"dlstatus" INTEGER DEFAULT 0 NOT NULL, '
+ '"error" TEXT DEFAULT "" NOT NULL, '
+ '"package" INTEGER NOT NULL, '
+ '"owner" INTEGER NOT NULL, '
+ 'FOREIGN KEY(owner) REFERENCES users(uid), '
+ 'FOREIGN KEY(package) REFERENCES packages(id)'
+ ')'
+ )
+ self.c.execute('CREATE INDEX IF NOT EXISTS "file_index" ON files(package, owner)')
+ self.c.execute('CREATE INDEX IF NOT EXISTS "file_owner" ON files(owner)')
+
+ self.c.execute(
+ 'CREATE TRIGGER IF NOT EXISTS "insert_file" AFTER INSERT ON "files"'
+ 'BEGIN '
+ 'UPDATE files SET added = strftime("%s", "now"), '
+ 'fileorder = (SELECT max(f.fileorder) + 1 FROM files f WHERE f.package=new.package) '
+ 'WHERE rowid = new.rowid;'
+ 'END'
+ )
+
+ self.c.execute(
+ 'CREATE TABLE IF NOT EXISTS "collector" ('
+ '"owner" INTEGER NOT NULL, '
+ '"data" TEXT NOT NULL, '
+ 'FOREIGN KEY(owner) REFERENCES users(uid), '
+ 'PRIMARY KEY(owner) ON CONFLICT REPLACE'
+ ') '
+ )
+
+ self.c.execute(
+ 'CREATE TABLE IF NOT EXISTS "storage" ('
+ '"identifier" TEXT NOT NULL, '
+ '"key" TEXT NOT NULL, '
+ '"value" TEXT DEFAULT "", '
+ 'PRIMARY KEY (identifier, key) ON CONFLICT REPLACE'
+ ')'
+ )
+
+ self.c.execute(
+ 'CREATE TABLE IF NOT EXISTS "users" ('
+ '"uid" INTEGER PRIMARY KEY AUTOINCREMENT, '
+ '"name" TEXT NOT NULL UNIQUE, '
+ '"email" TEXT DEFAULT "" NOT NULL, '
+ '"password" TEXT NOT NULL, '
+ '"role" INTEGER DEFAULT 0 NOT NULL, '
+ '"permission" INTEGER DEFAULT 0 NOT NULL, '
+ '"folder" TEXT DEFAULT "" NOT NULL, '
+ '"traffic" INTEGER DEFAULT -1 NOT NULL, '
+ '"dllimit" INTEGER DEFAULT -1 NOT NULL, '
+ '"dlquota" TEXT DEFAULT "" NOT NULL, '
+ '"hddquota" INTEGER DEFAULT -1 NOT NULL, '
+ '"template" TEXT DEFAULT "default" NOT NULL, '
+ '"user" INTEGER DEFAULT -1 NOT NULL, ' # set by trigger to self
+ 'FOREIGN KEY(user) REFERENCES users(uid)'
+ ')'
+ )
+ self.c.execute('CREATE INDEX IF NOT EXISTS "username_index" ON users(name)')
+
+ self.c.execute(
+ 'CREATE TRIGGER IF NOT EXISTS "insert_user" AFTER INSERT ON "users"'
+ 'BEGIN '
+ 'UPDATE users SET user = new.uid, folder=new.name '
+ 'WHERE rowid = new.rowid;'
+ 'END'
+ )
+
+ self.c.execute(
+ 'CREATE TABLE IF NOT EXISTS "settings" ('
+ '"plugin" TEXT NOT NULL, '
+ '"user" INTEGER DEFAULT -1 NOT NULL, '
+ '"config" TEXT NOT NULL, '
+ 'FOREIGN KEY(user) REFERENCES users(uid), '
+ 'PRIMARY KEY (plugin, user) ON CONFLICT REPLACE'
+ ')'
+ )
+
+ self.c.execute(
+ 'CREATE TABLE IF NOT EXISTS "accounts" ('
+ '"plugin" TEXT NOT NULL, '
+ '"loginname" TEXT NOT NULL, '
+ '"owner" INTEGER NOT NULL DEFAULT -1, '
+ '"activated" INTEGER DEFAULT 1, '
+ '"password" TEXT DEFAULT "", '
+ '"shared" INTEGER DEFAULT 0, '
+ '"options" TEXT DEFAULT "", '
+ 'FOREIGN KEY(owner) REFERENCES users(uid), '
+ 'PRIMARY KEY (plugin, loginname, owner) ON CONFLICT REPLACE'
+ ')'
+ )
+
+ self.c.execute(
+ 'CREATE TABLE IF NOT EXISTS "stats" ('
+ '"user" INTEGER NOT NULL, '
+ '"plugin" TEXT NOT NULL, '
+ '"time" INTEGER NOT NULL, '
+ '"premium" INTEGER DEFAULT 0 NOT NULL, '
+ '"amount" INTEGER DEFAULT 0 NOT NULL, '
+ 'FOREIGN KEY(user) REFERENCES users(uid), '
+ 'PRIMARY KEY(user, plugin, time)'
+ ')'
+ )
+ self.c.execute('CREATE INDEX IF NOT EXISTS "stats_time" ON stats(time)')
+
+ #try to lower ids
+ self.c.execute('SELECT max(fid) FROM files')
+ fid = self.c.fetchone()[0]
+ fid = int(fid) if fid else 0
+ self.c.execute('UPDATE SQLITE_SEQUENCE SET seq=? WHERE name=?', (fid, "files"))
+
+ self.c.execute('SELECT max(pid) FROM packages')
+ pid = self.c.fetchone()[0]
+ pid = int(pid) if pid else 0
+ self.c.execute('UPDATE SQLITE_SEQUENCE SET seq=? WHERE name=?', (pid, "packages"))
+
+ self.c.execute('VACUUM')
+
+
+    def createCursor(self):
+        """ Return a new cursor on the backend's connection. """
+        return self.conn.cursor()
+
+    @async
+    def commit(self):
+        """ Non-blocking commit, queued as a job for the db thread. """
+        self.conn.commit()
+
+    @queue
+    def syncSave(self):
+        """ Blocking commit: waits until the job was processed (see queue). """
+        self.conn.commit()
+
+    @async
+    def rollback(self):
+        """ Non-blocking rollback of the current transaction. """
+        self.conn.rollback()
+
+    def async(self, f, *args, **kwargs):
+        """ Enqueue *f* as a DatabaseJob and return without waiting. """
+        args = (self, ) + args
+        job = DatabaseJob(f, *args, **kwargs)
+        self.jobs.put(job)
+
+    def queue(self, f, *args, **kwargs):
+        """ Enqueue *f* as a DatabaseJob and block for its result.
+
+        Re-raises a stored initialization error first. Only waits while the
+        backend thread is marked running; otherwise the (still empty) job
+        result is returned immediately.
+        """
+        # Raise previous error of initialization
+        if self.error: raise self.error
+        args = (self, ) + args
+        job = DatabaseJob(f, *args, **kwargs)
+        self.jobs.put(job)
+
+        # only wait when db is running
+        if self.running.isSet(): job.wait()
+        return job.result
+
+    @classmethod
+    def registerSub(cls, klass):
+        """ Register a method-provider class; its attributes become
+        reachable on the backend via __getattr__. """
+        cls.subs.append(klass)
+
+    @classmethod
+    def unregisterSub(cls, klass):
+        """ Remove a previously registered method-provider class. """
+        cls.subs.remove(klass)
+
+    def __getattr__(self, attr):
+        """ Delegate unknown attribute lookups to the registered subs. """
+        for sub in DatabaseBackend.subs:
+            if hasattr(sub, attr):
+                # NOTE(review): this returns the attribute from the sub
+                # *class*, not from an instance -- the sub methods appear
+                # to take the backend as their first argument; confirm
+                return getattr(sub, attr)
+        raise AttributeError(attr)
+
+
+if __name__ == "__main__":
+    # ad-hoc smoke/benchmark harness for the backend (Python 2 print syntax)
+    db = DatabaseBackend()
+    db.setup()
+
+    class Test():
+        @queue
+        def insert(db):
+            c = db.createCursor()
+            for i in range(1000):
+                c.execute("INSERT INTO storage (identifier, key, value) VALUES (?, ?, ?)", ("foo", i, "bar"))
+
+        @async
+        def insert2(db):
+            c = db.createCursor()
+            for i in range(1000 * 1000):
+                c.execute("INSERT INTO storage (identifier, key, value) VALUES (?, ?, ?)", ("foo", i, "bar"))
+
+        @queue
+        def select(db):
+            c = db.createCursor()
+            for i in range(10):
+                res = c.execute("SELECT value FROM storage WHERE identifier=? AND key=?", ("foo", i))
+                print res.fetchone()
+
+        @queue
+        def error(db):
+            # deliberately selects a non-existing column to provoke an error;
+            # 'i' resolves to the module-level loop variable set below
+            c = db.createCursor()
+            print "a"
+            c.execute("SELECT myerror FROM storage WHERE identifier=? AND key=?", ("foo", i))
+            print "e"
+
+    db.registerSub(Test)
+    from time import time
+
+    # time 100 synchronous (queued) batch inserts
+    start = time()
+    for i in range(100):
+        db.insert()
+    end = time()
+    print end - start
+
+    # a single async bulk insert returns immediately
+    start = time()
+    db.insert2()
+    end = time()
+    print end - start
+
+    db.error()
+
diff --git a/pyload/database/FileDatabase.py b/pyload/database/FileDatabase.py
new file mode 100644
index 000000000..7b39cfa47
--- /dev/null
+++ b/pyload/database/FileDatabase.py
@@ -0,0 +1,448 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from new_collections import OrderedDict
+
+from pyload.Api import DownloadInfo, FileInfo, PackageInfo, PackageStats, DownloadState as DS, state_string
+from pyload.database import DatabaseMethods, queue, async, inner
+
+zero_stats = PackageStats(0, 0, 0, 0)
+
+
+class FileMethods(DatabaseMethods):
+
+    @queue
+    def filecount(self):
+        """Return the total number of file rows (debugging helper only)."""
+        self.c.execute("SELECT COUNT(*) FROM files")
+        return self.c.fetchone()[0]
+
+ @queue
+ def downloadstats(self, user=None):
+ """ number of downloads and size """
+ if user is None:
+ self.c.execute("SELECT COUNT(*), SUM(f.size) FROM files f WHERE dlstatus != 0")
+ else:
+ self.c.execute(
+ "SELECT COUNT(*), SUM(f.size) FROM files f, packages p WHERE f.package = p.pid AND dlstatus != 0",
+ user)
+
+ r = self.c.fetchone()
+ # sum is None when no elements are added
+ return (r[0], r[1] if r[1] is not None else 0) if r else (0, 0)
+
+ @queue
+ def queuestats(self, user=None):
+ """ number and size of files in queue not finished yet"""
+ # status not in NA, finished, skipped
+ if user is None:
+ self.c.execute("SELECT COUNT(*), SUM(f.size) FROM files f WHERE dlstatus NOT IN (0,5,6)")
+ else:
+ self.c.execute(
+ "SELECT COUNT(*), SUM(f.size) FROM files f, package p WHERE f.package = p.pid AND p.owner=? AND dlstatus NOT IN (0,5,6)",
+ user)
+
+ r = self.c.fetchone()
+ return (r[0], r[1] if r[1] is not None else 0) if r else (0, 0)
+
+
+ # TODO: multi user?
+ @queue
+ def processcount(self, fid=-1, user=None):
+ """ number of files which have to be processed """
+ # status in online, queued, starting, waiting, downloading
+ self.c.execute("SELECT COUNT(*), SUM(size) FROM files WHERE dlstatus IN (2,3,8,9,10) AND fid != ?", (fid, ))
+ return self.c.fetchone()[0]
+
+ @queue
+ def processstats(self, user=None):
+ if user is None:
+ self.c.execute("SELECT COUNT(*), SUM(size) FROM files WHERE dlstatus IN (2,3,8,9,10)")
+ else:
+ self.c.execute(
+ "SELECT COUNT(*), SUM(f.size) FROM files f, packages p WHERE f.package = p.pid AND dlstatus IN (2,3,8,9,10)",
+ user)
+ r = self.c.fetchone()
+ return (r[0], r[1] if r[1] is not None else 0) if r else (0, 0)
+
+    @queue
+    def addLink(self, url, name, plugin, package, owner):
+        """ Insert a single link and return its new fid. """
+        # mark file status initially as missing, dlstatus - queued
+        self.c.execute('INSERT INTO files(url, name, plugin, status, dlstatus, package, owner) VALUES(?,?,?,1,3,?,?)',
+                       (url, name, plugin, package, owner))
+        return self.c.lastrowid
+
+ @async
+ def addLinks(self, links, package, owner):
+ """ links is a list of tuples (url, plugin)"""
+ links = [(x[0], x[0], x[1], package, owner) for x in links]
+ self.c.executemany(
+ 'INSERT INTO files(url, name, plugin, status, dlstatus, package, owner) VALUES(?,?,?,1,3,?,?)',
+ links)
+
+    @queue
+    def addFile(self, name, size, media, package, owner):
+        """ Insert a local (non-download) file row, return its new fid. """
+        # file status - ok, dl status NA
+        self.c.execute('INSERT INTO files(name, size, media, package, owner) VALUES(?,?,?,?,?)',
+                       (name, size, media, package, owner))
+        return self.c.lastrowid
+
+    @queue
+    def addPackage(self, name, folder, root, password, site, comment, status, owner):
+        """ Insert a new package under *root*, return its new pid.
+        'added' and 'packageorder' are filled by the insert trigger. """
+        self.c.execute(
+            'INSERT INTO packages(name, folder, root, password, site, comment, status, owner) VALUES(?,?,?,?,?,?,?,?)'
+            , (name, folder, root, password, site, comment, status, owner))
+        return self.c.lastrowid
+
+    @async
+    def deletePackage(self, pid, owner=None):
+        """ Delete a package; with *owner* set, only if owned by that user. """
+        # order updated by trigger, as well as links deleted
+        if owner is None:
+            self.c.execute('DELETE FROM packages WHERE pid=?', (pid,))
+        else:
+            self.c.execute('DELETE FROM packages WHERE pid=? AND owner=?', (pid, owner))
+
+    @async
+    def deleteFile(self, fid, order, package, owner=None):
+        """ Delete a file; its *order* and *package* are needed to close
+        the fileorder gap among siblings. """
+        if owner is None:
+            self.c.execute('DELETE FROM files WHERE fid=?', (fid,))
+            self.c.execute('UPDATE files SET fileorder=fileorder-1 WHERE fileorder > ? AND package=?',
+                           (order, package))
+        else:
+            self.c.execute('DELETE FROM files WHERE fid=? AND owner=?', (fid, owner))
+            self.c.execute('UPDATE files SET fileorder=fileorder-1 WHERE fileorder > ? AND package=? AND owner=?',
+                           (order, package, owner))
+
+    @async
+    def saveCollector(self, owner, data):
+        """ Save the json string; the table's PRIMARY KEY(owner)
+        ON CONFLICT REPLACE turns this INSERT into an upsert. """
+        self.c.execute("INSERT INTO collector(owner, data) VALUES (?,?)", (owner, data))
+
+ @queue
+ def retrieveCollector(self, owner):
+ """ retrieve the saved string """
+ self.c.execute('SELECT data FROM collector WHERE owner=?', (owner,))
+ r = self.c.fetchone()
+ if not r: return None
+ return r[0]
+
+    @async
+    def deleteCollector(self, owner):
+        """ Drop the saved collector blob of *owner*. """
+        self.c.execute('DELETE FROM collector WHERE owner=?', (owner,))
+
+    @queue
+    def getAllFiles(self, package=None, search=None, state=None, owner=None):
+        """ Return an ordered dict fid -> FileInfo.
+
+        :param package: optional package to filter out
+        :param search: or search string for file name
+        :param state: optional DownloadState filter (DS.All means no filter)
+        :param owner: only specific owner
+        """
+        qry = ('SELECT fid, name, owner, size, status, media, added, fileorder, '
+               'url, plugin, hash, dlstatus, error, package FROM files WHERE ')
+
+        arg = []
+
+        if state is not None and state != DS.All:
+            # state_string() expands a DownloadState into a dlstatus id list
+            qry += 'dlstatus IN (%s) AND ' % state_string(state)
+        if owner is not None:
+            qry += 'owner=? AND '
+            arg.append(owner)
+
+        if package is not None:
+            arg.append(package)
+            qry += 'package=? AND '
+        if search is not None:
+            search = "%%%s%%" % search.strip("%")
+            arg.append(search)
+            qry += "name LIKE ? "
+
+        # make qry valid
+        if qry.endswith("WHERE "): qry = qry[:-6]
+        if qry.endswith("AND "): qry = qry[:-4]
+
+        self.c.execute(qry + "ORDER BY package, fileorder", arg)
+
+        data = OrderedDict()
+        for r in self.c:
+            f = FileInfo(r[0], r[1], r[13], r[2], r[3], r[4], r[5], r[6], r[7])
+            if r[11] > 0: # dl status != NA
+                f.download = DownloadInfo(r[8], r[9], r[10], r[11], self.manager.statusMsg[r[11]], r[12])
+
+            data[r[0]] = f
+
+        return data
+
+ @queue
+ def getMatchingFilenames(self, pattern, owner=None):
+ """ Return matching file names for pattern, useful for search suggestions """
+ qry = 'SELECT name FROM files WHERE name LIKE ?'
+ args = ["%%%s%%" % pattern.strip("%")]
+ if owner:
+ qry += " AND owner=?"
+ args.append(owner)
+
+ self.c.execute(qry, args)
+ return [r[0] for r in self.c]
+
+    @queue
+    def getAllPackages(self, root=None, owner=None, tags=None):
+        """ Return an ordered dict pid -> PackageInfo (with stats).
+
+        :param root: optional root to filter
+        :param owner: optional user id
+        :param tags: optional tag list
+            NOTE(review): 'tags' is currently accepted but never used
+        """
+        qry = (
+            'SELECT pid, name, folder, root, owner, site, comment, password, added, tags, status, shared, packageorder '
+            'FROM packages%s ORDER BY root, packageorder')
+
+        if root is None:
+            stats = self.getPackageStats(owner=owner)
+            if owner is None:
+                self.c.execute(qry % "")
+            else:
+                self.c.execute(qry % " WHERE owner=?", (owner,))
+        else:
+            # includes the root package itself plus its direct children
+            stats = self.getPackageStats(root=root, owner=owner)
+            if owner is None:
+                self.c.execute(qry % ' WHERE root=? OR pid=?', (root, root))
+            else:
+                self.c.execute(qry % ' WHERE (root=? OR pid=?) AND owner=?', (root, root, owner))
+
+        data = OrderedDict()
+        for r in self.c:
+            data[r[0]] = PackageInfo(
+                r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7], r[8], r[9].split(","), r[10], r[11], r[12],
+                stats.get(r[0], zero_stats)
+            )
+
+        return data
+
+    @inner
+    def getPackageStats(self, pid=None, root=None, owner=None):
+        """ Return dict pid -> PackageStats (linkstotal, linksdone,
+        sizetotal, sizedone) for the selected packages.
+
+        The outer join counts all files with dlstatus > 0 per package; the
+        sub-select aggregates the finished/skipped ones (dlstatus 5, 6).
+        Exactly one of pid/root/owner narrows the selection; with none
+        given, stats for all packages are returned.
+        """
+        qry = ("SELECT p.pid, SUM(f.size) AS sizetotal, COUNT(f.fid) AS linkstotal, sizedone, linksdone "
+               "FROM packages p JOIN files f ON p.pid = f.package AND f.dlstatus > 0 %(sub)s LEFT OUTER JOIN "
+               "(SELECT p.pid AS pid, SUM(f.size) AS sizedone, COUNT(f.fid) AS linksdone "
+               "FROM packages p JOIN files f ON p.pid = f.package %(sub)s AND f.dlstatus in (5,6) GROUP BY p.pid) s ON s.pid = p.pid "
+               "GROUP BY p.pid")
+
+        # status in (finished, skipped, processing)
+
+        # named-parameter style: locals() supplies :root / :pid / :owner
+        if root is not None:
+            self.c.execute(qry % {"sub": "AND (p.root=:root OR p.pid=:root)"}, locals())
+        elif pid is not None:
+            self.c.execute(qry % {"sub": "AND p.pid=:pid"}, locals())
+        elif owner is not None:
+            self.c.execute(qry % {"sub": "AND p.owner=:owner"}, locals())
+        else:
+            self.c.execute(qry % {"sub": ""})
+
+        data = {}
+        for r in self.c:
+            # SUM/COUNT yield NULL for packages without matching files
+            data[r[0]] = PackageStats(
+                r[2] if r[2] else 0,
+                r[4] if r[4] else 0,
+                int(r[1]) if r[1] else 0,
+                int(r[3]) if r[3] else 0,
+            )
+
+        return data
+
+    @queue
+    def getStatsForPackage(self, pid):
+        """ Return the PackageStats for a single package.
+        Raises KeyError if the package has no stats row. """
+        return self.getPackageStats(pid=pid)[pid]
+
+    @queue
+    def getFileInfo(self, fid, force=False):
+        """get data for specific file, when force is true download info will be appended"""
+        self.c.execute('SELECT fid, name, owner, size, status, media, added, fileorder, '
+                       'url, plugin, hash, dlstatus, error, package FROM files '
+                       'WHERE fid=?', (fid,))
+        r = self.c.fetchone()
+        if not r:
+            return None
+        else:
+            f = FileInfo(r[0], r[1], r[13], r[2], r[3], r[4], r[5], r[6], r[7])
+            # attach DownloadInfo when dlstatus != NA, or always with force
+            if r[11] > 0 or force:
+                f.download = DownloadInfo(r[8], r[9], r[10], r[11], self.manager.statusMsg[r[11]], r[12])
+
+            return f
+
+    @queue
+    def getPackageInfo(self, pid, stats=True):
+        """get data for a specific package, optionally with package stats"""
+        # 'stats' is rebound from a bool flag to the stats dict here
+        if stats:
+            stats = self.getPackageStats(pid=pid)
+
+        self.c.execute(
+            'SELECT pid, name, folder, root, owner, site, comment, password, added, tags, status, shared, packageorder '
+            'FROM packages WHERE pid=?', (pid,))
+
+        r = self.c.fetchone()
+        if not r:
+            return None
+        else:
+            return PackageInfo(
+                r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7], r[8], r[9].split(","), r[10], r[11], r[12],
+                stats.get(r[0], zero_stats) if stats else None
+            )
+
+ # TODO: does this need owner?
+    @async
+    def updateLinkInfo(self, data):
+        """ data is list of tuples (name, size, status,[ hash,] url)
+
+        Only rows whose dlstatus is still in an initial state
+        (NA, offline, online, queued, temp offline) are touched.
+        """
+        # 4-tuples carry no hash, 5-tuples do; both end with the url key
+        if data and len(data[0]) == 4:
+            self.c.executemany('UPDATE files SET name=?, size=?, dlstatus=? WHERE url=? AND dlstatus IN (0,1,2,3,11)',
+                               data)
+        else:
+            self.c.executemany(
+                'UPDATE files SET name=?, size=?, dlstatus=?, hash=? WHERE url=? AND dlstatus IN (0,1,2,3,11)', data)
+
+    @async
+    def updateFile(self, f):
+        """ Persist a PyFile-like object; note the column mapping:
+        the 'status' column stores f.filestatus, 'dlstatus' stores f.status. """
+        self.c.execute('UPDATE files SET name=?, size=?, status=?,'
+                       'media=?, url=?, hash=?, dlstatus=?, error=? WHERE fid=?',
+                       (f.name, f.size, f.filestatus, f.media, f.url,
+                        f.hash, f.status, f.error, f.fid))
+
+    @async
+    def updatePackage(self, p):
+        """ Persist a PyPackage-like object; tags are stored comma-joined. """
+        self.c.execute(
+            'UPDATE packages SET name=?, folder=?, site=?, comment=?, password=?, tags=?, status=?, shared=? WHERE pid=?',
+            (p.name, p.folder, p.site, p.comment, p.password, ",".join(p.tags), p.status, p.shared, p.pid))
+
+ # TODO: most modifying methods needs owner argument to avoid checking beforehand
+    @async
+    def orderPackage(self, pid, root, oldorder, order):
+        """ Move package *pid* within *root* from *oldorder* to *order*,
+        shifting the packages in between by one. """
+        if oldorder > order: # package moved upwards
+            self.c.execute(
+                'UPDATE packages SET packageorder=packageorder+1 WHERE packageorder >= ? AND packageorder < ? AND root=? AND packageorder >= 0'
+                , (order, oldorder, root))
+        elif oldorder < order: # moved downwards
+            self.c.execute(
+                'UPDATE packages SET packageorder=packageorder-1 WHERE packageorder <= ? AND packageorder > ? AND root=? AND packageorder >= 0'
+                , (order, oldorder, root))
+
+        self.c.execute('UPDATE packages SET packageorder=? WHERE pid=?', (order, pid))
+
+    @async
+    def orderFiles(self, pid, fids, oldorder, order):
+        """ Move the contiguous run *fids* (starting at *oldorder*) within
+        package *pid* so that it begins at *order*; the files in between
+        are shifted by len(fids). """
+        diff = len(fids)
+        data = []
+
+        if oldorder > order: # moved upwards
+            self.c.execute('UPDATE files SET fileorder=fileorder+? WHERE fileorder >= ? AND fileorder < ? AND package=?'
+                           , (diff, order, oldorder, pid))
+            data = [(order + i, fid) for i, fid in enumerate(fids)]
+        elif oldorder < order:
+            self.c.execute(
+                'UPDATE files SET fileorder=fileorder-? WHERE fileorder <= ? AND fileorder >= ? AND package=?'
+                , (diff, order, oldorder + diff, pid))
+            data = [(order - diff + i + 1, fid) for i, fid in enumerate(fids)]
+
+        self.c.executemany('UPDATE files SET fileorder=? WHERE fid=?', data)
+
+    @async
+    def moveFiles(self, pid, fids, package):
+        """ Append the files *fids* (from package *pid*) to *package*. """
+        self.c.execute('SELECT max(fileorder) FROM files WHERE package=?', (package,))
+        r = self.c.fetchone()
+        order = (r[0] if r[0] else 0) + 1
+
+        # NOTE(review): the gap in the *source* package is closed using
+        # 'order', which was computed from the *destination* package --
+        # looks like it should be based on the moved files' old positions;
+        # confirm against callers
+        self.c.execute('UPDATE files SET fileorder=fileorder-? WHERE fileorder > ? AND package=?',
+                       (len(fids), order, pid))
+
+        data = [(package, order + i, fid) for i, fid in enumerate(fids)]
+        self.c.executemany('UPDATE files SET package=?, fileorder=? WHERE fid=?', data)
+
+ @async
+ def movePackage(self, root, order, pid, dpid):
+ self.c.execute('SELECT max(packageorder) FROM packages WHERE root=?', (dpid,))
+ r = self.c.fetchone()
+ max = (r[0] if r[0] else 0) + 1
+
+ self.c.execute('UPDATE packages SET packageorder=packageorder-1 WHERE packageorder > ? AND root=?',
+ (order, root))
+
+ self.c.execute('UPDATE packages SET root=?, packageorder=? WHERE pid=?', (dpid, max, pid))
+
+    @async
+    def restartFile(self, fid):
+        """ Requeue a single file and clear its previous error. """
+        # status -> queued
+        self.c.execute('UPDATE files SET dlstatus=3, error="" WHERE fid=?', (fid,))
+
+ @async
+ def restartPackage(self, pid):
+ # status -> queued
+ self.c.execute('UPDATE files SET status=3 WHERE package=?', (pid,))
+
+
+ # TODO: multi user approach
+    @queue
+    def getJob(self, occ):
+        """return pyfile ids, which are suitable for download and don't use a occupied plugin"""
+        # NOTE(review): plugin names are interpolated into the SQL instead of
+        # bound as parameters; acceptable only while 'occ' holds internal
+        # plugin names -- confirm
+        cmd = "(%s)" % ", ".join(["'%s'" % x for x in occ])
+        #TODO
+
+        # dlstatus in online, queued | package status = ok
+        cmd = ("SELECT f.fid FROM files as f INNER JOIN packages as p ON f.package=p.pid "
+               "WHERE f.plugin NOT IN %s AND f.dlstatus IN (2,3) AND p.status=0 "
+               "ORDER BY p.packageorder ASC, f.fileorder ASC LIMIT 5") % cmd
+
+        self.c.execute(cmd)
+
+        return [x[0] for x in self.c]
+
+    @queue
+    def getUnfinished(self, pid):
+        """return list of max length 3 ids with pyfiles in package not finished or processed"""
+
+        # dlstatus not in (finished, skipped, processing)
+        self.c.execute("SELECT fid FROM files WHERE package=? AND dlstatus NOT IN (5, 6, 14) LIMIT 3", (pid,))
+        return [r[0] for r in self.c]
+
+ @queue
+ def restartFailed(self, owner):
+ # status=queued, where status in failed, aborted, temp offline
+ self.c.execute("UPDATE files SET dlstatus=3, error='' WHERE dlstatus IN (7, 11, 12)")
+
+    @queue
+    def findDuplicates(self, id, folder, filename):
+        """ checks if filename exists with different id and same package folder, dlstatus = finished.
+        Returns the plugin name of the first duplicate, or None. """
+        # TODO: also check root of package
+        self.c.execute(
+            "SELECT f.plugin FROM files f INNER JOIN packages as p ON f.package=p.pid AND p.folder=? WHERE f.fid!=? AND f.dlstatus=5 AND f.name=?"
+            , (folder, id, filename))
+        return self.c.fetchone()
+
+    @queue
+    def purgeLinks(self):
+        """ Delete all files still marked as missing (never resolved). """
+        # fstatus = missing
+        self.c.execute("DELETE FROM files WHERE status == 1")
+
+    @queue
+    def purgeAll(self): # only used for debugging
+        """ Wipe packages, files and collector entirely (debugging only). """
+        self.c.execute("DELETE FROM packages")
+        self.c.execute("DELETE FROM files")
+        self.c.execute("DELETE FROM collector")
+
+
+FileMethods.register() \ No newline at end of file
diff --git a/pyload/database/StatisticDatabase.py b/pyload/database/StatisticDatabase.py
new file mode 100644
index 000000000..d5f9658f2
--- /dev/null
+++ b/pyload/database/StatisticDatabase.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from pyload.database import DatabaseMethods, queue, async, inner
+
+# TODO
+
+class StatisticMethods(DatabaseMethods):
+    # placeholder: statistic queries are not implemented yet (see TODO above)
+    pass
+
+
+
+StatisticMethods.register() \ No newline at end of file
diff --git a/pyload/database/StorageDatabase.py b/pyload/database/StorageDatabase.py
new file mode 100644
index 000000000..2d4c8a9c7
--- /dev/null
+++ b/pyload/database/StorageDatabase.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from pyload.database import DatabaseBackend, queue
+
+class StorageMethods():
+ @queue
+ def setStorage(db, identifier, key, value):
+ db.c.execute("SELECT id FROM storage WHERE identifier=? AND key=?", (identifier, key))
+ if db.c.fetchone() is not None:
+ db.c.execute("UPDATE storage SET value=? WHERE identifier=? AND key=?", (value, identifier, key))
+ else:
+ db.c.execute("INSERT INTO storage (identifier, key, value) VALUES (?, ?, ?)", (identifier, key, value))
+
+ @queue
+ def getStorage(db, identifier, key=None):
+ if key is not None:
+ db.c.execute("SELECT value FROM storage WHERE identifier=? AND key=?", (identifier, key))
+ row = db.c.fetchone()
+ if row is not None:
+ return row[0]
+ else:
+ db.c.execute("SELECT key, value FROM storage WHERE identifier=?", (identifier, ))
+ d = {}
+ for row in db.c:
+ d[row[0]] = row[1]
+ return d
+
+ @queue
+ def delStorage(db, identifier, key):
+ db.c.execute("DELETE FROM storage WHERE identifier=? AND key=?", (identifier, key))
+
+DatabaseBackend.registerSub(StorageMethods)
diff --git a/pyload/database/UserDatabase.py b/pyload/database/UserDatabase.py
new file mode 100644
index 000000000..8d8381a40
--- /dev/null
+++ b/pyload/database/UserDatabase.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from hashlib import sha1
+from string import letters, digits
+from random import choice
+
+alphnum = letters + digits
+
+from pyload.Api import UserData
+
+from DatabaseBackend import DatabaseMethods, queue, async
+
+
+def random_salt():
+ return "".join(choice(alphnum) for x in range(0, 5))
+
+
+class UserMethods(DatabaseMethods):
+    """ User account queries: creation, lookup, authentication, roles.
+
+    Passwords are stored as 5-char salt + sha1(salt + password) hexdigest.
+    NOTE(review): unsalted-iteration SHA1 is weak by modern standards;
+    consider a KDF if this scheme can be migrated.
+    """
+
+    @queue
+    def addUser(self, user, password):
+        """ Create a user, or reset the password if the name exists. """
+        salt = random_salt()
+        h = sha1(salt + password)
+        password = salt + h.hexdigest()
+
+        self.c.execute('SELECT name FROM users WHERE name=?', (user, ))
+        if self.c.fetchone() is not None:
+            self.c.execute('UPDATE users SET password=? WHERE name=?', (password, user))
+        else:
+            self.c.execute('INSERT INTO users (name, password) VALUES (?, ?)', (user, password))
+
+    @queue
+    def getUserData(self, name=None, uid=None):
+        """ Return UserData by name or uid, or None when not found. """
+        qry = ('SELECT uid, name, email, role, permission, folder, traffic, dllimit, dlquota, '
+               'hddquota, user, template FROM "users" WHERE ')
+
+        if name is not None:
+            self.c.execute(qry + "name=?", (name,))
+            r = self.c.fetchone()
+            if r:
+                return UserData(*r)
+
+        elif uid is not None:
+            self.c.execute(qry + "uid=?", (uid,))
+            r = self.c.fetchone()
+            if r:
+                return UserData(*r)
+
+        return None
+
+    @queue
+    def getAllUserData(self):
+        """ Return dict uid -> UserData for every user. """
+        self.c.execute('SELECT uid, name, email, role, permission, folder, traffic, dllimit, dlquota, '
+                       'hddquota, user, template FROM "users"')
+        user = {}
+        for r in self.c:
+            user[r[0]] = UserData(*r)
+
+        return user
+
+
+    @queue
+    def checkAuth(self, user, password):
+        """ Verify credentials; return UserData on success, else None. """
+        self.c.execute('SELECT uid, name, email, role, permission, folder, traffic, dllimit, dlquota, '
+                       'hddquota, user, template, password FROM "users" WHERE name=?', (user, ))
+        r = self.c.fetchone()
+        if not r:
+            return None
+        # stored format: first 5 chars salt, rest sha1 hexdigest
+        salt = r[-1][:5]
+        pw = r[-1][5:]
+        h = sha1(salt + password)
+        if h.hexdigest() == pw:
+            return UserData(*r[:-1])
+        else:
+            return None
+
+    @queue #TODO
+    def changePassword(self, user, oldpw, newpw):
+        """ Change a user's password; returns True only when *oldpw*
+        verified and the update was performed. """
+        self.c.execute('SELECT rowid, name, password FROM users WHERE name=?', (user, ))
+        r = self.c.fetchone()
+        if not r:
+            return False
+
+        salt = r[2][:5]
+        pw = r[2][5:]
+        h = sha1(salt + oldpw)
+        if h.hexdigest() == pw:
+            # re-salt with a fresh random salt
+            salt = random_salt()
+            h = sha1(salt + newpw)
+            password = salt + h.hexdigest()
+
+            self.c.execute("UPDATE users SET password=? WHERE name=?", (password, user))
+            return True
+
+        return False
+
+    @async
+    def setPermission(self, user, perms):
+        """ Set the permission bitmask of *user*. """
+        self.c.execute("UPDATE users SET permission=? WHERE name=?", (perms, user))
+
+    @async
+    def setRole(self, user, role):
+        """ Set the role of *user*. """
+        self.c.execute("UPDATE users SET role=? WHERE name=?", (role, user))
+
+    # TODO update methods
+    @async
+    def removeUserByName(self, name):
+        """ Delete a user by name.
+
+        NOTE(review): only rows in 'users' with user=uid are deleted (which
+        includes the user itself via the insert trigger); despite the
+        comment below, accounts/packages are not touched here -- confirm
+        """
+        self.c.execute("SELECT uid from users WHERE name=?", (name,))
+        uid = self.c.fetchone()
+        if uid:
+            # deletes user and all associated accounts
+            self.c.execute('DELETE FROM users WHERE user=?', (uid[0], ))
+
+UserMethods.register()
diff --git a/pyload/database/__init__.py b/pyload/database/__init__.py
new file mode 100644
index 000000000..d3f97fb53
--- /dev/null
+++ b/pyload/database/__init__.py
@@ -0,0 +1,8 @@
+from DatabaseBackend import DatabaseMethods, DatabaseBackend, queue, async, inner
+
+from FileDatabase import FileMethods
+from UserDatabase import UserMethods
+from StorageDatabase import StorageMethods
+from AccountDatabase import AccountMethods
+from ConfigDatabase import ConfigMethods
+from StatisticDatabase import StatisticMethods \ No newline at end of file
diff --git a/pyload/datatypes/PyFile.py b/pyload/datatypes/PyFile.py
new file mode 100644
index 000000000..7bb3a4e31
--- /dev/null
+++ b/pyload/datatypes/PyFile.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from time import sleep, time
+from ReadWriteLock import ReadWriteLock
+
+from pyload.Api import ProgressInfo, DownloadProgress, FileInfo, DownloadInfo, DownloadStatus
+from pyload.utils import lock, read_lock
+
# Human readable status name -> numeric code; order mirrors the api
# DownloadStatus values used throughout the file manager.
statusMap = {
    name: code for code, name in enumerate((
        "none",
        "offline",
        "online",
        "queued",
        "paused",
        "finished",
        "skipped",
        "failed",
        "starting",
        "waiting",
        "downloading",
        "temp. offline",
        "aborted",
        "decrypting",
        "processing",
        "custom",
        "unknown",
    ))
}
+
+
class PyFile(object):
    """
    Represents a file object at runtime.

    Mirrors one row of the file table plus transient download state; many
    instances may be cached at once, hence __slots__.
    """
    __slots__ = ("m", "fid", "_name", "_size", "filestatus", "media", "added", "fileorder",
        "url", "pluginname", "hash", "status", "error", "packageid", "ownerid",
        "lock", "plugin", "waitUntil", "abort", "statusname",
        "reconnected", "pluginclass")

    @staticmethod
    def fromInfoData(m, info):
        """Build a runtime PyFile from a FileInfo api structure.

        The download sub-structure is optional; without it the download
        related fields stay at their empty defaults."""
        f = PyFile(m, info.fid, info.name, info.size, info.status, info.media, info.added, info.fileorder,
            "", "", "", DownloadStatus.NA, "", info.package, info.owner)
        if info.download:
            f.url = info.download.url
            f.pluginname = info.download.plugin
            f.hash = info.download.hash
            f.status = info.download.status
            f.error = info.download.error

        return f

    def __init__(self, manager, fid, name, size, filestatus, media, added, fileorder,
                 url, pluginname, hash, status, error, package, owner):

        self.m = manager  # the FileManager owning this instance

        self.fid = int(fid)
        self._name = name
        self._size = size
        self.filestatus = filestatus
        self.media = media
        self.added = added
        self.fileorder = fileorder
        self.url = url
        self.pluginname = pluginname
        self.hash = hash
        self.status = status
        self.error = error
        self.ownerid = owner
        self.packageid = package
        # database information ends here

        # guards plugin initialization/release (see initPlugin/release)
        self.lock = ReadWriteLock()

        # lazily created hoster plugin instance, see initPlugin()
        self.plugin = None

        self.waitUntil = 0 # time() + time to wait

        # status attributes
        self.abort = False        # set True to request the download thread to stop
        self.reconnected = False
        self.statusname = None    # optional custom status text, see setCustomStatus


    @property
    def id(self):
        # legacy alias kept for old callers
        self.m.core.log.debug("Deprecated attr .id, use .fid instead")
        return self.fid

    def setSize(self, value):
        self._size = int(value)

    # will convert all sizes to ints
    size = property(lambda self: self._size, setSize)

    def getName(self):
        # prefer the name reported by the request (e.g. from HTTP headers)
        try:
            if self.plugin.req.name:
                return self.plugin.req.name
            else:
                return self._name
        except:
            return self._name

    def setName(self, name):
        """ Only set unicode or utf8 strings as name """
        if type(name) == str:
            name = name.decode("utf8")

        self._name = name

    name = property(getName, setName)

    def __repr__(self):
        return "<PyFile %s: %s@%s>" % (self.id, self.name, self.pluginname)

    @lock
    def initPlugin(self):
        """ inits plugin instance """
        if not self.plugin:
            self.pluginclass = self.m.core.pluginManager.getPlugin(self.pluginname)
            self.plugin = self.pluginclass(self)

    @read_lock
    def hasPlugin(self):
        """Thread safe way to determine this file has initialized plugin attribute"""
        return hasattr(self, "plugin") and self.plugin

    def package(self):
        """ return package instance"""
        return self.m.getPackage(self.packageid)

    def setStatus(self, status):
        """Set status by its name (see module level statusMap)."""
        self.status = statusMap[status]
        # needs to sync so status is written to database
        self.sync()

    def setCustomStatus(self, msg, status="processing"):
        """Set a free-form status text shown instead of the generic message."""
        self.statusname = msg
        self.setStatus(status)

    def getStatusName(self):
        # custom status text only applies while decrypting (13) / processing (14)
        if self.status not in (13, 14) or not self.statusname:
            return self.m.statusMsg[self.status]
        else:
            return self.statusname

    def hasStatus(self, status):
        return statusMap[status] == self.status

    def sync(self):
        """sync PyFile instance with database"""
        self.m.updateFile(self)

    @lock
    def release(self):
        """sync and remove from cache"""
        # let the plugin clean up its request before dropping the reference
        if hasattr(self, "plugin") and self.plugin:
            self.plugin.clean()
            del self.plugin

        self.m.releaseFile(self.fid)


    def toInfoData(self):
        """Convert back into the plain FileInfo api structure."""
        return FileInfo(self.fid, self.getName(), self.packageid, self.ownerid, self.getSize(), self.filestatus,
            self.media, self.added, self.fileorder, DownloadInfo(
                self.url, self.pluginname, self.hash, self.status, self.getStatusName(), self.error
            )
        )

    def getPath(self):
        # TODO: not implemented yet
        pass

    def move(self, pid):
        # TODO: not implemented yet
        pass

    @read_lock
    def abortDownload(self):
        """abort pyfile if possible"""
        # TODO: abort timeout, currently dead locks
        # spin until no download thread processes this file anymore
        while self.id in self.m.core.threadManager.processingIds():
            self.abort = True
            if self.plugin and self.plugin.req:
                self.plugin.req.abortDownloads()
            sleep(0.1)

        self.abort = False
        if self.plugin:
            self.plugin.req.abort()
            if self.plugin.dl:
                self.plugin.dl.abort()

        self.release()

    def finishIfDone(self):
        """set status to finish and release file if every thread is finished with it"""

        if self.id in self.m.core.threadManager.processingIds():
            return False

        self.setStatus("finished")
        self.release()
        self.m.checkAllLinksFinished()
        return True

    def checkIfProcessed(self):
        self.m.checkAllLinksProcessed(self.id)

    def getSpeed(self):
        """ calculates speed """
        try:
            return self.plugin.dl.speed
        except:
            # no plugin / no active download handle
            return 0

    def getETA(self):
        """ gets established time of arrival / or waiting time"""
        try:
            if self.status == DownloadStatus.Waiting:
                return self.waitUntil - time()

            return self.getBytesLeft() / self.getSpeed()
        except:
            # e.g. division by zero when speed is 0
            return 0

    def getBytesArrived(self):
        """ gets bytes arrived """
        try:
            return self.plugin.dl.arrived
        except:
            return 0

    def getBytesLeft(self):
        """ gets bytes left """
        try:
            return self.plugin.dl.size - self.plugin.dl.arrived
        except:
            return 0

    def getSize(self):
        """ get size of download """
        # prefer the live size reported by the download handle
        try:
            if self.plugin.dl.size:
                return self.plugin.dl.size
            else:
                return self.size
        except:
            return self.size

    def getProgressInfo(self):
        """Snapshot of the current download progress as api structure."""
        return ProgressInfo(self.pluginname, self.name, self.getStatusName(), self.getETA(),
            self.getBytesArrived(), self.getSize(),
            DownloadProgress(self.fid, self.packageid, self.getSpeed(), self.status))
diff --git a/pyload/datatypes/PyPackage.py b/pyload/datatypes/PyPackage.py
new file mode 100644
index 000000000..d23ae88e7
--- /dev/null
+++ b/pyload/datatypes/PyPackage.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from time import time
+
+from pyload.Api import PackageInfo, PackageStatus
+from pyload.utils.fs import join
+
class PyPackage:
    """
    Represents a package object at runtime, backed by the FileManager cache.
    """

    @staticmethod
    def fromInfoData(m, info):
        """Create a PyPackage from a plain PackageInfo api structure."""
        return PyPackage(m, info.pid, info.name, info.folder, info.root, info.owner,
            info.site, info.comment, info.password, info.added, info.tags, info.status, info.shared, info.packageorder)

    def __init__(self, manager, pid, name, folder, root, owner, site, comment, password, added, tags, status,
                 shared, packageorder):
        self.m = manager

        self.pid = pid
        self.name = name
        self.folder = folder
        self.root = root
        self.ownerid = owner
        self.site = site
        self.comment = comment
        self.password = password
        self.added = added
        self.tags = tags
        self.status = status
        self.shared = shared
        self.packageorder = packageorder
        # last access time, refreshed by getPath(); drives isStale()
        self.timestamp = time()

        #: Finish event already fired
        self.setFinished = False

    @property
    def id(self):
        self.m.core.log.debug("Deprecated package attr .id, use .pid instead")
        return self.pid

    def isStale(self):
        """True when this cached instance was not accessed for 30 minutes.

        Fix: the original comparison was inverted and reported recently used
        instances as stale (and long-unused ones as fresh)."""
        return self.timestamp + 30 * 60 < time()

    def toInfoData(self):
        """Convert back into the plain PackageInfo api structure."""
        return PackageInfo(self.pid, self.name, self.folder, self.root, self.ownerid, self.site,
            self.comment, self.password, self.added, self.tags, self.status, self.shared, self.packageorder
        )

    def getChildren(self):
        """get information about contained links"""
        return self.m.getPackageData(self.pid)["links"]

    def getPath(self, name=""):
        # accessing the path counts as a use and keeps the instance fresh
        self.timestamp = time()
        return join(self.m.getPackage(self.root).getPath(), self.folder, name)

    def sync(self):
        """sync with db"""
        self.m.updatePackage(self)

    def release(self):
        """sync and delete from cache"""
        self.sync()
        self.m.releasePackage(self.id)

    def delete(self):
        self.m.deletePackage(self.id)

    def deleteIfEmpty(self):
        """ True if deleted """
        if not len(self.getChildren()):
            self.delete()
            return True
        return False

    def notifyChange(self):
        self.m.core.eventManager.dispatchEvent("packageUpdated", self.id)
+
+
class RootPackage(PyPackage):
    """Virtual package at the top of the package tree (pid -1).

    It is never persisted; all database related operations are no-ops."""

    def __init__(self, m, owner):
        # PyPackage signature is (manager, pid, name, folder, root, owner, ...).
        # Fix: the original passed (owner, -2), i.e. in swapped order, so the
        # root package ended up owned by -2 with root set to the user's uid.
        PyPackage.__init__(self, m, -1, "root", "", -2, owner, "", "", "", 0, [], PackageStatus.Ok, False, 0)

    def getPath(self, name=""):
        # the root maps directly onto the configured download folder
        return join(self.m.core.config["general"]["download_folder"], name)

    # no database operations
    def sync(self):
        pass

    def delete(self):
        pass

    def release(self):
        pass
diff --git a/pyload/datatypes/User.py b/pyload/datatypes/User.py
new file mode 100644
index 000000000..31c9a55cc
--- /dev/null
+++ b/pyload/datatypes/User.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+
+from pyload.Api import UserData, Permission, Role
+from pyload.utils import bits_set
+
+#TODO: activate user
+#noinspection PyUnresolvedReferences
class User(UserData):
    """Runtime user object: wraps the api UserData struct with
    permission/role helpers."""

    @staticmethod
    def fromUserData(api, user):
        """Copy a plain UserData instance into a full User."""
        return User(api, user.uid, user.name, user.email, user.role, user.permission,
                    user.folder, user.traffic, user.dllimit, user.dlquota,
                    user.hddquota, user.user, user.templateName)

    def __init__(self, api, *args, **kwargs):
        UserData.__init__(self, *args, **kwargs)
        self.api = api


    def toUserData(self):
        # TODO
        return UserData()

    def hasPermission(self, perms):
        """ Accepts permission bit or name """
        bit = perms
        if isinstance(bit, basestring) and hasattr(Permission, bit):
            bit = getattr(Permission, bit)
        return bits_set(bit, self.permission)

    def hasRole(self, role):
        """ Accepts role value or name """
        wanted = role
        if isinstance(wanted, basestring) and hasattr(Role, wanted):
            wanted = getattr(Role, wanted)
        return self.role == wanted

    def isAdmin(self):
        return self.hasRole(Role.Admin)

    @property
    def primary(self):
        """ Primary user id, Internal user handle used for most operations
        Secondary user account share id with primary user. Only Admins have no primary id. """
        if self.hasRole(Role.Admin):
            return None
        return self.user or self.uid
diff --git a/pyload/datatypes/__init__.py b/pyload/datatypes/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/datatypes/__init__.py
diff --git a/pyload/interaction/EventManager.py b/pyload/interaction/EventManager.py
new file mode 100644
index 000000000..329961d93
--- /dev/null
+++ b/pyload/interaction/EventManager.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+from threading import Lock
+from traceback import print_exc
+
+
+class EventManager:
+ """
+ Handles all event-related tasks, also stores an event queue for clients, so they can retrieve them later.
+
+ **Known Events:**
+ Most addon methods exist as events. These are some additional known events.
+
+ ===================== ================ ===========================================================
+ Name Arguments Description
+ ===================== ================ ===========================================================
+ event eventName, *args Called for every event, with eventName and original args
+ download:preparing fid A download was just queued and will be prepared now.
+ download:start fid A plugin will immediately start the download afterwards.
+ download:allProcessed All links were handled, pyLoad would idle afterwards.
+ download:allFinished All downloads in the queue are finished.
+ config:changed sec, opt, value The config was changed.
+ ===================== ================ ===========================================================
+
+ | Notes:
+ | download:allProcessed is *always* called before download:allFinished.
+ """
+
+ def __init__(self, core):
+ self.core = core
+ self.log = core.log
+
+ self.events = {"event": []}
+
+ self.lock = Lock()
+
+ def listenTo(self, event, func):
+ """Adds an event listener for event name"""
+ if event in self.events:
+ if func in self.events[event]:
+ self.log.debug("Function already registered %s" % func)
+ else:
+ self.events[event].append(func)
+ else:
+ self.events[event] = [func]
+
+ def removeEvent(self, event, func):
+ """removes previously added event listener"""
+ if event in self.events:
+ self.events[event].remove(func)
+
+ def removeFromEvents(self, func):
+ """ Removes func from all known events """
+ for name, events in self.events.iteritems():
+ if func in events:
+ events.remove(func)
+
+ def dispatchEvent(self, event, *args):
+ """dispatches event with args"""
+ for f in self.events["event"]:
+ try:
+ f(event, *args)
+ except Exception, e:
+ self.log.warning("Error calling event handler %s: %s, %s, %s"
+ % ("event", f, args, str(e)))
+ if self.core.debug:
+ print_exc()
+
+ if event in self.events:
+ for f in self.events[event]:
+ try:
+ f(*args)
+ except Exception, e:
+ self.log.warning("Error calling event handler %s: %s, %s, %s"
+ % (event, f, args, str(e)))
+ if self.core.debug:
+ print_exc() \ No newline at end of file
diff --git a/pyload/interaction/InteractionManager.py b/pyload/interaction/InteractionManager.py
new file mode 100644
index 000000000..36d457323
--- /dev/null
+++ b/pyload/interaction/InteractionManager.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+from threading import Lock
+from time import time
+from base64 import standard_b64encode
+
+from new_collections import OrderedDict
+
+from pyload.utils import lock, bits_set
+from pyload.Api import Interaction as IA
+from pyload.Api import InputType, Input
+
+from InteractionTask import InteractionTask
+
class InteractionManager:
    """
    Class that gives ability to interact with the user.
    Arbitrary tasks with predefined output and input types can be set off.
    Tasks live in an insertion ordered dict, so the oldest come first.
    """

    # number of seconds a client is classified as active
    CLIENT_THRESHOLD = 60
    # notifications stay valid for 30 hours
    NOTIFICATION_TIMEOUT = 60 * 60 * 30
    # upper bound of notifications kept at once
    MAX_NOTIFICATIONS = 50

    def __init__(self, core):
        self.lock = Lock()
        self.core = core
        self.tasks = OrderedDict() #task store, for all outgoing tasks

        # user -> timestamp of last task retrieval
        self.last_clients = {}
        self.ids = 0 #unique interaction ids

    def isClientConnected(self, user):
        """True when *user* polled for tasks within CLIENT_THRESHOLD seconds."""
        return self.last_clients.get(user, 0) + self.CLIENT_THRESHOLD > time()

    @lock
    def work(self):
        """Periodic housekeeping: expire timed out tasks, cap notifications."""
        # old notifications will be removed
        for n in [k for k, v in self.tasks.iteritems() if v.timedOut()]:
            del self.tasks[n]

        # keep notifications count limited: after reversing, the newest come
        # first, so everything beyond MAX_NOTIFICATIONS is surplus.
        # (fix: the original deleted n[:MAX_NOTIFICATIONS], i.e. the newest
        # notifications, instead of the surplus oldest ones)
        n = [k for k, v in self.tasks.iteritems() if v.type == IA.Notification]
        n.reverse()
        for v in n[self.MAX_NOTIFICATIONS:]:
            del self.tasks[v]

    @lock
    def createNotification(self, title, content, desc="", plugin="", owner=None):
        """ Creates and queues a new Notification

        :param title: short title
        :param content: text content
        :param desc: short form of the notification
        :param plugin: plugin name
        :param owner: primary uid of the addressed user
        :return: :class:`InteractionTask`
        """
        task = InteractionTask(self.ids, IA.Notification, Input(InputType.Text, None, content), title, desc, plugin,
                               owner=owner)
        self.ids += 1
        self.queueTask(task)
        return task

    @lock
    def createQueryTask(self, input, desc, plugin="", owner=None):
        """Queue a query task; *input* may be an :class:`Input` instance or a
        plain :class:`InputType` value."""
        # input type was given, create a input widget
        if type(input) == int:
            input = Input(input)
        if not isinstance(input, Input):
            raise TypeError("'Input' class expected not '%s'" % type(input))

        task = InteractionTask(self.ids, IA.Query, input, _("Query"), desc, plugin, owner=owner)
        self.ids += 1
        self.queueTask(task)
        return task

    @lock
    def createCaptchaTask(self, img, format, filename, plugin="", type=InputType.Text, owner=None):
        """ Creates a new captcha task.

        :param img: image content (not base encoded)
        :param format: img format
        :param type: :class:`InputType`
        :return: :class:`InteractionTask`
        """
        # translate the legacy string types still used by some plugins
        if type == 'textual':
            type = InputType.Text
        elif type == 'positional':
            type = InputType.Click

        input = Input(type, data=[standard_b64encode(img), format, filename])

        #todo: title desc plugin
        task = InteractionTask(self.ids, IA.Captcha, input,
                               _("Captcha request"), _("Please solve the captcha."), plugin, owner=owner)

        self.ids += 1
        self.queueTask(task)
        return task

    @lock
    def removeTask(self, task):
        if task.iid in self.tasks:
            del self.tasks[task.iid]
            self.core.evm.dispatchEvent("interaction:deleted", task.iid)

    @lock
    def getTaskByID(self, iid):
        return self.tasks.get(iid, None)

    @lock
    def getTasks(self, user, mode=IA.All):
        """Tasks visible to *user*, filtered by the interaction *mode* bitmask."""
        # update last active clients
        self.last_clients[user] = time()

        # filter current mode
        tasks = [t for t in self.tasks.itervalues() if mode == IA.All or bits_set(t.type, mode)]
        # filter correct user / or shared
        tasks = [t for t in tasks if user is None or user == t.owner or t.shared]

        return tasks

    def isTaskWaiting(self, user, mode=IA.All):
        """True when at least one task (ignoring already seen notifications)
        waits for *user*."""
        tasks = [t for t in self.getTasks(user, mode) if not t.type == IA.Notification or not t.seen]
        return len(tasks) > 0

    def queueTask(self, task):
        """Assign a waiting time, announce the task to addons and store it."""
        cli = self.isClientConnected(task.owner)

        # set waiting times based on threshold
        if cli:
            task.setWaiting(self.CLIENT_THRESHOLD)
        else: # TODO: higher threshold after client connects?
            task.setWaiting(self.CLIENT_THRESHOLD / 3)

        if task.type == IA.Notification:
            task.setWaiting(self.NOTIFICATION_TIMEOUT) # notifications are valid for 30h

        for plugin in self.core.addonManager.activePlugins():
            try:
                plugin.newInteractionTask(task)
            except:
                # NOTE(review): presumably a Core helper that logs the
                # traceback -- confirm core.print_exc exists
                self.core.print_exc()

        self.tasks[task.iid] = task
        self.core.evm.dispatchEvent("interaction:added", task)
+
+
+if __name__ == "__main__":
+ it = InteractionTask() \ No newline at end of file
diff --git a/pyload/interaction/InteractionTask.py b/pyload/interaction/InteractionTask.py
new file mode 100644
index 000000000..b404aa6ce
--- /dev/null
+++ b/pyload/interaction/InteractionTask.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from time import time
+
+from pyload.Api import InteractionTask as BaseInteractionTask
+from pyload.Api import Interaction, InputType, Input
+
+#noinspection PyUnresolvedReferences
class InteractionTask(BaseInteractionTask):
    """
    General Interaction Task extends ITask defined by api with additional
    fields and methods needed for internal handling.
    """
    #: Plugins can put needed data here
    storage = None
    #: Timestamp when task expires
    wait_until = 0
    #: The received result
    result = None
    #: List of registered handles
    handler = None
    #: Error Message
    error = None
    #: Timeout locked
    locked = False
    #: A task that was retrieved counts as seen
    seen = False
    #: A task that is relevant to every user
    shared = False
    #: primary uid of the owner
    owner = None

    def __init__(self, *args, **kwargs):
        # strip our extra keywords before delegating to the api constructor
        if 'owner' in kwargs:
            self.owner = kwargs.pop('owner')
        if 'shared' in kwargs:
            self.shared = kwargs.pop('shared')

        BaseInteractionTask.__init__(self, *args, **kwargs)

        # additional internal attributes
        self.storage = {}
        self.handler = []
        self.wait_until = 0

    def convertResult(self, value):
        #TODO: convert based on input/output
        return value

    def getResult(self):
        return self.result

    def setShared(self):
        """ enable shared mode, should not be reversed"""
        self.shared = True

    def setResult(self, value):
        self.result = self.convertResult(value)

    def setWaiting(self, sec, lock=False):
        """ sets waiting in seconds from now, < 0 can be used as infinitive """
        if not self.locked:
            self.wait_until = -1 if sec < 0 else max(time() + sec, self.wait_until)
            if lock:
                self.locked = True

    def isWaiting(self):
        # a task stops waiting once it has a result, an error or timed out
        return not (self.result or self.error or self.timedOut())

    def timedOut(self):
        # -1 encodes "wait forever"
        return time() > self.wait_until > -1

    def correct(self):
        for h in self.handler:
            h.taskCorrect(self)

    def invalid(self):
        for h in self.handler:
            h.taskInvalid(self)
diff --git a/pyload/interaction/__init__.py b/pyload/interaction/__init__.py
new file mode 100644
index 000000000..de6d13128
--- /dev/null
+++ b/pyload/interaction/__init__.py
@@ -0,0 +1,2 @@
__author__ = 'christian'  # package marker only, no runtime logic
+ \ No newline at end of file
diff --git a/pyload/lib/Getch.py b/pyload/lib/Getch.py
new file mode 100644
index 000000000..22b7ea7f8
--- /dev/null
+++ b/pyload/lib/Getch.py
@@ -0,0 +1,76 @@
class Getch:
    """
    Reads a single character from standard input without echoing it to
    the screen. The platform specific backend is chosen once on creation.
    """

    def __init__(self):
        # probe backends: Windows first, then Mac Carbon, Unix as fallback
        try:
            self.impl = _GetchWindows()
        except ImportError:
            try:
                self.impl = _GetchMacCarbon()
            except (AttributeError, ImportError):
                self.impl = _GetchUnix()

    def __call__(self):
        return self.impl()
+
+
+class _GetchUnix:
+ def __init__(self):
+ import tty
+ import sys
+
+ def __call__(self):
+ import sys
+ import tty
+ import termios
+
+ fd = sys.stdin.fileno()
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(sys.stdin.fileno())
+ ch = sys.stdin.read(1)
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ return ch
+
+
+class _GetchWindows:
+ def __init__(self):
+ import msvcrt
+
+ def __call__(self):
+ import msvcrt
+
+ return msvcrt.getch()
+
class _GetchMacCarbon:
    """
    A function which returns the current ASCII key that is down;
    if no ASCII key is down, the null string is returned. The
    page http://www.mactech.com/macintosh-c/chap02-1.html was
    very helpful in figuring out how to do this.
    """

    def __init__(self):
        import Carbon
        # touching Carbon.Evt raises AttributeError on non-Mac platforms,
        # which Getch uses to fall through to the Unix backend
        Carbon.Evt

    def __call__(self):
        import Carbon

        # 0x0008 is the keyDownMask
        if Carbon.Evt.EventAvail(0x0008)[0] == 0:
            return ''
        # The event tuple is (what, msg, when, where, mod); msg carries the
        # ASCII code in its low byte (masked implicitly by chr()).
        (what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
        return chr(msg)
diff --git a/pyload/lib/ReadWriteLock.py b/pyload/lib/ReadWriteLock.py
new file mode 100644
index 000000000..cc82f3d48
--- /dev/null
+++ b/pyload/lib/ReadWriteLock.py
@@ -0,0 +1,232 @@
+# -*- coding: iso-8859-15 -*-
+"""locks.py - Read-Write lock thread lock implementation
+
+See the class documentation for more info.
+
+Copyright (C) 2007, Heiko Wundram.
+Released under the BSD-license.
+
+http://code.activestate.com/recipes/502283-read-write-lock-class-rlock-like/
+"""
+
+# Imports
+# -------
+
+from threading import Condition, Lock, currentThread
+from time import time
+
+
+# Read write lock
+# ---------------
+
class ReadWriteLock(object):
    """Read-Write lock class. A read-write lock differs from a standard
    threading.RLock() by allowing multiple threads to simultaneously hold a
    read lock, while allowing only a single thread to hold a write lock at the
    same point of time.

    When a read lock is requested while a write lock is held, the reader
    is blocked; when a write lock is requested while another write lock is
    held or there are read locks, the writer is blocked.

    Writers are always preferred by this implementation: if there are blocked
    threads waiting for a write lock, current readers may request more read
    locks (which they eventually should free, as they starve the waiting
    writers otherwise), but a new thread requesting a read lock will not
    be granted one, and block. This might mean starvation for readers if
    two writer threads interweave their calls to acquireWrite() without
    leaving a window only for readers.

    In case a current reader requests a write lock, this can and will be
    satisfied without giving up the read locks first, but, only one thread
    may perform this kind of lock upgrade, as a deadlock would otherwise
    occur. After the write lock has been granted, the thread will hold a
    full write lock, and not be downgraded after the upgrading call to
    acquireWrite() has been match by a corresponding release().
    """

    def __init__(self):
        """Initialize this read-write lock."""

        # Condition variable, used to signal waiters of a change in object
        # state.
        self.__condition = Condition(Lock())

        # Initialize with no writers.
        # NOTE: self.__writercount (write nesting depth) is created lazily on
        # the first successful acquireWrite().
        self.__writer = None
        self.__upgradewritercount = 0
        self.__pendingwriters = []

        # Initialize with no readers.
        # Maps reader thread -> read-lock nesting depth.
        self.__readers = {}

    def acquire(self, blocking=True, timeout=None, shared=False):
        # Convenience front-end choosing read (shared) or write (exclusive).
        # NOTE(review): *blocking* is accepted for Lock API compatibility but
        # ignored, and no success value is returned -- confirm callers do not
        # rely on either.
        if shared:
            self.acquireRead(timeout)
        else:
            self.acquireWrite(timeout)

    def acquireRead(self, timeout=None):
        """Acquire a read lock for the current thread, waiting at most
        timeout seconds or doing a non-blocking check in case timeout is <= 0.

        In case timeout is None, the call to acquireRead blocks until the
        lock request can be serviced.

        In case the timeout expires before the lock could be serviced, a
        RuntimeError is thrown."""

        if timeout is not None:
            endtime = time() + timeout
        me = currentThread()
        self.__condition.acquire()
        try:
            if self.__writer is me:
                # If we are the writer, grant a new read lock, always.
                self.__writercount += 1
                return
            while True:
                if self.__writer is None:
                    # Only test anything if there is no current writer.
                    if self.__upgradewritercount or self.__pendingwriters:
                        if me in self.__readers:
                            # Only grant a read lock if we already have one
                            # in case writers are waiting for their turn.
                            # This means that writers can't easily get starved
                            # (but see below, readers can).
                            self.__readers[me] += 1
                            return
                        # No, we aren't a reader (yet), wait for our turn.
                    else:
                        # Grant a new read lock, always, in case there are
                        # no pending writers (and no writer).
                        self.__readers[me] = self.__readers.get(me, 0) + 1
                        return
                if timeout is not None:
                    remaining = endtime - time()
                    if remaining <= 0:
                        # Timeout has expired, signal caller of this.
                        raise RuntimeError("Acquiring read lock timed out")
                    self.__condition.wait(remaining)
                else:
                    self.__condition.wait()
        finally:
            self.__condition.release()

    def acquireWrite(self, timeout=None):
        """Acquire a write lock for the current thread, waiting at most
        timeout seconds or doing a non-blocking check in case timeout is <= 0.

        In case the write lock cannot be serviced due to the deadlock
        condition mentioned above, a ValueError is raised.

        In case timeout is None, the call to acquireWrite blocks until the
        lock request can be serviced.

        In case the timeout expires before the lock could be serviced, a
        RuntimeError is thrown."""

        if timeout is not None:
            endtime = time() + timeout
        me, upgradewriter = currentThread(), False
        self.__condition.acquire()
        try:
            if self.__writer is me:
                # If we are the writer, grant a new write lock, always.
                self.__writercount += 1
                return
            elif me in self.__readers:
                # If we are a reader, no need to add us to pendingwriters,
                # we get the upgradewriter slot.
                if self.__upgradewritercount:
                    # If we are a reader and want to upgrade, and someone
                    # else also wants to upgrade, there is no way we can do
                    # this except if one of us releases all his read locks.
                    # Signal this to user.
                    raise ValueError(
                        "Inevitable dead lock, denying write lock"
                    )
                upgradewriter = True
                self.__upgradewritercount = self.__readers.pop(me)
            else:
                # We aren't a reader, so add us to the pending writers queue
                # for synchronization with the readers.
                self.__pendingwriters.append(me)
            while True:
                if not self.__readers and self.__writer is None:
                    # Only test anything if there are no readers and writers.
                    if self.__upgradewritercount:
                        if upgradewriter:
                            # There is a writer to upgrade, and it's us. Take
                            # the write lock.
                            self.__writer = me
                            self.__writercount = self.__upgradewritercount + 1
                            self.__upgradewritercount = 0
                            return
                        # There is a writer to upgrade, but it's not us.
                        # Always leave the upgrade writer the advance slot,
                        # because he presumes he'll get a write lock directly
                        # from a previously held read lock.
                    elif self.__pendingwriters[0] is me:
                        # If there are no readers and writers, it's always
                        # fine for us to take the writer slot, removing us
                        # from the pending writers queue.
                        # This might mean starvation for readers, though.
                        self.__writer = me
                        self.__writercount = 1
                        self.__pendingwriters = self.__pendingwriters[1:]
                        return
                if timeout is not None:
                    remaining = endtime - time()
                    if remaining <= 0:
                        # Timeout has expired, signal caller of this.
                        if upgradewriter:
                            # Put us back on the reader queue. No need to
                            # signal anyone of this change, because no other
                            # writer could've taken our spot before we got
                            # here (because of remaining readers), as the test
                            # for proper conditions is at the start of the
                            # loop, not at the end.
                            self.__readers[me] = self.__upgradewritercount
                            self.__upgradewritercount = 0
                        else:
                            # We were a simple pending writer, just remove us
                            # from the FIFO list.
                            self.__pendingwriters.remove(me)
                        raise RuntimeError("Acquiring write lock timed out")
                    self.__condition.wait(remaining)
                else:
                    self.__condition.wait()
        finally:
            self.__condition.release()

    def release(self):
        """Release the currently held lock.

        In case the current thread holds no lock, a ValueError is thrown."""

        me = currentThread()
        self.__condition.acquire()
        try:
            if self.__writer is me:
                # We are the writer, take one nesting depth away.
                self.__writercount -= 1
                if not self.__writercount:
                    # No more write locks; take our writer position away and
                    # notify waiters of the new circumstances.
                    self.__writer = None
                    self.__condition.notifyAll()
            elif me in self.__readers:
                # We are a reader currently, take one nesting depth away.
                self.__readers[me] -= 1
                if not self.__readers[me]:
                    # No more read locks, take our reader position away.
                    del self.__readers[me]
                    if not self.__readers:
                        # No more readers, notify waiters of the new
                        # circumstances.
                        self.__condition.notifyAll()
            else:
                raise ValueError("Trying to release unheld lock")
        finally:
            self.__condition.release()
diff --git a/pyload/lib/SafeEval.py b/pyload/lib/SafeEval.py
new file mode 100644
index 000000000..8fc57f261
--- /dev/null
+++ b/pyload/lib/SafeEval.py
@@ -0,0 +1,47 @@
## {{{ http://code.activestate.com/recipes/286134/ (r3) (modified)
# Restricted expression evaluator: only bytecode that builds constant
# data structures is allowed, so no arbitrary code can run via eval().
import dis

# Opcodes considered safe for constant expressions: stack shuffling,
# literal loads, and list/map/tuple construction only.
_const_codes = map(dis.opmap.__getitem__, [
    'POP_TOP','ROT_TWO','ROT_THREE','ROT_FOUR','DUP_TOP',
    'BUILD_LIST','BUILD_MAP','BUILD_TUPLE',
    'LOAD_CONST','RETURN_VALUE','STORE_SUBSCR'
    ])


# Bare names an expression may reference (JSON-style spellings of the
# boolean/None constants are accepted alongside the Python ones).
_load_names = ['False', 'True', 'null', 'true', 'false']

# Values substituted for the JSON-style names during evaluation.
_locals = {'null': None, 'true': True, 'false': False}
+
def _get_opcodes(codeobj):
    """Return ``(opcodes, names)`` extracted from a code object.

    Walks the raw bytecode string, collecting each opcode and skipping
    its argument bytes (Python 2 layout: opcodes at or above
    ``dis.HAVE_ARGUMENT`` carry a two-byte argument).
    """
    i = 0
    opcodes = []
    s = codeobj.co_code
    names = codeobj.co_names
    while i < len(s):
        code = ord(s[i])
        opcodes.append(code)
        if code >= dis.HAVE_ARGUMENT:
            # Skip the 2-byte argument of this opcode.
            i += 3
        else:
            i += 1
    return opcodes, names
+
def test_expr(expr, allowed_codes):
    """Compile *expr* and verify it only uses whitelisted bytecode.

    Returns the compiled code object on success.  Raises ValueError if
    *expr* does not compile, or if it uses an opcode outside
    *allowed_codes* while also referencing a name not in ``_load_names``.

    NOTE(review): the raise sits inside the loop over names, so an
    expression with a disallowed opcode but no (or only whitelisted)
    names passes silently -- confirm this leniency is intended.
    """
    try:
        c = compile(expr, "", "eval")
    except:
        raise ValueError, "%s is not a valid expression" % expr
    codes, names = _get_opcodes(c)
    for code in codes:
        if code not in allowed_codes:
            for n in names:
                if n not in _load_names:
                    raise ValueError, "opcode %s not allowed" % dis.opname[code]
    return c
+
+
def const_eval(expr):
    """Safely evaluate *expr*, permitting only constant-building opcodes.

    JSON-style ``null``/``true``/``false`` names are mapped to their
    Python equivalents via ``_locals``.
    """
    c = test_expr(expr, _const_codes)
    return eval(c, None, _locals)
+
+## end of http://code.activestate.com/recipes/286134/ }}}
diff --git a/pyload/lib/__init__.py b/pyload/lib/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/lib/__init__.py
diff --git a/pyload/lib/beaker/__init__.py b/pyload/lib/beaker/__init__.py
new file mode 100644
index 000000000..792d60054
--- /dev/null
+++ b/pyload/lib/beaker/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/pyload/lib/beaker/cache.py b/pyload/lib/beaker/cache.py
new file mode 100644
index 000000000..4a96537ff
--- /dev/null
+++ b/pyload/lib/beaker/cache.py
@@ -0,0 +1,459 @@
+"""Cache object
+
+The Cache object is used to manage a set of cache files and their
+associated backend. The backends can be rotated on the fly by
+specifying an alternate type when used.
+
+Advanced users can add new backends in beaker.backends
+
+"""
+
+import warnings
+
+import beaker.container as container
+import beaker.util as util
+from beaker.exceptions import BeakerException, InvalidCacheBackendError
+
+import beaker.ext.memcached as memcached
+import beaker.ext.database as database
+import beaker.ext.sqla as sqla
+import beaker.ext.google as google
+
+# Initialize the basic available backends
+clsmap = {
+ 'memory':container.MemoryNamespaceManager,
+ 'dbm':container.DBMNamespaceManager,
+ 'file':container.FileNamespaceManager,
+ 'ext:memcached':memcached.MemcachedNamespaceManager,
+ 'ext:database':database.DatabaseNamespaceManager,
+ 'ext:sqla': sqla.SqlaNamespaceManager,
+ 'ext:google': google.GoogleNamespaceManager,
+ }
+
+# Initialize the cache region dict
+cache_regions = {}
+cache_managers = {}
+
+try:
+ import pkg_resources
+
+ # Load up the additional entry point defined backends
+ for entry_point in pkg_resources.iter_entry_points('beaker.backends'):
+ try:
+ NamespaceManager = entry_point.load()
+ name = entry_point.name
+ if name in clsmap:
+ raise BeakerException("NamespaceManager name conflict,'%s' "
+ "already loaded" % name)
+ clsmap[name] = NamespaceManager
+ except (InvalidCacheBackendError, SyntaxError):
+ # Ignore invalid backends
+ pass
+ except:
+ import sys
+ from pkg_resources import DistributionNotFound
+ # Warn when there's a problem loading a NamespaceManager
+ if not isinstance(sys.exc_info()[1], DistributionNotFound):
+ import traceback
+ from StringIO import StringIO
+ tb = StringIO()
+ traceback.print_exc(file=tb)
+ warnings.warn("Unable to load NamespaceManager entry point: '%s': "
+ "%s" % (entry_point, tb.getvalue()), RuntimeWarning,
+ 2)
+except ImportError:
+ pass
+
+
+
+
def cache_region(region, *deco_args):
    """Decorate a function to cache itself using a cache region

    The region decorator requires arguments if there are more than
    2 of the same named function, in the same module. This is
    because the namespace used for the functions cache is based on
    the functions name and the module.


    Example::

        # Add cache region settings to beaker:
        beaker.cache.cache_regions.update(dict_of_config_region_options))

        @cache_region('short_term', 'some_data')
        def populate_things(search_term, limit, offset):
            return load_the_data(search_term, limit, offset)

        return load('rabbits', 20, 0)

    .. note::

        The function being decorated must only be called with
        positional arguments.

    """
    # One-element list so the nested closure can rebind the cached
    # Cache instance lazily on first call.
    cache = [None]

    def decorate(func):
        namespace = util.func_namespace(func)
        def cached(*args):
            # BUGFIX: check that the region is configured *before*
            # indexing cache_regions.  Previously the membership check
            # sat after ``cache_regions[region]`` (and behind
            # ``if not cache[0]``), so a missing region raised a bare
            # KeyError and the intended BeakerException was unreachable.
            if region not in cache_regions:
                raise BeakerException('Cache region not configured: %s' % region)
            reg = cache_regions[region]
            if not reg.get('enabled', True):
                # Region disabled: bypass the cache entirely.
                return func(*args)

            if not cache[0]:
                cache[0] = Cache._get_cache(namespace, reg)

            # Key combines decorator args and call args; positional
            # args only (see docstring note).
            cache_key = " ".join(map(str, deco_args + args))
            def go():
                return func(*args)

            return cache[0].get_value(cache_key, createfunc=go)
        # Expose namespace/region for region_invalidate().
        cached._arg_namespace = namespace
        cached._arg_region = region
        return cached
    return decorate
+
+
def region_invalidate(namespace, region, *args):
    """Invalidate a cache region namespace or decorated function

    This function only invalidates cache spaces created with the
    cache_region decorator.

    :param namespace: Either the namespace of the result to invalidate, or the
       cached function reference

    :param region: The region the function was cached to. If the function was
        cached to a single region then this argument can be None

    :param args: Arguments that were used to differentiate the cached
        function as well as the arguments passed to the decorated
        function

    Example::

        # Add cache region settings to beaker:
        beaker.cache.cache_regions.update(dict_of_config_region_options))

        def populate_things(invalidate=False):

            @cache_region('short_term', 'some_data')
            def load(search_term, limit, offset):
                return load_the_data(search_term, limit, offset)

            # If the results should be invalidated first
            if invalidate:
                region_invalidate(load, None, 'some_data',
                                        'rabbits', 20, 0)
            return load('rabbits', 20, 0)

    """
    if callable(namespace):
        # A decorated function was passed: recover the namespace (and
        # region, if not given) from the attributes cache_region() set.
        if not region:
            region = namespace._arg_region
        namespace = namespace._arg_namespace

    if not region:
        raise BeakerException("Region or callable function "
                              "namespace is required")
    else:
        region = cache_regions[region]

    # Rebuild the same key format cache_region() used and drop it.
    cache = Cache._get_cache(namespace, region)
    cache_key = " ".join(str(x) for x in args)
    cache.remove_value(cache_key)
+
+
class Cache(object):
    """Front-end to the containment API implementing a data cache.

    :param namespace: the namespace of this Cache

    :param type: type of cache to use

    :param expire: seconds to keep cached data

    :param expiretime: seconds to keep cached data (legacy support)

    :param starttime: time when cache was cache was

    """
    def __init__(self, namespace, type='memory', expiretime=None,
                 starttime=None, expire=None, **nsargs):
        try:
            cls = clsmap[type]
            # Backends that failed to import are stored in clsmap as the
            # exception instance; surface that failure on first use.
            if isinstance(cls, InvalidCacheBackendError):
                raise cls
        except KeyError:
            raise TypeError("Unknown cache implementation %r" % type)

        self.namespace = cls(namespace, **nsargs)
        # 'expire' and 'expiretime' are synonyms; 'expiretime' wins.
        self.expiretime = expiretime or expire
        self.starttime = starttime
        self.nsargs = nsargs

    @classmethod
    def _get_cache(cls, namespace, kw):
        # Memoize Cache instances on namespace + configuration so the
        # same backend object is reused across calls.
        key = namespace + str(kw)
        try:
            return cache_managers[key]
        except KeyError:
            cache_managers[key] = cache = cls(namespace, **kw)
            return cache

    def put(self, key, value, **kw):
        self._get_value(key, **kw).set_value(value)
    set_value = put

    def get(self, key, **kw):
        """Retrieve a cached value from the container"""
        return self._get_value(key, **kw).get_value()
    get_value = get

    def remove_value(self, key, **kw):
        mycontainer = self._get_value(key, **kw)
        if mycontainer.has_current_value():
            mycontainer.clear_value()
    remove = remove_value

    def _get_value(self, key, **kw):
        # Backend keys must be byte strings.
        if isinstance(key, unicode):
            key = key.encode('ascii', 'backslashreplace')

        if 'type' in kw:
            # Legacy API path: per-call backend selection.
            return self._legacy_get_value(key, **kw)

        kw.setdefault('expiretime', self.expiretime)
        kw.setdefault('starttime', self.starttime)

        return container.Value(key, self.namespace, **kw)

    @util.deprecated("Specifying a "
            "'type' and other namespace configuration with cache.get()/put()/etc. "
            "is deprecated. Specify 'type' and other namespace configuration to "
            "cache_manager.get_cache() and/or the Cache constructor instead.")
    def _legacy_get_value(self, key, type, **kw):
        # Build a throwaway Cache of the requested type, merging this
        # cache's namespace args with the per-call overrides.
        expiretime = kw.pop('expiretime', self.expiretime)
        starttime = kw.pop('starttime', None)
        createfunc = kw.pop('createfunc', None)
        kwargs = self.nsargs.copy()
        kwargs.update(kw)
        c = Cache(self.namespace.namespace, type=type, **kwargs)
        return c._get_value(key, expiretime=expiretime, createfunc=createfunc,
                            starttime=starttime)

    def clear(self):
        """Clear all the values from the namespace"""
        self.namespace.remove()

    # dict interface
    def __getitem__(self, key):
        return self.get(key)

    def __contains__(self, key):
        return self._get_value(key).has_current_value()

    def has_key(self, key):
        return key in self

    def __delitem__(self, key):
        self.remove_value(key)

    def __setitem__(self, key, value):
        self.put(key, value)
+
+
class CacheManager(object):
    """Manages a set of Cache objects and region configuration."""

    def __init__(self, **kwargs):
        """Initialize a CacheManager object with a set of options

        Options should be parsed with the
        :func:`~beaker.util.parse_cache_config_options` function to
        ensure only valid options are used.

        """
        self.kwargs = kwargs
        self.regions = kwargs.pop('cache_regions', {})

        # Add these regions to the module global
        cache_regions.update(self.regions)

    def get_cache(self, name, **kwargs):
        # Per-call kwargs override the manager's defaults.
        kw = self.kwargs.copy()
        kw.update(kwargs)
        return Cache._get_cache(name, kw)

    def get_cache_region(self, name, region):
        if region not in self.regions:
            raise BeakerException('Cache region not configured: %s' % region)
        kw = self.regions[region]
        return Cache._get_cache(name, kw)

    def region(self, region, *args):
        """Decorate a function to cache itself using a cache region

        The region decorator requires arguments if there are more than
        2 of the same named function, in the same module. This is
        because the namespace used for the functions cache is based on
        the functions name and the module.


        Example::

            # Assuming a cache object is available like:
            cache = CacheManager(dict_of_config_options)


            def populate_things():

                @cache.region('short_term', 'some_data')
                def load(search_term, limit, offset):
                    return load_the_data(search_term, limit, offset)

                return load('rabbits', 20, 0)

        .. note::

            The function being decorated must only be called with
            positional arguments.

        """
        return cache_region(region, *args)

    def region_invalidate(self, namespace, region, *args):
        """Invalidate a cache region namespace or decorated function

        This function only invalidates cache spaces created with the
        cache_region decorator.

        :param namespace: Either the namespace of the result to invalidate, or the
            name of the cached function

        :param region: The region the function was cached to. If the function was
            cached to a single region then this argument can be None

        :param args: Arguments that were used to differentiate the cached
            function as well as the arguments passed to the decorated
            function

        Example::

            # Assuming a cache object is available like:
            cache = CacheManager(dict_of_config_options)

            def populate_things(invalidate=False):

                @cache.region('short_term', 'some_data')
                def load(search_term, limit, offset):
                    return load_the_data(search_term, limit, offset)

                # If the results should be invalidated first
                if invalidate:
                    cache.region_invalidate(load, None, 'some_data',
                                            'rabbits', 20, 0)
                return load('rabbits', 20, 0)


        """
        # BUGFIX: delegate to the module-level region_invalidate().
        # Everything that previously followed this return (a duplicate
        # copy of the invalidation logic using self.regions) was
        # unreachable dead code and has been removed.
        return region_invalidate(namespace, region, *args)

    def cache(self, *args, **kwargs):
        """Decorate a function to cache itself with supplied parameters

        :param args: Used to make the key unique for this function, as in region()
            above.

        :param kwargs: Parameters to be passed to get_cache(), will override defaults

        Example::

            # Assuming a cache object is available like:
            cache = CacheManager(dict_of_config_options)


            def populate_things():

                @cache.cache('mycache', expire=15)
                def load(search_term, limit, offset):
                    return load_the_data(search_term, limit, offset)

                return load('rabbits', 20, 0)

        .. note::

            The function being decorated must only be called with
            positional arguments.

        """
        # One-element list so the closure can lazily bind the Cache.
        cache = [None]
        key = " ".join(str(x) for x in args)

        def decorate(func):
            namespace = util.func_namespace(func)
            def cached(*args):
                if not cache[0]:
                    cache[0] = self.get_cache(namespace, **kwargs)
                cache_key = key + " " + " ".join(str(x) for x in args)
                def go():
                    return func(*args)
                return cache[0].get_value(cache_key, createfunc=go)
            cached._arg_namespace = namespace
            return cached
        return decorate

    def invalidate(self, func, *args, **kwargs):
        """Invalidate a cache decorated function

        This function only invalidates cache spaces created with the
        cache decorator.

        :param func: Decorated function to invalidate

        :param args: Used to make the key unique for this function, as in region()
            above.

        :param kwargs: Parameters that were passed for use by get_cache(), note that
            this is only required if a ``type`` was specified for the
            function

        Example::

            # Assuming a cache object is available like:
            cache = CacheManager(dict_of_config_options)


            def populate_things(invalidate=False):

                @cache.cache('mycache', type="file", expire=15)
                def load(search_term, limit, offset):
                    return load_the_data(search_term, limit, offset)

                # If the results should be invalidated first
                if invalidate:
                    cache.invalidate(load, 'mycache', 'rabbits', 20, 0, type="file")
                return load('rabbits', 20, 0)

        """
        namespace = func._arg_namespace

        cache = self.get_cache(namespace, **kwargs)
        cache_key = " ".join(str(x) for x in args)
        cache.remove_value(cache_key)
diff --git a/pyload/lib/beaker/container.py b/pyload/lib/beaker/container.py
new file mode 100644
index 000000000..515e97af6
--- /dev/null
+++ b/pyload/lib/beaker/container.py
@@ -0,0 +1,633 @@
+"""Container and Namespace classes"""
+import anydbm
+import cPickle
+import logging
+import os
+import time
+
+import beaker.util as util
+from beaker.exceptions import CreationAbortedError, MissingCacheParameter
+from beaker.synchronization import _threading, file_synchronizer, \
+ mutex_synchronizer, NameLock, null_synchronizer
+
+__all__ = ['Value', 'Container', 'ContainerContext',
+ 'MemoryContainer', 'DBMContainer', 'NamespaceManager',
+ 'MemoryNamespaceManager', 'DBMNamespaceManager', 'FileContainer',
+ 'OpenResourceNamespaceManager',
+ 'FileNamespaceManager', 'CreationAbortedError']
+
+
+logger = logging.getLogger('beaker.container')
+if logger.isEnabledFor(logging.DEBUG):
+ debug = logger.debug
+else:
+ def debug(message, *args):
+ pass
+
+
class NamespaceManager(object):
    """Handles dictionary operations and locking for a namespace of
    values.

    The implementation for setting and retrieving the namespace data is
    handled by subclasses.

    NamespaceManager may be used alone, or may be privately accessed by
    one or more Container objects. Container objects provide per-key
    services like expiration times and automatic recreation of values.

    Multiple NamespaceManagers created with a particular name will all
    share access to the same underlying datasource and will attempt to
    synchronize against a common mutex object. The scope of this
    sharing may be within a single process or across multiple
    processes, depending on the type of NamespaceManager used.

    The NamespaceManager itself is generally threadsafe, except in the
    case of the DBMNamespaceManager in conjunction with the gdbm dbm
    implementation.

    """

    @classmethod
    def _init_dependencies(cls):
        # Hook for subclasses to import/verify optional dependencies
        # before first instantiation.
        pass

    def __init__(self, namespace):
        self._init_dependencies()
        self.namespace = namespace

    def get_creation_lock(self, key):
        # Subclass responsibility: return a lock guarding value creation.
        raise NotImplementedError()

    def do_remove(self):
        # Subclass responsibility: destroy the namespace's backing store.
        raise NotImplementedError()

    # The default lock methods are no-ops; subclasses with real shared
    # resources (files, dbm) override them (see
    # OpenResourceNamespaceManager).
    def acquire_read_lock(self):
        pass

    def release_read_lock(self):
        pass

    def acquire_write_lock(self, wait=True):
        return True

    def release_write_lock(self):
        pass

    def has_key(self, key):
        return self.__contains__(key)

    def __getitem__(self, key):
        raise NotImplementedError()

    def __setitem__(self, key, value):
        raise NotImplementedError()

    def set_value(self, key, value, expiretime=None):
        """Optional set_value() method called by Value.

        Allows an expiretime to be passed, for namespace
        implementations which can prune their collections
        using expiretime.

        """
        # Default implementation ignores expiretime.
        self[key] = value

    def __contains__(self, key):
        raise NotImplementedError()

    def __delitem__(self, key):
        raise NotImplementedError()

    def keys(self):
        raise NotImplementedError()

    def remove(self):
        self.do_remove()
+
+
class OpenResourceNamespaceManager(NamespaceManager):
    """A NamespaceManager where read/write operations require opening/
    closing of a resource which is possibly mutexed.

    """
    def __init__(self, namespace):
        NamespaceManager.__init__(self, namespace)
        self.access_lock = self.get_access_lock()
        # Reference count of open() calls: the resource is opened by the
        # first opener and closed by the last (see open()/close()).
        self.openers = 0
        self.mutex = _threading.Lock()

    def get_access_lock(self):
        # Subclass responsibility: return a read/write synchronizer.
        raise NotImplementedError()

    def do_open(self, flags):
        raise NotImplementedError()

    def do_close(self):
        raise NotImplementedError()

    def acquire_read_lock(self):
        self.access_lock.acquire_read_lock()
        try:
            self.open('r', checkcount = True)
        except:
            # Don't keep the access lock if the resource failed to open.
            self.access_lock.release_read_lock()
            raise

    def release_read_lock(self):
        try:
            self.close(checkcount = True)
        finally:
            self.access_lock.release_read_lock()

    def acquire_write_lock(self, wait=True):
        r = self.access_lock.acquire_write_lock(wait)
        try:
            # Only open when the lock was acquired (always true when
            # wait=True; r tells us for the non-blocking case).
            if (wait or r):
                self.open('c', checkcount = True)
            return r
        except:
            self.access_lock.release_write_lock()
            raise

    def release_write_lock(self):
        try:
            self.close(checkcount=True)
        finally:
            self.access_lock.release_write_lock()

    def open(self, flags, checkcount=False):
        self.mutex.acquire()
        try:
            if checkcount:
                # Reference-counted open: only the first opener actually
                # touches the resource.
                if self.openers == 0:
                    self.do_open(flags)
                self.openers += 1
            else:
                # Unconditional open: reset the refcount to one.
                self.do_open(flags)
                self.openers = 1
        finally:
            self.mutex.release()

    def close(self, checkcount=False):
        self.mutex.acquire()
        try:
            if checkcount:
                # Reference-counted close: only the last closer actually
                # closes the resource.
                self.openers -= 1
                if self.openers == 0:
                    self.do_close()
            else:
                if self.openers > 0:
                    self.do_close()
                self.openers = 0
        finally:
            self.mutex.release()

    def remove(self):
        # Destroying the namespace requires exclusive access and a
        # closed resource.
        self.access_lock.acquire_write_lock()
        try:
            self.close(checkcount=False)
            self.do_remove()
        finally:
            self.access_lock.release_write_lock()
+
class Value(object):
    """A single cached value stored in a NamespaceManager.

    Wraps the ``(storedtime, expiretime, value)`` triple kept in the
    namespace, checks expiry against ``starttime``/``expiretime``, and
    re-creates expired/missing values via ``createfunc`` under a
    per-key creation lock.
    """
    __slots__ = 'key', 'createfunc', 'expiretime', 'expire_argument', 'starttime', 'storedtime',\
                'namespace'

    def __init__(self, key, namespace, createfunc=None, expiretime=None, starttime=None):
        self.key = key
        self.createfunc = createfunc
        self.expire_argument = expiretime
        self.starttime = starttime
        # -1 means "no value stored yet / cleared".
        self.storedtime = -1
        self.namespace = namespace

    def has_value(self):
        """return true if the container has a value stored.

        This is regardless of it being expired or not.

        """
        self.namespace.acquire_read_lock()
        try:
            return self.namespace.has_key(self.key)
        finally:
            self.namespace.release_read_lock()

    def can_have_value(self):
        return self.has_current_value() or self.createfunc is not None

    def has_current_value(self):
        """Return true if a value is stored and it has not expired."""
        self.namespace.acquire_read_lock()
        try:
            has_value = self.namespace.has_key(self.key)
            if has_value:
                try:
                    stored, expired, value = self._get_value()
                    return not self._is_expired(stored, expired)
                except KeyError:
                    pass
            return False
        finally:
            self.namespace.release_read_lock()

    def _is_expired(self, storedtime, expiretime):
        """Return true if this container's value is expired."""
        # Expired when stored before starttime (cache "epoch"), or when
        # its time-to-live has elapsed.
        return (
            (
                self.starttime is not None and
                storedtime < self.starttime
            )
            or
            (
                expiretime is not None and
                time.time() >= expiretime + storedtime
            )
        )

    def get_value(self):
        """Return the cached value, (re)creating it if needed.

        Raises KeyError when there is no current value and no
        ``createfunc`` to build one.
        """
        self.namespace.acquire_read_lock()
        try:
            has_value = self.has_value()
            if has_value:
                try:
                    stored, expired, value = self._get_value()
                    if not self._is_expired(stored, expired):
                        return value
                except KeyError:
                    # guard against un-mutexed backends raising KeyError
                    has_value = False

            if not self.createfunc:
                raise KeyError(self.key)
        finally:
            self.namespace.release_read_lock()

        has_createlock = False
        creation_lock = self.namespace.get_creation_lock(self.key)
        if has_value:
            if not creation_lock.acquire(wait=False):
                debug("get_value returning old value while new one is created")
                return value
            else:
                debug("lock_creatfunc (didnt wait)")
                has_createlock = True

        if not has_createlock:
            debug("lock_createfunc (waiting)")
            creation_lock.acquire()
            debug("lock_createfunc (waited)")

        try:
            # see if someone created the value already
            self.namespace.acquire_read_lock()
            try:
                if self.has_value():
                    try:
                        stored, expired, value = self._get_value()
                        if not self._is_expired(stored, expired):
                            return value
                    except KeyError:
                        # guard against un-mutexed backends raising KeyError
                        pass
            finally:
                self.namespace.release_read_lock()

            debug("get_value creating new value")
            v = self.createfunc()
            self.set_value(v)
            return v
        finally:
            creation_lock.release()
            debug("released create lock")

    def _get_value(self):
        # Caller must hold the read lock.
        value = self.namespace[self.key]
        try:
            stored, expired, value = value
        except ValueError:
            if not len(value) == 2:
                raise
            # Old format: upgrade
            stored, value = value
            expired = self.expire_argument
            debug("get_value upgrading time %r expire time %r", stored, self.expire_argument)
            # Temporarily trade the read lock for a write (set_value
            # acquires its own write lock) to persist the new format.
            self.namespace.release_read_lock()
            self.set_value(value, stored)
            self.namespace.acquire_read_lock()
        except TypeError:
            # occurs when the value is None. memcached
            # may yank the rug from under us in which case
            # that's the result
            raise KeyError(self.key)
        return stored, expired, value

    def set_value(self, value, storedtime=None):
        self.namespace.acquire_write_lock()
        try:
            if storedtime is None:
                storedtime = time.time()
            debug("set_value stored time %r expire time %r", storedtime, self.expire_argument)
            self.namespace.set_value(self.key, (storedtime, self.expire_argument, value))
        finally:
            self.namespace.release_write_lock()

    def clear_value(self):
        self.namespace.acquire_write_lock()
        try:
            debug("clear_value")
            if self.namespace.has_key(self.key):
                try:
                    del self.namespace[self.key]
                except KeyError:
                    # guard against un-mutexed backends raising KeyError
                    pass
            self.storedtime = -1
        finally:
            self.namespace.release_write_lock()
+
class AbstractDictionaryNSManager(NamespaceManager):
    """A subclassable NamespaceManager that places data in a dictionary.

    Subclasses should provide a "dictionary" attribute or descriptor
    which returns a dict-like object. The dictionary will store keys
    that are local to the "namespace" attribute of this manager, so
    ensure that the dictionary will not be used by any other namespace.

    e.g.::

        import collections
        cached_data = collections.defaultdict(dict)

        class MyDictionaryManager(AbstractDictionaryNSManager):
            def __init__(self, namespace):
                AbstractDictionaryNSManager.__init__(self, namespace)
                self.dictionary = cached_data[self.namespace]

    The above stores data in a global dictionary called "cached_data",
    which is structured as a dictionary of dictionaries, keyed
    first on namespace name to a sub-dictionary, then on actual
    cache key to value.

    """

    def get_creation_lock(self, key):
        # In-process lock only; per-(namespace, key) identifier keeps
        # creation of distinct keys independent.
        return NameLock(
            identifier="memorynamespace/funclock/%s/%s" % (self.namespace, key),
            reentrant=True
        )

    # Straight delegation to the backing dict-like object.
    def __getitem__(self, key):
        return self.dictionary[key]

    def __contains__(self, key):
        return self.dictionary.__contains__(key)

    def has_key(self, key):
        return self.dictionary.__contains__(key)

    def __setitem__(self, key, value):
        self.dictionary[key] = value

    def __delitem__(self, key):
        del self.dictionary[key]

    def do_remove(self):
        self.dictionary.clear()

    def keys(self):
        return self.dictionary.keys()
+
class MemoryNamespaceManager(AbstractDictionaryNSManager):
    """In-process namespace manager: one shared dict per namespace name.

    ``namespaces`` is class-level, so all instances created with the
    same namespace string share the same backing dictionary.
    """
    namespaces = util.SyncDict()

    def __init__(self, namespace, **kwargs):
        AbstractDictionaryNSManager.__init__(self, namespace)
        # get(key, factory): returns the existing dict or creates one.
        self.dictionary = MemoryNamespaceManager.namespaces.get(self.namespace,
                                                                dict)
+
class DBMNamespaceManager(OpenResourceNamespaceManager):
    """Namespace manager backed by a dbm file (anydbm by default).

    Values are pickled with cPickle; cross-process synchronization is
    done with file-based locks in ``lock_dir``.
    """
    def __init__(self, namespace, dbmmodule=None, data_dir=None,
            dbm_dir=None, lock_dir=None, digest_filenames=True, **kwargs):
        self.digest_filenames = digest_filenames

        # Explicit dbm_dir wins; otherwise derive it from data_dir.
        if not dbm_dir and not data_dir:
            raise MissingCacheParameter("data_dir or dbm_dir is required")
        elif dbm_dir:
            self.dbm_dir = dbm_dir
        else:
            self.dbm_dir = data_dir + "/container_dbm"
        util.verify_directory(self.dbm_dir)

        # Same fallback scheme for the lock directory.
        if not lock_dir and not data_dir:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        elif lock_dir:
            self.lock_dir = lock_dir
        else:
            self.lock_dir = data_dir + "/container_dbm_lock"
        util.verify_directory(self.lock_dir)

        self.dbmmodule = dbmmodule or anydbm

        self.dbm = None
        OpenResourceNamespaceManager.__init__(self, namespace)

        self.file = util.encoded_path(root= self.dbm_dir,
                                      identifiers=[self.namespace],
                                      extension='.dbm',
                                      digest_filenames=self.digest_filenames)

        debug("data file %s", self.file)
        self._checkfile()

    def get_access_lock(self):
        return file_synchronizer(identifier=self.namespace,
                                 lock_dir=self.lock_dir)

    def get_creation_lock(self, key):
        # One creation lock per namespace (not per key).
        return file_synchronizer(
                    identifier = "dbmcontainer/funclock/%s" % self.namespace,
                    lock_dir=self.lock_dir
                    )

    def file_exists(self, file):
        # Some dbm implementations store data under derived filenames
        # (.db/.dat/.pag/.dir), so check those too.
        if os.access(file, os.F_OK):
            return True
        else:
            for ext in ('db', 'dat', 'pag', 'dir'):
                if os.access(file + os.extsep + ext, os.F_OK):
                    return True

        return False

    def _checkfile(self):
        # Create the dbm file on first use.
        if not self.file_exists(self.file):
            g = self.dbmmodule.open(self.file, 'c')
            g.close()

    def get_filenames(self):
        # NOTE(review): local name ``list`` shadows the builtin;
        # harmless here but worth renaming on a future pass.
        list = []
        if os.access(self.file, os.F_OK):
            list.append(self.file)

        for ext in ('pag', 'dir', 'db', 'dat'):
            if os.access(self.file + os.extsep + ext, os.F_OK):
                list.append(self.file + os.extsep + ext)
        return list

    def do_open(self, flags):
        debug("opening dbm file %s", self.file)
        try:
            self.dbm = self.dbmmodule.open(self.file, flags)
        except:
            # File may have been removed behind our back; recreate and
            # retry once.
            self._checkfile()
            self.dbm = self.dbmmodule.open(self.file, flags)

    def do_close(self):
        if self.dbm is not None:
            debug("closing dbm file %s", self.file)
            self.dbm.close()

    def do_remove(self):
        for f in self.get_filenames():
            os.remove(f)

    def __getitem__(self, key):
        return cPickle.loads(self.dbm[key])

    def __contains__(self, key):
        return self.dbm.has_key(key)

    def __setitem__(self, key, value):
        self.dbm[key] = cPickle.dumps(value)

    def __delitem__(self, key):
        del self.dbm[key]

    def keys(self):
        return self.dbm.keys()
+
+
class FileNamespaceManager(OpenResourceNamespaceManager):
    """Namespace manager storing the whole namespace as one pickle file.

    The namespace dict is loaded into ``self.hash`` on open and written
    back on close when opened for writing.
    """
    def __init__(self, namespace, data_dir=None, file_dir=None, lock_dir=None,
                 digest_filenames=True, **kwargs):
        self.digest_filenames = digest_filenames

        # Explicit file_dir wins; otherwise derive it from data_dir.
        if not file_dir and not data_dir:
            raise MissingCacheParameter("data_dir or file_dir is required")
        elif file_dir:
            self.file_dir = file_dir
        else:
            self.file_dir = data_dir + "/container_file"
        util.verify_directory(self.file_dir)

        if not lock_dir and not data_dir:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        elif lock_dir:
            self.lock_dir = lock_dir
        else:
            self.lock_dir = data_dir + "/container_file_lock"
        util.verify_directory(self.lock_dir)
        OpenResourceNamespaceManager.__init__(self, namespace)

        self.file = util.encoded_path(root=self.file_dir,
                                      identifiers=[self.namespace],
                                      extension='.cache',
                                      digest_filenames=self.digest_filenames)
        # In-memory copy of the namespace; (re)filled by do_open().
        self.hash = {}

        debug("data file %s", self.file)

    def get_access_lock(self):
        return file_synchronizer(identifier=self.namespace,
                                 lock_dir=self.lock_dir)

    def get_creation_lock(self, key):
        # One creation lock per namespace (not per key).
        return file_synchronizer(
                identifier = "filecontainer/funclock/%s" % self.namespace,
                lock_dir = self.lock_dir
                )

    def file_exists(self, file):
        return os.access(file, os.F_OK)

    def do_open(self, flags):
        if self.file_exists(self.file):
            fh = open(self.file, 'rb')
            try:
                self.hash = cPickle.load(fh)
            except (IOError, OSError, EOFError, cPickle.PickleError, ValueError):
                # Corrupt/empty cache file: start over with an empty
                # namespace rather than failing.
                pass
            fh.close()

        self.flags = flags

    def do_close(self):
        # Only persist when the namespace was opened for writing.
        if self.flags == 'c' or self.flags == 'w':
            fh = open(self.file, 'wb')
            cPickle.dump(self.hash, fh)
            fh.close()

        self.hash = {}
        self.flags = None

    def do_remove(self):
        try:
            os.remove(self.file)
        except OSError, err:
            # for instance, because we haven't yet used this cache,
            # but client code has asked for a clear() operation...
            pass
        self.hash = {}

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return self.hash.has_key(key)

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
+
+
#### legacy stuff to support the old "Container" class interface

namespace_classes = {}

ContainerContext = dict

class ContainerMeta(type):
    def __init__(cls, classname, bases, dict_):
        # Register each Container subclass's namespace manager class so
        # __call__ can look it up when "instantiating".
        namespace_classes[cls] = cls.namespace_class
        return type.__init__(cls, classname, bases, dict_)
    def __call__(self, key, context, namespace, createfunc=None,
                 expiretime=None, starttime=None, **kwargs):
        # "Instantiating" a legacy Container actually returns a Value
        # bound to a namespace manager cached in *context* (a dict),
        # creating the manager on first use of the namespace.
        if namespace in context:
            ns = context[namespace]
        else:
            nscls = namespace_classes[self]
            context[namespace] = ns = nscls(namespace, **kwargs)
        return Value(key, ns, createfunc=createfunc,
                     expiretime=expiretime, starttime=starttime)

class Container(object):
    __metaclass__ = ContainerMeta
    namespace_class = NamespaceManager

class FileContainer(Container):
    namespace_class = FileNamespaceManager

class MemoryContainer(Container):
    namespace_class = MemoryNamespaceManager

class DBMContainer(Container):
    namespace_class = DBMNamespaceManager

# Backwards-compatible alias.
DbmContainer = DBMContainer
diff --git a/pyload/lib/beaker/converters.py b/pyload/lib/beaker/converters.py
new file mode 100644
index 000000000..f0ad34963
--- /dev/null
+++ b/pyload/lib/beaker/converters.py
@@ -0,0 +1,26 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
def asbool(obj):
    """Coerce *obj* to a bool.

    Strings are matched case-insensitively against common true/false
    spellings; an unrecognized string raises ValueError.  Non-strings
    go through ``bool()``.
    """
    if isinstance(obj, (str, unicode)):
        obj = obj.strip().lower()
        if obj in ['true', 'yes', 'on', 'y', 't', '1']:
            return True
        elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
            return False
        else:
            raise ValueError(
                "String is not true/false: %r" % obj)
    return bool(obj)
+
def aslist(obj, sep=None, strip=True):
    """Coerce *obj* to a list.

    Strings are split on *sep* (whitespace by default) and optionally
    stripped; lists/tuples pass through; None becomes []; anything
    else is wrapped in a single-element list.
    """
    if isinstance(obj, (str, unicode)):
        lst = obj.split(sep)
        if strip:
            lst = [v.strip() for v in lst]
        return lst
    elif isinstance(obj, (list, tuple)):
        return obj
    elif obj is None:
        return []
    else:
        return [obj]
diff --git a/pyload/lib/beaker/crypto/__init__.py b/pyload/lib/beaker/crypto/__init__.py
new file mode 100644
index 000000000..3e26b0c13
--- /dev/null
+++ b/pyload/lib/beaker/crypto/__init__.py
@@ -0,0 +1,40 @@
+from warnings import warn
+
+from beaker.crypto.pbkdf2 import PBKDF2, strxor
+from beaker.crypto.util import hmac, sha1, hmac_sha1, md5
+from beaker import util
+
keyLength = None  # AES key length (bytes) of the selected backend, or None

if util.jython:
    # On Jython, use the Java Cryptography Extensions if available.
    try:
        from beaker.crypto.jcecrypto import getKeyLength, aesEncrypt
        keyLength = getKeyLength()
    except ImportError:
        pass
else:
    # On CPython, use pycryptopp/pycrypto if available.
    try:
        from beaker.crypto.pycrypto import getKeyLength, aesEncrypt, aesDecrypt
        keyLength = getKeyLength()
    except ImportError:
        pass

# AES is usable only if one of the backends above imported successfully.
if not keyLength:
    has_aes = False
else:
    has_aes = True

if has_aes and keyLength < 32:
    # Backend supports less than a 256-bit key (e.g. a JRE with export
    # restrictions, see jcecrypto's module docstring); cookies generated
    # here may not decrypt elsewhere.
    warn('Crypto implementation only supports key lengths up to %d bits. '
         'Generated session cookies may be incompatible with other '
         'environments' % (keyLength * 8))
+
+
def generateCryptoKeys(master_key, salt, iterations):
    """Derive a cipher key from *master_key* and *salt* via PBKDF2.

    Reads ``keyLength`` bytes (the backend's AES key size) from the PBKDF2
    keystream; *iterations* is the PBKDF2 work factor.
    """
    # NOTE(review): an earlier comment here described XOR-ing the keystream
    # into os.urandom() output, but this implementation derives the key
    # purely from PBKDF2 -- no random component is mixed in.
    keystream = PBKDF2(master_key, salt, iterations=iterations)
    cipher_key = keystream.read(keyLength)
    return cipher_key
diff --git a/pyload/lib/beaker/crypto/jcecrypto.py b/pyload/lib/beaker/crypto/jcecrypto.py
new file mode 100644
index 000000000..4062d513e
--- /dev/null
+++ b/pyload/lib/beaker/crypto/jcecrypto.py
@@ -0,0 +1,30 @@
+"""
+Encryption module that uses the Java Cryptography Extensions (JCE).
+
+Note that in default installations of the Java Runtime Environment, the
+maximum key length is limited to 128 bits due to US export
+restrictions. This makes the generated keys incompatible with the ones
+generated by pycryptopp, which has no such restrictions. To fix this,
+download the "Unlimited Strength Jurisdiction Policy Files" from Sun,
+which will allow encryption using 256 bit AES keys.
+"""
+from javax.crypto import Cipher
+from javax.crypto.spec import SecretKeySpec, IvParameterSpec
+
+import jarray
+
# Initialization vector filled with zeros (16 bytes, the AES block size).
_iv = IvParameterSpec(jarray.zeros(16, 'b'))

def aesEncrypt(data, key):
    """Encrypt *data* with AES in CTR mode (no padding) using the JCE."""
    cipher = Cipher.getInstance('AES/CTR/NoPadding')
    skeySpec = SecretKeySpec(key, 'AES')
    cipher.init(Cipher.ENCRYPT_MODE, skeySpec, _iv)
    return cipher.doFinal(data).tostring()

# magic: in CTR mode decryption is the same operation as encryption,
# so the same function serves both directions.
aesDecrypt = aesEncrypt
+
def getKeyLength():
    """Return the usable AES key length in bytes (JCE maximum, capped at 256 bits)."""
    maxlen = Cipher.getMaxAllowedKeyLength('AES/CTR/NoPadding')
    return min(maxlen, 256) / 8
diff --git a/pyload/lib/beaker/crypto/pbkdf2.py b/pyload/lib/beaker/crypto/pbkdf2.py
new file mode 100644
index 000000000..96dc5fbb2
--- /dev/null
+++ b/pyload/lib/beaker/crypto/pbkdf2.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+# -*- coding: ascii -*-
+###########################################################################
+# PBKDF2.py - PKCS#5 v2.0 Password-Based Key Derivation
+#
+# Copyright (C) 2007 Dwayne C. Litzenberger <dlitz@dlitz.net>
+# All rights reserved.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose and without fee is hereby granted,
+# provided that the above copyright notice appear in all copies and that
+# both that copyright notice and this permission notice appear in
+# supporting documentation.
+#
+# THE AUTHOR PROVIDES THIS SOFTWARE ``AS IS'' AND ANY EXPRESSED OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Country of origin: Canada
+#
+###########################################################################
+# Sample PBKDF2 usage:
+# from Crypto.Cipher import AES
+# from PBKDF2 import PBKDF2
+# import os
+#
+# salt = os.urandom(8) # 64-bit salt
+# key = PBKDF2("This passphrase is a secret.", salt).read(32) # 256-bit key
+# iv = os.urandom(16) # 128-bit IV
+# cipher = AES.new(key, AES.MODE_CBC, iv)
+# ...
+#
+# Sample crypt() usage:
+# from PBKDF2 import crypt
+# pwhash = crypt("secret")
+# alleged_pw = raw_input("Enter password: ")
+# if pwhash == crypt(alleged_pw, pwhash):
+# print "Password good"
+# else:
+# print "Invalid password"
+#
+###########################################################################
+# History:
+#
+# 2007-07-27 Dwayne C. Litzenberger <dlitz@dlitz.net>
+# - Initial Release (v1.0)
+#
+# 2007-07-31 Dwayne C. Litzenberger <dlitz@dlitz.net>
+# - Bugfix release (v1.1)
+# - SECURITY: The PyCrypto XOR cipher (used, if available, in the _strxor
+# function in the previous release) silently truncates all keys to 64
+# bytes. The way it was used in the previous release, this would only be
+# problem if the pseudorandom function that returned values larger than
+# 64 bytes (so SHA1, SHA256 and SHA512 are fine), but I don't like
+# anything that silently reduces the security margin from what is
+# expected.
+#
+###########################################################################
+
+__version__ = "1.1"
+
+from struct import pack
+from binascii import b2a_hex
+from random import randint
+
+from base64 import b64encode
+
+from beaker.crypto.util import hmac as HMAC, hmac_sha1 as SHA1
+
def strxor(a, b):
    """Return the character-wise XOR of *a* and *b* (truncated to the shorter)."""
    out = []
    for ca, cb in zip(a, b):
        out.append(chr(ord(ca) ^ ord(cb)))
    return "".join(out)
+
class PBKDF2(object):
    """PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation

    This implementation takes a passphrase and a salt (and optionally an
    iteration count, a digest module, and a MAC module) and provides a
    file-like object from which an arbitrarily-sized key can be read.

    If the passphrase and/or salt are unicode objects, they are encoded as
    UTF-8 before they are processed.

    The idea behind PBKDF2 is to derive a cryptographic key from a
    passphrase and a salt.

    PBKDF2 may also be used as a strong salted password hash.  The
    'crypt' function is provided for that purpose.

    Remember: Keys generated using PBKDF2 are only as strong as the
    passphrases they are derived from.
    """

    def __init__(self, passphrase, salt, iterations=1000,
                 digestmodule=SHA1, macmodule=HMAC):
        # Accept either an HMAC-constructor callable or a module exposing
        # a .new() constructor (the old PyCrypto-style interface).
        if not callable(macmodule):
            macmodule = macmodule.new
        self.__macmodule = macmodule
        self.__digestmodule = digestmodule
        self._setup(passphrase, salt, iterations, self._pseudorandom)

    def _pseudorandom(self, key, msg):
        """Pseudorandom function.  e.g. HMAC-SHA1"""
        return self.__macmodule(key=key, msg=msg,
                                digestmod=self.__digestmodule).digest()

    def read(self, bytes):
        """Read the specified number of key bytes."""
        if self.closed:
            raise ValueError("file-like object is closed")

        size = len(self.__buf)
        blocks = [self.__buf]
        i = self.__blockNum
        # Generate successive PBKDF2 blocks until enough bytes are buffered.
        while size < bytes:
            i += 1
            if i > 0xffffffff:
                # The block counter must fit in 32 bits; beyond that the
                # derived key is undefined, so fail loudly.
                raise OverflowError("derived key too long")
            block = self.__f(i)
            blocks.append(block)
            size += len(block)
        buf = "".join(blocks)
        # Return the requested prefix; keep the remainder buffered so
        # consecutive reads form one continuous key stream.
        retval = buf[:bytes]
        self.__buf = buf[bytes:]
        self.__blockNum = i
        return retval

    def __f(self, i):
        # i must fit within 32 bits
        assert (1 <= i <= 0xffffffff)
        # U_1 = PRF(passphrase, salt || INT(i)); U_j = PRF(passphrase, U_{j-1});
        # the block is the XOR of all U_j.
        U = self.__prf(self.__passphrase, self.__salt + pack("!L", i))
        result = U
        for j in xrange(2, 1 + self.__iterations):
            U = self.__prf(self.__passphrase, U)
            result = strxor(result, U)
        return result

    def hexread(self, octets):
        """Read the specified number of octets. Return them as hexadecimal.

        Note that len(obj.hexread(n)) == 2*n.
        """
        return b2a_hex(self.read(octets))

    def _setup(self, passphrase, salt, iterations, prf):
        # Sanity checks:

        # passphrase and salt must be str or unicode (in the latter
        # case, we convert to UTF-8)
        if isinstance(passphrase, unicode):
            passphrase = passphrase.encode("UTF-8")
        if not isinstance(passphrase, str):
            raise TypeError("passphrase must be str or unicode")
        if isinstance(salt, unicode):
            salt = salt.encode("UTF-8")
        if not isinstance(salt, str):
            raise TypeError("salt must be str or unicode")

        # iterations must be an integer >= 1
        if not isinstance(iterations, (int, long)):
            raise TypeError("iterations must be an integer")
        if iterations < 1:
            raise ValueError("iterations must be at least 1")

        # prf must be callable
        if not callable(prf):
            raise TypeError("prf must be callable")

        self.__passphrase = passphrase
        self.__salt = salt
        self.__iterations = iterations
        self.__prf = prf
        self.__blockNum = 0  # index of the last generated block
        self.__buf = ""      # leftover key material not yet handed out
        self.closed = False

    def close(self):
        """Close the stream."""
        if not self.closed:
            # Drop key material so it is no longer reachable via the object.
            del self.__passphrase
            del self.__salt
            del self.__iterations
            del self.__prf
            del self.__blockNum
            del self.__buf
            self.closed = True
+
def crypt(word, salt=None, iterations=None):
    """PBKDF2-based unix crypt(3) replacement.

    The number of iterations specified in the salt overrides the 'iterations'
    parameter.

    The effective hash length is 192 bits.
    """

    # Generate a (pseudo-)random salt if the user hasn't provided one.
    if salt is None:
        salt = _makesalt()

    # salt must be a string or the us-ascii subset of unicode
    if isinstance(salt, unicode):
        salt = salt.encode("us-ascii")
    if not isinstance(salt, str):
        raise TypeError("salt must be a string")

    # word must be a string or unicode (in the latter case, we convert to UTF-8)
    if isinstance(word, unicode):
        word = word.encode("UTF-8")
    if not isinstance(word, str):
        raise TypeError("word must be a string or unicode")

    # Try to extract the real salt and iteration count from the salt, which
    # may be a previous full hash of the form "$p5k2$<iter-hex>$<salt>$...".
    if salt.startswith("$p5k2$"):
        (iterations, salt, dummy) = salt.split("$")[2:5]
        if iterations == "":
            # An empty iterations field encodes the default count of 400.
            iterations = 400
        else:
            converted = int(iterations, 16)
            # Require minimal lowercase hex so each hash has one canonical
            # textual form.
            if iterations != "%x" % converted:  # lowercase hex, minimum digits
                raise ValueError("Invalid salt")
            iterations = converted
            if not (iterations >= 1):
                raise ValueError("Invalid salt")

    # Make sure the salt matches the allowed character set
    allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./"
    for ch in salt:
        if ch not in allowed:
            raise ValueError("Illegal character %r in salt" % (ch,))

    # The default of 400 iterations is encoded as an empty field.
    if iterations is None or iterations == 400:
        iterations = 400
        salt = "$p5k2$$" + salt
    else:
        salt = "$p5k2$%x$%s" % (iterations, salt)
    rawhash = PBKDF2(word, salt, iterations).read(24)  # 24 bytes = 192 bits
    return salt + "$" + b64encode(rawhash, "./")

# Add crypt as a static method of the PBKDF2 class
# This makes it easier to do "from PBKDF2 import PBKDF2" and still use
# crypt.
PBKDF2.crypt = staticmethod(crypt)
+
def _makesalt():
    """Return a 48-bit pseudorandom salt for crypt().

    This function is not suitable for generating cryptographic secrets.
    """
    # Three random 16-bit values packed in native byte order, base64-encoded
    # with "./" as the altchars (crypt-style alphabet).
    binarysalt = "".join([pack("@H", randint(0, 0xffff)) for i in range(3)])
    return b64encode(binarysalt, "./")
+
def test_pbkdf2():
    """Module self-test"""
    from binascii import a2b_hex

    #
    # Test vectors from RFC 3962
    #

    # Test 1
    result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1).read(16)
    expected = a2b_hex("cdedb5281bb2f801565a1122b2563515")
    if result != expected:
        raise RuntimeError("self-test failed")

    # Test 2
    result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1200).hexread(32)
    expected = ("5c08eb61fdf71e4e4ec3cf6ba1f5512b"
                "a7e52ddbc5e5142f708a31e2e62b1e13")
    if result != expected:
        raise RuntimeError("self-test failed")

    # Test 3
    result = PBKDF2("X"*64, "pass phrase equals block size", 1200).hexread(32)
    expected = ("139c30c0966bc32ba55fdbf212530ac9"
                "c5ec59f1a452f5cc9ad940fea0598ed1")
    if result != expected:
        raise RuntimeError("self-test failed")

    # Test 4
    result = PBKDF2("X"*65, "pass phrase exceeds block size", 1200).hexread(32)
    expected = ("9ccad6d468770cd51b10e6a68721be61"
                "1a8b4d282601db3b36be9246915ec82a")
    if result != expected:
        raise RuntimeError("self-test failed")

    #
    # Other test vectors
    #

    # Chunked read: reading in odd-sized pieces must yield the same key
    # stream as one large read.
    f = PBKDF2("kickstart", "workbench", 256)
    result = f.read(17)
    result += f.read(17)
    result += f.read(1)
    result += f.read(2)
    result += f.read(3)
    expected = PBKDF2("kickstart", "workbench", 256).read(40)
    if result != expected:
        raise RuntimeError("self-test failed")

    #
    # crypt() test vectors
    #

    # crypt 1
    result = crypt("cloadm", "exec")
    expected = '$p5k2$$exec$r1EWMCMk7Rlv3L/RNcFXviDefYa0hlql'
    if result != expected:
        raise RuntimeError("self-test failed")

    # crypt 2
    result = crypt("gnu", '$p5k2$c$u9HvcT4d$.....')
    expected = '$p5k2$c$u9HvcT4d$Sd1gwSVCLZYAuqZ25piRnbBEoAesaa/g'
    if result != expected:
        raise RuntimeError("self-test failed")

    # crypt 3
    result = crypt("dcl", "tUsch7fU", iterations=13)
    expected = "$p5k2$d$tUsch7fU$nqDkaxMDOFBeJsTSfABsyn.PYUXilHwL"
    if result != expected:
        raise RuntimeError("self-test failed")

    # crypt 4 (unicode)
    result = crypt(u'\u0399\u03c9\u03b1\u03bd\u03bd\u03b7\u03c2',
                   '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ')
    expected = '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ'
    if result != expected:
        raise RuntimeError("self-test failed")

if __name__ == '__main__':
    test_pbkdf2()
+
+# vim:set ts=4 sw=4 sts=4 expandtab:
diff --git a/pyload/lib/beaker/crypto/pycrypto.py b/pyload/lib/beaker/crypto/pycrypto.py
new file mode 100644
index 000000000..a3eb4d9db
--- /dev/null
+++ b/pyload/lib/beaker/crypto/pycrypto.py
@@ -0,0 +1,31 @@
+"""Encryption module that uses pycryptopp or pycrypto"""
try:
    # Pycryptopp is preferred over Crypto because Crypto has had
    # various periods of not being maintained, and pycryptopp uses
    # the Crypto++ library which is generally considered the 'gold standard'
    # of crypto implementations
    from pycryptopp.cipher import aes

    def aesEncrypt(data, key):
        """Encrypt *data* with AES using the pycryptopp backend."""
        cipher = aes.AES(key)
        return cipher.process(data)

    # magic: with this backend the same operation serves both directions,
    # so decryption is simply the encryption function.
    aesDecrypt = aesEncrypt

except ImportError:
    from Crypto.Cipher import AES

    def aesEncrypt(data, key):
        """Encrypt *data* with AES using the PyCrypto backend."""
        cipher = AES.new(key)

        # Pad to a 16-byte multiple with spaces.
        # NOTE(review): aesDecrypt strips trailing whitespace below, so a
        # plaintext that itself ends in whitespace is corrupted on the
        # round trip -- confirm this is acceptable for the session payloads.
        data = data + (" " * (16 - (len(data) % 16)))
        return cipher.encrypt(data)

    def aesDecrypt(data, key):
        """Decrypt data produced by the PyCrypto aesEncrypt above."""
        cipher = AES.new(key)

        return cipher.decrypt(data).rstrip()
+
def getKeyLength():
    """Key length supported by this backend: 256-bit AES, i.e. 32 bytes."""
    return 256 // 8
diff --git a/pyload/lib/beaker/crypto/util.py b/pyload/lib/beaker/crypto/util.py
new file mode 100644
index 000000000..d97e8ce6f
--- /dev/null
+++ b/pyload/lib/beaker/crypto/util.py
@@ -0,0 +1,30 @@
+from warnings import warn
+from beaker import util
+
+
try:
    # Use PyCrypto (if available); its HMAC/SHA modules mirror the
    # interface the rest of this package expects.
    from Crypto.Hash import HMAC as hmac, SHA as hmac_sha1
    sha1 = hmac_sha1.new

except ImportError:

    # PyCrypto not available. Use the Python standard library.
    import hmac

    # When using the stdlib, we have to make sure the hmac version and sha
    # version are compatible
    if util.py24:
        from sha import sha as sha1
        import sha as hmac_sha1
    else:
        # NOTE: We have to use the callable with hashlib (hashlib.sha1),
        # otherwise hmac only accepts the sha module object itself
        from hashlib import sha1
        hmac_sha1 = sha1


if util.py24:
    # md5/sha modules were superseded by hashlib after Python 2.4.
    from md5 import md5
else:
    from hashlib import md5
diff --git a/pyload/lib/beaker/exceptions.py b/pyload/lib/beaker/exceptions.py
new file mode 100644
index 000000000..cc0eed286
--- /dev/null
+++ b/pyload/lib/beaker/exceptions.py
@@ -0,0 +1,24 @@
+"""Beaker exception classes"""
+
class BeakerException(Exception):
    """Root of the Beaker exception hierarchy."""
+
+
class CreationAbortedError(Exception):
    """Deprecated; retained only for backward compatibility."""
+
+
class InvalidCacheBackendError(BeakerException, ImportError):
    """Raised when a cache backend's required library cannot be imported."""
+
+
class MissingCacheParameter(BeakerException):
    """Raised when a required cache configuration parameter is missing."""
+
+
class LockError(BeakerException):
    """Raised on synchronization/locking failures."""
+
+
class InvalidCryptoBackendError(BeakerException):
    """Raised when no usable crypto backend is available."""
diff --git a/pyload/lib/beaker/ext/__init__.py b/pyload/lib/beaker/ext/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/lib/beaker/ext/__init__.py
diff --git a/pyload/lib/beaker/ext/database.py b/pyload/lib/beaker/ext/database.py
new file mode 100644
index 000000000..701e6f7d2
--- /dev/null
+++ b/pyload/lib/beaker/ext/database.py
@@ -0,0 +1,165 @@
+import cPickle
+import logging
+import pickle
+from datetime import datetime
+
+from beaker.container import OpenResourceNamespaceManager, Container
+from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
+from beaker.synchronization import file_synchronizer, null_synchronizer
+from beaker.util import verify_directory, SyncDict
+
+log = logging.getLogger(__name__)
+
+sa = None
+pool = None
+types = None
+
class DatabaseNamespaceManager(OpenResourceNamespaceManager):
    """Namespace manager that persists cache data in a SQLAlchemy table.

    The whole namespace is stored pickled in a single row: it is loaded in
    do_open() and written back in do_close().
    """
    # Shared registries so engine metadata and Table objects are created
    # once per URL + table name across all instances.
    metadatas = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        # Import SQLAlchemy lazily so this backend stays optional.
        global sa, pool, types
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
            import sqlalchemy.pool as pool
            from sqlalchemy import types
        except ImportError:
            raise InvalidCacheBackendError("Database cache backend requires "
                                           "the 'sqlalchemy' library")

    def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
                 table_name='beaker_cache', data_dir=None, lock_dir=None,
                 **params):
        """Creates a database namespace manager

        ``url``
            SQLAlchemy compliant db url
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the engine
            with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        """
        OpenResourceNamespaceManager.__init__(self, namespace)

        if sa_opts is None:
            sa_opts = params

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        # NOTE(review): if neither lock_dir nor data_dir is supplied,
        # self.lock_dir is never assigned, so the access below (and in
        # get_creation_lock) raises AttributeError -- confirm callers
        # always provide one of them.
        if self.lock_dir:
            verify_directory(self.lock_dir)

        # Check to see if the table's been created before
        url = url or sa_opts['sa.url']
        table_key = url + table_name
        def make_cache():
            # Check to see if we have a connection pool open already
            meta_key = url + table_name
            def make_meta():
                # SQLAlchemy pops the url, this ensures it sticks around
                # later
                sa_opts['sa.url'] = url
                engine = sa.engine_from_config(sa_opts, 'sa.')
                meta = sa.MetaData()
                meta.bind = engine
                return meta
            meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
            # Create the table object and cache it now
            cache = sa.Table(table_name, meta,
                             sa.Column('id', types.Integer, primary_key=True),
                             sa.Column('namespace', types.String(255), nullable=False),
                             sa.Column('accessed', types.DateTime, nullable=False),
                             sa.Column('created', types.DateTime, nullable=False),
                             sa.Column('data', types.PickleType, nullable=False),
                             sa.UniqueConstraint('namespace')
                             )
            cache.create(checkfirst=True)
            return cache
        self.hash = {}
        self._is_new = False
        self.loaded = False
        self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)

    def get_access_lock(self):
        # No per-access lock; concurrent access is left to the database.
        return null_synchronizer()

    def get_creation_lock(self, key):
        # File-based lock serializing value creation for this namespace.
        return file_synchronizer(
            identifier ="databasecontainer/funclock/%s" % self.namespace,
            lock_dir = self.lock_dir)

    def do_open(self, flags):
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return

        cache = self.cache
        result = sa.select([cache.c.data],
                           cache.c.namespace==self.namespace
                           ).execute().fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = result['data']
            except (IOError, OSError, EOFError, cPickle.PickleError,
                    pickle.PickleError):
                # Corrupt/unreadable pickled row: start over with empty data.
                log.debug("Couln't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        # Persist only when the namespace was opened for create/write.
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            cache = self.cache
            if self._is_new:
                cache.insert().execute(namespace=self.namespace, data=self.hash,
                                       accessed=datetime.now(),
                                       created=datetime.now())
                self._is_new = False
            else:
                cache.update(cache.c.namespace==self.namespace).execute(
                    data=self.hash, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        cache = self.cache
        cache.delete(cache.c.namespace==self.namespace).execute()
        self.hash = {}

        # We can retain the fact that we did a load attempt, but since the
        # file is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return self.hash.has_key(key)

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
+
class DatabaseContainer(Container):
    """Container backed by DatabaseNamespaceManager.

    Fix: ContainerMeta registers ``cls.namespace_class`` at class-creation
    time, so the backing manager must be assigned to ``namespace_class``
    (as FileContainer/MemoryContainer/GoogleContainer/MemcachedContainer
    do); previously only ``namespace_manager`` was set, leaving this
    container registered with the base NamespaceManager.  The old attribute
    is kept for backward compatibility.
    """
    namespace_class = DatabaseNamespaceManager
    namespace_manager = DatabaseNamespaceManager
diff --git a/pyload/lib/beaker/ext/google.py b/pyload/lib/beaker/ext/google.py
new file mode 100644
index 000000000..dd8380d7f
--- /dev/null
+++ b/pyload/lib/beaker/ext/google.py
@@ -0,0 +1,120 @@
+import cPickle
+import logging
+from datetime import datetime
+
+from beaker.container import OpenResourceNamespaceManager, Container
+from beaker.exceptions import InvalidCacheBackendError
+from beaker.synchronization import null_synchronizer
+
+log = logging.getLogger(__name__)
+
+db = None
+
class GoogleNamespaceManager(OpenResourceNamespaceManager):
    """Namespace manager storing pickled cache data in the GAE datastore."""
    # One dynamically-built datastore model class per table name,
    # shared across instances.
    tables = {}

    @classmethod
    def _init_dependencies(cls):
        # Import the App Engine datastore API lazily so the backend is optional.
        global db
        if db is not None:
            return
        try:
            db = __import__('google.appengine.ext.db').appengine.ext.db
        except ImportError:
            raise InvalidCacheBackendError("Datastore cache backend requires the "
                                           "'google.appengine.ext' library")

    def __init__(self, namespace, table_name='beaker_cache', **params):
        """Creates a datastore namespace manager"""
        OpenResourceNamespaceManager.__init__(self, namespace)

        def make_cache():
            # Build a db.Model subclass named after the table, with the same
            # columns the other backends use.
            table_dict = dict(created=db.DateTimeProperty(),
                              accessed=db.DateTimeProperty(),
                              data=db.BlobProperty())
            table = type(table_name, (db.Model,), table_dict)
            return table
        self.table_name = table_name
        self.cache = GoogleNamespaceManager.tables.setdefault(table_name, make_cache())
        self.hash = {}
        self._is_new = False
        self.loaded = False
        # Cache whether debug logging is enabled to avoid per-call checks.
        self.log_debug = logging.DEBUG >= log.getEffectiveLevel()

        # Google wants namespaces to start with letters, change the namespace
        # to start with a letter
        self.namespace = 'p%s' % self.namespace

    def get_access_lock(self):
        return null_synchronizer()

    def get_creation_lock(self, key):
        # this is weird, should probably be present
        return null_synchronizer()

    def do_open(self, flags):
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return

        item = self.cache.get_by_key_name(self.namespace)

        if not item:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = cPickle.loads(str(item.data))
            except (IOError, OSError, EOFError, cPickle.PickleError):
                # Corrupt/unreadable pickled blob: start over with empty data.
                if self.log_debug:
                    log.debug("Couln't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        # Persist only when the namespace was opened for create/write.
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            if self._is_new:
                item = self.cache(key_name=self.namespace)
                item.data = cPickle.dumps(self.hash)
                item.created = datetime.now()
                item.accessed = datetime.now()
                item.put()
                self._is_new = False
            else:
                item = self.cache.get_by_key_name(self.namespace)
                item.data = cPickle.dumps(self.hash)
                item.accessed = datetime.now()
                item.put()
        self.flags = None

    def do_remove(self):
        item = self.cache.get_by_key_name(self.namespace)
        item.delete()
        self.hash = {}

        # We can retain the fact that we did a load attempt, but since the
        # file is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return self.hash.has_key(key)

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
+
+
class GoogleContainer(Container):
    """Container backed by GoogleNamespaceManager."""
    namespace_class = GoogleNamespaceManager
diff --git a/pyload/lib/beaker/ext/memcached.py b/pyload/lib/beaker/ext/memcached.py
new file mode 100644
index 000000000..96516953f
--- /dev/null
+++ b/pyload/lib/beaker/ext/memcached.py
@@ -0,0 +1,82 @@
+from beaker.container import NamespaceManager, Container
+from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
+from beaker.synchronization import file_synchronizer, null_synchronizer
+from beaker.util import verify_directory, SyncDict
+import warnings
+
+memcache = None
+
class MemcachedNamespaceManager(NamespaceManager):
    """Namespace manager that stores values in memcached.

    Keys are prefixed with the namespace; memcached cannot enumerate keys,
    so keys() is unsupported.
    """
    # Shared client registry: one memcache client per server URL.
    clients = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        # Try the available client libraries in order of preference.
        global memcache
        if memcache is not None:
            return
        try:
            import pylibmc as memcache
        except ImportError:
            try:
                import cmemcache as memcache
                warnings.warn("cmemcache is known to have serious "
                              "concurrency issues; consider using 'memcache' or 'pylibmc'")
            except ImportError:
                try:
                    import memcache
                except ImportError:
                    raise InvalidCacheBackendError("Memcached cache backend requires either "
                                                   "the 'memcache' or 'cmemcache' library")

    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
        NamespaceManager.__init__(self, namespace)

        if not url:
            raise MissingCacheParameter("url is required")

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_mcd_lock"
        # NOTE(review): if neither lock_dir nor data_dir is supplied,
        # self.lock_dir is never assigned and the access below raises
        # AttributeError -- confirm callers always provide one of them.
        if self.lock_dir:
            verify_directory(self.lock_dir)

        # url is a ';'-separated server list; one shared client per url.
        self.mc = MemcachedNamespaceManager.clients.get(url, memcache.Client, url.split(';'))

    def get_creation_lock(self, key):
        return file_synchronizer(
            identifier="memcachedcontainer/funclock/%s" % self.namespace,lock_dir = self.lock_dir)

    def _format_key(self, key):
        # Memcached keys must not contain spaces; replace each space with a
        # UTF-8 middle-dot byte sequence.
        return self.namespace + '_' + key.replace(' ', '\302\267')

    def __getitem__(self, key):
        return self.mc.get(self._format_key(key))

    def __contains__(self, key):
        # NOTE: a stored value of None is indistinguishable from "missing".
        value = self.mc.get(self._format_key(key))
        return value is not None

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        if expiretime:
            self.mc.set(self._format_key(key), value, time=expiretime)
        else:
            self.mc.set(self._format_key(key), value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self.mc.delete(self._format_key(key))

    def do_remove(self):
        # NOTE(review): flush_all clears the ENTIRE memcached server, not
        # just this namespace -- confirm this is intended.
        self.mc.flush_all()

    def keys(self):
        raise NotImplementedError("Memcache caching does not support iteration of all cache keys")
+
class MemcachedContainer(Container):
    """Container backed by MemcachedNamespaceManager."""
    namespace_class = MemcachedNamespaceManager
diff --git a/pyload/lib/beaker/ext/sqla.py b/pyload/lib/beaker/ext/sqla.py
new file mode 100644
index 000000000..8c79633c1
--- /dev/null
+++ b/pyload/lib/beaker/ext/sqla.py
@@ -0,0 +1,133 @@
+import cPickle
+import logging
+import pickle
+from datetime import datetime
+
+from beaker.container import OpenResourceNamespaceManager, Container
+from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
+from beaker.synchronization import file_synchronizer, null_synchronizer
+from beaker.util import verify_directory, SyncDict
+
+
+log = logging.getLogger(__name__)
+
+sa = None
+
class SqlaNamespaceManager(OpenResourceNamespaceManager):
    """Namespace manager storing pickled cache data via a SQLAlchemy table.

    Unlike DatabaseNamespaceManager, the caller supplies the bind and table
    objects directly (see ``make_cache_table``).
    """
    # Shared registries: one bind per engine URL, one Table per URL+name.
    binds = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        # Import SQLAlchemy lazily so this backend stays optional.
        global sa
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
        except ImportError:
            raise InvalidCacheBackendError("SQLAlchemy, which is required by "
                                           "this backend, is not installed")

    def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None,
                 **kwargs):
        """Create a namespace manager for use with a database table via
        SQLAlchemy.

        ``bind``
            SQLAlchemy ``Engine`` or ``Connection`` object

        ``table``
            SQLAlchemy ``Table`` object in which to store namespace data.
            This should usually be something created by ``make_cache_table``.
        """
        OpenResourceNamespaceManager.__init__(self, namespace)

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        # NOTE(review): if neither lock_dir nor data_dir is supplied,
        # self.lock_dir is never assigned and the access below raises
        # AttributeError -- confirm callers always provide one of them.
        if self.lock_dir:
            verify_directory(self.lock_dir)

        self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
        self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name),
                                               lambda: table)
        self.hash = {}
        self._is_new = False
        self.loaded = False

    def get_access_lock(self):
        # No per-access lock; concurrent access is left to the database.
        return null_synchronizer()

    def get_creation_lock(self, key):
        # File-based lock serializing value creation for this namespace.
        return file_synchronizer(
            identifier ="databasecontainer/funclock/%s" % self.namespace,
            lock_dir=self.lock_dir)

    def do_open(self, flags):
        # If we already loaded the data, don't bother loading it again.
        if self.loaded:
            self.flags = flags
            return
        select = sa.select([self.table.c.data],
                           (self.table.c.namespace == self.namespace))
        result = self.bind.execute(select).fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = result['data']
            except (IOError, OSError, EOFError, cPickle.PickleError,
                    pickle.PickleError):
                # Corrupt/unreadable pickled row: start over with empty data.
                log.debug("Couln't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        # Persist only when the namespace was opened for create/write.
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            if self._is_new:
                insert = self.table.insert()
                self.bind.execute(insert, namespace=self.namespace, data=self.hash,
                                  accessed=datetime.now(), created=datetime.now())
                self._is_new = False
            else:
                update = self.table.update(self.table.c.namespace == self.namespace)
                self.bind.execute(update, data=self.hash, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        delete = self.table.delete(self.table.c.namespace == self.namespace)
        self.bind.execute(delete)
        self.hash = {}
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return self.hash.has_key(key)

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
+
+
class SqlaContainer(Container):
    """Container backed by SqlaNamespaceManager.

    Fix: ContainerMeta registers ``cls.namespace_class`` at class-creation
    time, so the backing manager must be assigned to ``namespace_class``
    (as the other containers in this package do); previously only
    ``namespace_manager`` was set, leaving this container registered with
    the base NamespaceManager.  The old attribute is kept for backward
    compatibility.
    """
    namespace_class = SqlaNamespaceManager
    namespace_manager = SqlaNamespaceManager
+
def make_cache_table(metadata, table_name='beaker_cache'):
    """Return a ``Table`` object suitable for storing cached values for the
    namespace manager. Do not create the table."""
    # One row per namespace; 'data' holds the whole pickled hash.
    return sa.Table(table_name, metadata,
                    sa.Column('namespace', sa.String(255), primary_key=True),
                    sa.Column('accessed', sa.DateTime, nullable=False),
                    sa.Column('created', sa.DateTime, nullable=False),
                    sa.Column('data', sa.PickleType, nullable=False))
diff --git a/pyload/lib/beaker/middleware.py b/pyload/lib/beaker/middleware.py
new file mode 100644
index 000000000..7ba88b37d
--- /dev/null
+++ b/pyload/lib/beaker/middleware.py
@@ -0,0 +1,165 @@
+import warnings
+
+try:
+ from paste.registry import StackedObjectProxy
+ beaker_session = StackedObjectProxy(name="Beaker Session")
+ beaker_cache = StackedObjectProxy(name="Cache Manager")
+except:
+ beaker_cache = None
+ beaker_session = None
+
+from beaker.cache import CacheManager
+from beaker.session import Session, SessionObject
+from beaker.util import coerce_cache_params, coerce_session_params, \
+ parse_cache_config_options
+
+
class CacheMiddleware(object):
    """WSGI middleware that exposes a ``CacheManager`` to wrapped apps."""

    cache = beaker_cache

    def __init__(self, app, config=None, environ_key='beaker.cache', **kwargs):
        """Wrap *app*, publishing a CacheManager under ``environ[environ_key]``.

        ``config``
            dict of 'cache.'-prefixed settings (Paste style). If it
            contains no 'cache.'-prefixed keys at all, the whole dict is
            treated as cache options.

        ``environ_key``
            Location where the Cache instance will be keyed in the WSGI
            environ.

        ``**kwargs``
            Cache settings that override anything found in ``config``.
        """
        self.app = app
        cfg = config or {}

        opts = {}

        # Parsed, 'cache.'-prefixed settings from the config dict.
        opts.update(parse_cache_config_options(cfg))

        # Then kwargs, without re-adding the defaults.
        opts.update(parse_cache_config_options(kwargs, include_defaults=False))

        # No 'cache.'-prefixed keys found: assume every key is meant for us.
        if not opts and cfg:
            opts = cfg

        opts.update(kwargs)
        self.options = opts
        self.cache_manager = CacheManager(**opts)
        self.environ_key = environ_key

    def __call__(self, environ, start_response):
        registry = environ.get('paste.registry')
        if registry and registry.reglist:
            registry.register(self.cache, self.cache_manager)
        environ[self.environ_key] = self.cache_manager
        return self.app(environ, start_response)
+
+
class SessionMiddleware(object):
    """WSGI middleware that exposes a lazy beaker ``Session`` to wrapped apps."""

    session = beaker_session

    def __init__(self, wrap_app, config=None, environ_key='beaker.session',
                 **kwargs):
        """Initialize the Session Middleware

        The Session middleware will make a lazy session instance
        available every request under the ``environ['beaker.session']``
        key by default. The location in environ can be changed by
        setting ``environ_key``.

        ``config``
            dict All settings should be prefixed by 'session.'. This
            method of passing variables is intended for Paste and other
            setups that accumulate multiple component settings in a
            single dictionary. If config contains *no session. prefixed
            args*, then *all* of the config options will be used to
            initialize the Session objects.

        ``environ_key``
            Location where the Session instance will keyed in the WSGI
            environ

        ``**kwargs``
            All keyword arguments are assumed to be session settings and
            will override any settings found in ``config``

        """
        config = config or {}

        # Load up the default params
        self.options = dict(invalidate_corrupt=True, type=None,
                            data_dir=None, key='beaker.session.id',
                            timeout=None, secret=None, log_file=None)

        # Pull out any config args meant for beaker session. if there are any
        for dct in [config, kwargs]:
            # .items() instead of the Python-2-only .iteritems()
            for key, val in dct.items():
                if key.startswith('beaker.session.'):
                    self.options[key[15:]] = val
                if key.startswith('session.'):
                    self.options[key[8:]] = val
                if key.startswith('session_'):
                    warnings.warn('Session options should start with session. '
                                  'instead of session_.', DeprecationWarning, 2)
                    self.options[key[8:]] = val

        # Coerce and validate session params
        coerce_session_params(self.options)

        # Assume all keys are intended for session if none are prefixed with
        # 'session.'
        if not self.options and config:
            self.options = config

        self.options.update(kwargs)
        self.wrap_app = wrap_app
        self.environ_key = environ_key

    def __call__(self, environ, start_response):
        session = SessionObject(environ, **self.options)
        if environ.get('paste.registry'):
            if environ['paste.registry'].reglist:
                environ['paste.registry'].register(self.session, session)
        environ[self.environ_key] = session
        environ['beaker.get_session'] = self._get_session

        def session_start_response(status, headers, exc_info=None):
            # Persist only sessions that were actually touched during the
            # request, and emit the session cookie when one was generated.
            if session.accessed():
                session.persist()
                if session.__dict__['_headers']['set_cookie']:
                    cookie = session.__dict__['_headers']['cookie_out']
                    if cookie:
                        headers.append(('Set-cookie', cookie))
            return start_response(status, headers, exc_info)
        return self.wrap_app(environ, session_start_response)

    def _get_session(self):
        """Return a fresh, cookie-less Session built from the same options."""
        return Session({}, use_cookies=False, **self.options)
+
+
def session_filter_factory(global_conf, **kwargs):
    """Paste filter_factory: return a callable that wraps apps in
    SessionMiddleware using *global_conf* and *kwargs* as settings."""
    def _filter(app):
        return SessionMiddleware(app, global_conf, **kwargs)
    return _filter
+
+
def session_filter_app_factory(app, global_conf, **kwargs):
    # Paste app_factory entry point: directly wrap *app* in SessionMiddleware.
    return SessionMiddleware(app, global_conf, **kwargs)
diff --git a/pyload/lib/beaker/session.py b/pyload/lib/beaker/session.py
new file mode 100644
index 000000000..7d465530b
--- /dev/null
+++ b/pyload/lib/beaker/session.py
@@ -0,0 +1,618 @@
+import Cookie
+import os
+import random
+import time
+from datetime import datetime, timedelta
+
+from beaker.crypto import hmac as HMAC, hmac_sha1 as SHA1, md5
+from beaker.util import pickle
+
+from beaker import crypto
+from beaker.cache import clsmap
+from beaker.exceptions import BeakerException, InvalidCryptoBackendError
+from base64 import b64encode, b64decode
+
+
+__all__ = ['SignedCookie', 'Session']
+
+getpid = hasattr(os, 'getpid') and os.getpid or (lambda : '')
+
class SignedCookie(Cookie.BaseCookie):
    """Cookie variant whose values carry an HMAC-SHA1 signature prefix."""

    def __init__(self, secret, input=None):
        self.secret = secret
        Cookie.BaseCookie.__init__(self, input)

    def value_decode(self, val):
        """Verify the 40-char hex signature prefix.

        Returns ``(payload, raw)`` on success and ``(None, raw)`` when the
        signature does not match.
        """
        val = val.strip('"')
        sig = HMAC.new(self.secret, val[40:], SHA1).hexdigest()

        given = val[:40]
        if len(sig) != len(given):
            return None, val

        # Always scan every character so the comparison time does not
        # leak where the first mismatch occurs (timing-attack mitigation).
        mismatches = sum(a != b for a, b in zip(sig, given))

        if mismatches:
            return None, val
        return val[40:], val

    def value_encode(self, val):
        """Prefix *val* with its HMAC-SHA1 hex digest."""
        sig = HMAC.new(self.secret, val, SHA1).hexdigest()
        return str(val), ("%s%s" % (sig, val))
+
+
class Session(dict):
    """Session object that uses container package for storage.

    ``key``
        The name the cookie should be set to.
    ``timeout``
        How long session data is considered valid. This is used
        regardless of the cookie being present or not to determine
        whether session data is still valid.
    ``cookie_domain``
        Domain to use for the cookie.
    ``secure``
        Whether or not the cookie should only be sent over SSL.
    """
    def __init__(self, request, id=None, invalidate_corrupt=False,
                 use_cookies=True, type=None, data_dir=None,
                 key='beaker.session.id', timeout=None, cookie_expires=True,
                 cookie_domain=None, secret=None, secure=False,
                 namespace_class=None, **namespace_args):
        # Backend selection: explicit ``type`` wins; otherwise a file
        # backend when a data_dir is given, else a purely in-memory one.
        if not type:
            if data_dir:
                self.type = 'file'
            else:
                self.type = 'memory'
        else:
            self.type = type

        self.namespace_class = namespace_class or clsmap[self.type]

        self.namespace_args = namespace_args

        self.request = request
        self.data_dir = data_dir
        self.key = key

        self.timeout = timeout
        self.use_cookies = use_cookies
        self.cookie_expires = cookie_expires

        # Default cookie domain/path
        self._domain = cookie_domain
        self._path = '/'
        self.was_invalidated = False
        self.secret = secret
        self.secure = secure
        self.id = id
        self.accessed_dict = {}

        if self.use_cookies:
            cookieheader = request.get('cookie', '')
            # A secret means cookies are HMAC-signed (SignedCookie);
            # otherwise a plain SimpleCookie is used.
            if secret:
                try:
                    self.cookie = SignedCookie(secret, input=cookieheader)
                except Cookie.CookieError:
                    self.cookie = SignedCookie(secret, input=None)
            else:
                self.cookie = Cookie.SimpleCookie(input=cookieheader)

            if not self.id and self.key in self.cookie:
                self.id = self.cookie[self.key].value

        # No id from the caller or the cookie: brand-new session.
        self.is_new = self.id is None
        if self.is_new:
            self._create_id()
            self['_accessed_time'] = self['_creation_time'] = time.time()
        else:
            try:
                self.load()
            # NOTE(review): bare except — any failure to load is treated
            # as a corrupt session.
            except:
                if invalidate_corrupt:
                    self.invalidate()
                else:
                    raise

    def _create_id(self):
        # NOTE(review): the id mixes time, object id, random() and pid
        # through md5 — unique enough for a session key, not a security
        # token.
        self.id = md5(
            md5("%f%s%f%s" % (time.time(), id({}), random.random(),
                getpid())).hexdigest(),
        ).hexdigest()
        self.is_new = True
        self.last_accessed = None
        if self.use_cookies:
            self.cookie[self.key] = self.id
            if self._domain:
                self.cookie[self.key]['domain'] = self._domain
            if self.secure:
                self.cookie[self.key]['secure'] = True
            self.cookie[self.key]['path'] = self._path
            # cookie_expires semantics: True -> session cookie, False ->
            # "never" (year 2038), timedelta -> relative, datetime ->
            # absolute.
            if self.cookie_expires is not True:
                if self.cookie_expires is False:
                    expires = datetime.fromtimestamp( 0x7FFFFFFF )
                elif isinstance(self.cookie_expires, timedelta):
                    expires = datetime.today() + self.cookie_expires
                elif isinstance(self.cookie_expires, datetime):
                    expires = self.cookie_expires
                else:
                    raise ValueError("Invalid argument for cookie_expires: %s"
                                     % repr(self.cookie_expires))
                self.cookie[self.key]['expires'] = \
                    expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" )
            self.request['cookie_out'] = self.cookie[self.key].output(header='')
            self.request['set_cookie'] = False

    def created(self):
        # Epoch timestamp recorded when the session was first created.
        return self['_creation_time']
    created = property(created)

    def _set_domain(self, domain):
        # Persist the domain in the session data and refresh the cookie.
        self['_domain'] = domain
        self.cookie[self.key]['domain'] = domain
        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def _get_domain(self):
        return self._domain

    domain = property(_get_domain, _set_domain)

    def _set_path(self, path):
        # Persist the path in the session data and refresh the cookie.
        self['_path'] = path
        self.cookie[self.key]['path'] = path
        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def _get_path(self):
        return self._path

    path = property(_get_path, _set_path)

    def _delete_cookie(self):
        # Reissue the cookie with an expiry in the past (year 2003) so the
        # browser drops it.
        self.request['set_cookie'] = True
        self.cookie[self.key] = self.id
        if self._domain:
            self.cookie[self.key]['domain'] = self._domain
        if self.secure:
            self.cookie[self.key]['secure'] = True
        self.cookie[self.key]['path'] = '/'
        expires = datetime.today().replace(year=2003)
        self.cookie[self.key]['expires'] = \
            expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" )
        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def delete(self):
        """Deletes the session from the persistent storage, and sends
        an expired cookie out"""
        if self.use_cookies:
            self._delete_cookie()
        self.clear()

    def invalidate(self):
        """Invalidates this session, creates a new session id, returns
        to the is_new state"""
        self.clear()
        self.was_invalidated = True
        self._create_id()
        self.load()

    def load(self):
        "Loads the data from this session from persistent storage"
        self.namespace = self.namespace_class(self.id,
            data_dir=self.data_dir, digest_filenames=False,
            **self.namespace_args)
        now = time.time()
        self.request['set_cookie'] = True

        self.namespace.acquire_read_lock()
        timed_out = False
        try:
            self.clear()
            try:
                session_data = self.namespace['session']

                # Memcached always returns a key, its None when its not
                # present
                if session_data is None:
                    session_data = {
                        '_creation_time':now,
                        '_accessed_time':now
                    }
                    self.is_new = True
            except (KeyError, TypeError):
                session_data = {
                    '_creation_time':now,
                    '_accessed_time':now
                }
                self.is_new = True

            # Expired sessions are invalidated only after the read lock is
            # released (invalidate() re-enters load()).
            if self.timeout is not None and \
               now - session_data['_accessed_time'] > self.timeout:
                timed_out= True
            else:
                # Properly set the last_accessed time, which is different
                # than the *currently* _accessed_time
                if self.is_new or '_accessed_time' not in session_data:
                    self.last_accessed = None
                else:
                    self.last_accessed = session_data['_accessed_time']

                # Update the current _accessed_time
                session_data['_accessed_time'] = now
                self.update(session_data)
                self.accessed_dict = session_data.copy()
        finally:
            self.namespace.release_read_lock()
        if timed_out:
            self.invalidate()

    def save(self, accessed_only=False):
        """Saves the data for this session to persistent storage

        If accessed_only is True, then only the original data loaded
        at the beginning of the request will be saved, with the updated
        last accessed time.

        """
        # Look to see if its a new session that was only accessed
        # Don't save it under that case
        if accessed_only and self.is_new:
            return None

        # load() may not have run (brand-new session): create the backing
        # namespace on demand.
        if not hasattr(self, 'namespace'):
            self.namespace = self.namespace_class(
                                    self.id,
                                    data_dir=self.data_dir,
                                    digest_filenames=False,
                                    **self.namespace_args)

        self.namespace.acquire_write_lock()
        try:
            if accessed_only:
                data = dict(self.accessed_dict.items())
            else:
                data = dict(self.items())

            # Save the data
            if not data and 'session' in self.namespace:
                del self.namespace['session']
            else:
                self.namespace['session'] = data
        finally:
            self.namespace.release_write_lock()
        if self.is_new:
            self.request['set_cookie'] = True

    def revert(self):
        """Revert the session to its original state from its first
        access in the request"""
        self.clear()
        self.update(self.accessed_dict)

    # TODO: I think both these methods should be removed. They're from
    # the original mod_python code i was ripping off but they really
    # have no use here.
    def lock(self):
        """Locks this session against other processes/threads. This is
        automatic when load/save is called.

        ***use with caution*** and always with a corresponding 'unlock'
        inside a "finally:" block, as a stray lock typically cannot be
        unlocked without shutting down the whole application.

        """
        self.namespace.acquire_write_lock()

    def unlock(self):
        """Unlocks this session against other processes/threads. This
        is automatic when load/save is called.

        ***use with caution*** and always within a "finally:" block, as
        a stray lock typically cannot be unlocked without shutting down
        the whole application.

        """
        self.namespace.release_write_lock()
+
class CookieSession(Session):
    """Pure cookie-based session

    Options recognized when using cookie-based sessions are slightly
    more restricted than general sessions.

    ``key``
        The name the cookie should be set to.
    ``timeout``
        How long session data is considered valid. This is used
        regardless of the cookie being present or not to determine
        whether session data is still valid.
    ``encrypt_key``
        The key to use for the session encryption, if not provided the
        session will not be encrypted.
    ``validate_key``
        The key used to sign the encrypted session
    ``cookie_domain``
        Domain to use for the cookie.
    ``secure``
        Whether or not the cookie should only be sent over SSL.

    """
    def __init__(self, request, key='beaker.session.id', timeout=None,
                 cookie_expires=True, cookie_domain=None, encrypt_key=None,
                 validate_key=None, secure=False, **kwargs):

        # Encryption is optional, but requires an AES backend.
        if not crypto.has_aes and encrypt_key:
            raise InvalidCryptoBackendError("No AES library is installed, can't generate "
                                            "encrypted cookie-only Session.")

        self.request = request
        self.key = key
        self.timeout = timeout
        self.cookie_expires = cookie_expires
        self.encrypt_key = encrypt_key
        self.validate_key = validate_key
        self.request['set_cookie'] = False
        self.secure = secure
        self._domain = cookie_domain
        self._path = '/'

        try:
            cookieheader = request['cookie']
        except KeyError:
            cookieheader = ''

        # Signing is mandatory for cookie-only sessions.
        if validate_key is None:
            raise BeakerException("No validate_key specified for Cookie only "
                                  "Session.")

        try:
            self.cookie = SignedCookie(validate_key, input=cookieheader)
        except Cookie.CookieError:
            self.cookie = SignedCookie(validate_key, input=None)

        self['_id'] = self._make_id()
        self.is_new = True

        # If we have a cookie, load it
        if self.key in self.cookie and self.cookie[self.key].value is not None:
            self.is_new = False
            try:
                self.update(self._decrypt_data())
            # NOTE(review): bare except — a cookie that fails to decrypt
            # or unpickle is silently treated as empty.
            except:
                pass
            if self.timeout is not None and time.time() - \
               self['_accessed_time'] > self.timeout:
                self.clear()
            self.accessed_dict = self.copy()
            self._create_cookie()

    def created(self):
        # Epoch timestamp recorded when the cookie payload was created.
        return self['_creation_time']
    created = property(created)

    def id(self):
        # The generated session id lives inside the payload itself.
        return self['_id']
    id = property(id)

    def _set_domain(self, domain):
        self['_domain'] = domain
        self._domain = domain

    def _get_domain(self):
        return self._domain

    domain = property(_get_domain, _set_domain)

    def _set_path(self, path):
        self['_path'] = path
        self._path = path

    def _get_path(self):
        return self._path

    path = property(_get_path, _set_path)

    def _encrypt_data(self):
        """Serialize, encipher, and base64 the session dict"""
        if self.encrypt_key:
            # Fresh 8-char nonce per save; it is prepended to the payload
            # so _decrypt_data can derive the same key.
            nonce = b64encode(os.urandom(40))[:8]
            encrypt_key = crypto.generateCryptoKeys(self.encrypt_key,
                                                    self.validate_key + nonce, 1)
            data = pickle.dumps(self.copy(), 2)
            return nonce + b64encode(crypto.aesEncrypt(data, encrypt_key))
        else:
            data = pickle.dumps(self.copy(), 2)
            return b64encode(data)

    def _decrypt_data(self):
        """Bas64, decipher, then un-serialize the data for the session
        dict"""
        if self.encrypt_key:
            nonce = self.cookie[self.key].value[:8]
            encrypt_key = crypto.generateCryptoKeys(self.encrypt_key,
                                                    self.validate_key + nonce, 1)
            payload = b64decode(self.cookie[self.key].value[8:])
            data = crypto.aesDecrypt(payload, encrypt_key)
            # NOTE(review): pickle.loads on cookie data relies entirely on
            # the HMAC signature check in SignedCookie; never relax it.
            return pickle.loads(data)
        else:
            data = b64decode(self.cookie[self.key].value)
            return pickle.loads(data)

    def _make_id(self):
        return md5(md5(
            "%f%s%f%s" % (time.time(), id({}), random.random(), getpid())
                ).hexdigest()
        ).hexdigest()

    def save(self, accessed_only=False):
        """Saves the data for this session to persistent storage"""
        if accessed_only and self.is_new:
            return
        if accessed_only:
            # Only refresh the access time: revert to the data seen at load.
            self.clear()
            self.update(self.accessed_dict)
        self._create_cookie()

    def expire(self):
        """Delete the 'expires' attribute on this Session, if any."""

        self.pop('_expires', None)

    def _create_cookie(self):
        if '_creation_time' not in self:
            self['_creation_time'] = time.time()
        if '_id' not in self:
            self['_id'] = self._make_id()
        self['_accessed_time'] = time.time()

        # Same expiry semantics as Session: True -> session cookie,
        # False -> year-2038, timedelta -> relative, datetime -> absolute.
        if self.cookie_expires is not True:
            if self.cookie_expires is False:
                expires = datetime.fromtimestamp( 0x7FFFFFFF )
            elif isinstance(self.cookie_expires, timedelta):
                expires = datetime.today() + self.cookie_expires
            elif isinstance(self.cookie_expires, datetime):
                expires = self.cookie_expires
            else:
                raise ValueError("Invalid argument for cookie_expires: %s"
                                 % repr(self.cookie_expires))
            self['_expires'] = expires
        elif '_expires' in self:
            expires = self['_expires']
        else:
            expires = None

        val = self._encrypt_data()
        # The whole session must fit into a single cookie header.
        if len(val) > 4064:
            raise BeakerException("Cookie value is too long to store")

        self.cookie[self.key] = val
        if '_domain' in self:
            self.cookie[self.key]['domain'] = self['_domain']
        elif self._domain:
            self.cookie[self.key]['domain'] = self._domain
        if self.secure:
            self.cookie[self.key]['secure'] = True

        self.cookie[self.key]['path'] = self.get('_path', '/')

        if expires:
            self.cookie[self.key]['expires'] = \
                expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" )
        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def delete(self):
        """Delete the cookie, and clear the session"""
        # Send a delete cookie request (_delete_cookie is inherited
        # from Session)
        self._delete_cookie()
        self.clear()

    def invalidate(self):
        """Clear the contents and start a new session"""
        self.delete()
        self['_id'] = self._make_id()
+
+
class SessionObject(object):
    """Session proxy/lazy creator

    This object proxies access to the actual session object, so that in
    the case that the session hasn't been used before, it will be
    setup. This avoids creating and loading the session from persistent
    storage unless it's actually used during the request.

    """
    def __init__(self, environ, **params):
        # Write through __dict__ directly so the __setattr__ proxy below
        # does not trigger session creation during construction.
        self.__dict__['_params'] = params
        self.__dict__['_environ'] = environ
        self.__dict__['_sess'] = None
        self.__dict__['_headers'] = []

    def _session(self):
        """Lazy initial creation of session object"""
        if self.__dict__['_sess'] is None:
            params = self.__dict__['_params']
            environ = self.__dict__['_environ']
            self.__dict__['_headers'] = req = {'cookie_out': None}
            req['cookie'] = environ.get('HTTP_COOKIE')
            if params.get('type') == 'cookie':
                self.__dict__['_sess'] = CookieSession(req, **params)
            else:
                self.__dict__['_sess'] = Session(req, use_cookies=True,
                                                 **params)
        return self.__dict__['_sess']

    def __getattr__(self, attr):
        return getattr(self._session(), attr)

    def __setattr__(self, attr, value):
        setattr(self._session(), attr, value)

    def __delattr__(self, name):
        self._session().__delattr__(name)

    def __getitem__(self, key):
        return self._session()[key]

    def __setitem__(self, key, value):
        self._session()[key] = value

    def __delitem__(self, key):
        self._session().__delitem__(key)

    def __repr__(self):
        return self._session().__repr__()

    def __iter__(self):
        """Only works for proxying to a dict"""
        return iter(self._session().keys())

    def __contains__(self, key):
        # 'in' instead of the Python-2-only dict.has_key (same semantics).
        return key in self._session()

    def get_by_id(self, id):
        """Loads a session given a session ID"""
        params = self.__dict__['_params']
        session = Session({}, use_cookies=False, id=id, **params)
        if session.is_new:
            return None
        return session

    def save(self):
        # Mark dirty so persist() writes the full session out.
        self.__dict__['_dirty'] = True

    def delete(self):
        self.__dict__['_dirty'] = True
        self._session().delete()

    def persist(self):
        """Persist the session to the storage

        If its set to autosave, then the entire session will be saved
        regardless of if save() has been called. Otherwise, just the
        accessed time will be updated if save() was not called, or
        the session will be saved if save() was called.

        """
        if self.__dict__['_params'].get('auto'):
            self._session().save()
        else:
            if self.__dict__.get('_dirty'):
                self._session().save()
            else:
                self._session().save(accessed_only=True)

    def dirty(self):
        """Return True if save()/delete() was called during this request."""
        return self.__dict__.get('_dirty', False)

    def accessed(self):
        """Returns whether or not the session has been accessed"""
        return self.__dict__['_sess'] is not None
diff --git a/pyload/lib/beaker/synchronization.py b/pyload/lib/beaker/synchronization.py
new file mode 100644
index 000000000..761303707
--- /dev/null
+++ b/pyload/lib/beaker/synchronization.py
@@ -0,0 +1,381 @@
+"""Synchronization functions.
+
+File- and mutex-based mutual exclusion synchronizers are provided,
+as well as a name-based mutex which locks within an application
+based on a string name.
+
+"""
+
+import os
+import sys
+import tempfile
+
+try:
+ import threading as _threading
+except ImportError:
+ import dummy_threading as _threading
+
+# check for fcntl module
+try:
+ sys.getwindowsversion()
+ has_flock = False
+except:
+ try:
+ import fcntl
+ has_flock = True
+ except ImportError:
+ has_flock = False
+
+from beaker import util
+from beaker.exceptions import LockError
+
+__all__ = ["file_synchronizer", "mutex_synchronizer", "null_synchronizer",
+ "NameLock", "_threading"]
+
+
class NameLock(object):
    """a proxy for an RLock object that is stored in a name based
    registry.

    Multiple threads can get a reference to the same RLock based on the
    name alone, and synchronize operations related to that name.

    """
    locks = util.WeakValuedRegistry()

    class NLContainer(object):
        """Holds the actual lock so the registry can weak-reference it."""

        def __init__(self, reentrant):
            lock_cls = _threading.RLock if reentrant else _threading.Lock
            self.lock = lock_cls()

        def __call__(self):
            return self.lock

    def __init__(self, identifier=None, reentrant=False):
        if identifier is None:
            # Anonymous lock: never shared through the registry.
            self._lock = NameLock.NLContainer(reentrant)
        else:
            # Shared lock: every NameLock built with this identifier gets
            # the same container, hence the same underlying lock.
            self._lock = NameLock.locks.get(identifier, NameLock.NLContainer,
                                            reentrant)

    def acquire(self, wait=True):
        return self._lock().acquire(wait)

    def release(self):
        self._lock().release()
+
+
# Process-wide registry of synchronizers keyed by (identifier, class), so
# concurrent callers asking for the same resource share one synchronizer.
_synchronizers = util.WeakValuedRegistry()
def _synchronizer(identifier, cls, **kwargs):
    # Fetch-or-create the shared synchronizer instance for this identifier.
    return _synchronizers.sync_get((identifier, cls), cls, identifier, **kwargs)
+
+
def file_synchronizer(identifier, **kwargs):
    """Return a flock()-based synchronizer when fcntl and a lock_dir are
    available; otherwise fall back to an in-process mutex synchronizer."""
    if has_flock and 'lock_dir' in kwargs:
        return _synchronizer(identifier, FileSynchronizer, **kwargs)
    return mutex_synchronizer(identifier)
+
+
def mutex_synchronizer(identifier, **kwargs):
    # In-process synchronizer backed by a threading.Condition.
    return _synchronizer(identifier, ConditionSynchronizer, **kwargs)
+
+
class null_synchronizer(object):
    """A no-op synchronizer: every acquire 'succeeds' and releases do nothing."""

    def acquire_write_lock(self, wait=True):
        """Always succeeds immediately."""
        return True

    def acquire_read_lock(self):
        """No-op; read access needs no coordination here."""

    def release_write_lock(self):
        """No-op counterpart of acquire_write_lock."""

    def release_read_lock(self):
        """No-op counterpart of acquire_read_lock."""

    # Generic aliases for callers that don't distinguish read/write.
    acquire = acquire_write_lock
    release = release_write_lock
+
+
class SynchronizerImpl(object):
    """Base for reentrant readers/writer synchronizers.

    Tracks per-thread lock state (read vs write mode, reentrancy depth)
    and delegates the actual locking to the do_acquire_*/do_release_*
    hooks implemented by subclasses.
    """
    def __init__(self):
        # Per-thread lock state; every thread sees its own SyncState.
        self._state = util.ThreadLocal()

    class SyncState(object):
        """One thread's reentrancy count and current read/write mode."""
        __slots__ = 'reentrantcount', 'writing', 'reading'

        def __init__(self):
            self.reentrantcount = 0
            self.writing = False
            self.reading = False

    def state(self):
        # Lazily create this thread's state record on first access.
        if not self._state.has():
            state = SynchronizerImpl.SyncState()
            self._state.put(state)
            return state
        else:
            return self._state.get()
    state = property(state)

    def release_read_lock(self):
        state = self.state

        # Guard against mismatched acquire/release sequences.
        if state.writing:
            raise LockError("lock is in writing state")
        if not state.reading:
            raise LockError("lock is not in reading state")

        # Only the outermost release actually unlocks.
        if state.reentrantcount == 1:
            self.do_release_read_lock()
            state.reading = False

        state.reentrantcount -= 1

    def acquire_read_lock(self, wait = True):
        state = self.state

        if state.writing:
            raise LockError("lock is in writing state")

        if state.reentrantcount == 0:
            # Outermost acquire: take the real lock; only count it when it
            # was actually obtained (always, when wait is True).
            x = self.do_acquire_read_lock(wait)
            if (wait or x):
                state.reentrantcount += 1
                state.reading = True
            return x
        elif state.reading:
            # Reentrant acquire by the same thread: just bump the counter.
            state.reentrantcount += 1
            return True

    def release_write_lock(self):
        state = self.state

        if state.reading:
            raise LockError("lock is in reading state")
        if not state.writing:
            raise LockError("lock is not in writing state")

        if state.reentrantcount == 1:
            self.do_release_write_lock()
            state.writing = False

        state.reentrantcount -= 1

    release = release_write_lock

    def acquire_write_lock(self, wait = True):
        state = self.state

        if state.reading:
            raise LockError("lock is in reading state")

        if state.reentrantcount == 0:
            x = self.do_acquire_write_lock(wait)
            if (wait or x):
                state.reentrantcount += 1
                state.writing = True
            return x
        elif state.writing:
            state.reentrantcount += 1
            return True

    acquire = acquire_write_lock

    # Subclass hooks performing the real locking.
    # NOTE(review): these stubs omit the ``wait`` parameter the acquire
    # hooks are called with — harmless since they are always overridden.
    def do_release_read_lock(self):
        raise NotImplementedError()

    def do_acquire_read_lock(self):
        raise NotImplementedError()

    def do_release_write_lock(self):
        raise NotImplementedError()

    def do_acquire_write_lock(self):
        raise NotImplementedError()
+
+
class FileSynchronizer(SynchronizerImpl):
    """a synchronizer which locks using flock().

    Adapted for Python/multithreads from Apache::Session::Lock::File,
    http://search.cpan.org/src/CWEST/Apache-Session-1.81/Session/Lock/File.pm

    This module does not unlink temporary files,
    because it interferes with proper locking. This can cause
    problems on certain systems (Linux) whose file systems (ext2) do not
    perform well with lots of files in one directory. To prevent this
    you should use a script to clean out old files from your lock directory.

    """
    def __init__(self, identifier, lock_dir):
        super(FileSynchronizer, self).__init__()
        # One descriptor per thread: flock() locks are held per-fd.
        self._filedescriptor = util.ThreadLocal()

        # Fall back to the system temp dir when no lock_dir is configured.
        # (The original code had a redundant `else: lock_dir = lock_dir`.)
        if lock_dir is None:
            lock_dir = tempfile.gettempdir()

        self.filename = util.encoded_path(
            lock_dir,
            [identifier],
            extension='.lock'
        )

    def _filedesc(self):
        return self._filedescriptor.get()
    _filedesc = property(_filedesc)

    def _open(self, mode):
        """Open (and cache per-thread) the lock file with the given flags."""
        filedescriptor = self._filedesc
        if filedescriptor is None:
            filedescriptor = os.open(self.filename, mode)
            self._filedescriptor.put(filedescriptor)
        return filedescriptor

    def do_acquire_read_lock(self, wait):
        """Take a shared flock; non-blocking when wait is False.

        Returns True on success; False only in the non-blocking case when
        the lock is held elsewhere.
        """
        filedescriptor = self._open(os.O_CREAT | os.O_RDONLY)
        if not wait:
            try:
                fcntl.flock(filedescriptor, fcntl.LOCK_SH | fcntl.LOCK_NB)
                return True
            except IOError:
                # Lock is held elsewhere: close and forget our descriptor.
                os.close(filedescriptor)
                self._filedescriptor.remove()
                return False
        else:
            fcntl.flock(filedescriptor, fcntl.LOCK_SH)
            return True

    def do_acquire_write_lock(self, wait):
        """Take an exclusive flock; non-blocking when wait is False."""
        filedescriptor = self._open(os.O_CREAT | os.O_WRONLY)
        if not wait:
            try:
                fcntl.flock(filedescriptor, fcntl.LOCK_EX | fcntl.LOCK_NB)
                return True
            except IOError:
                os.close(filedescriptor)
                self._filedescriptor.remove()
                return False
        else:
            fcntl.flock(filedescriptor, fcntl.LOCK_EX)
            return True

    def do_release_read_lock(self):
        self._release_all_locks()

    def do_release_write_lock(self):
        self._release_all_locks()

    def _release_all_locks(self):
        """Unlock, close and drop this thread's cached descriptor."""
        filedescriptor = self._filedesc
        if filedescriptor is not None:
            fcntl.flock(filedescriptor, fcntl.LOCK_UN)
            os.close(filedescriptor)
            self._filedescriptor.remove()
+
+
class ConditionSynchronizer(SynchronizerImpl):
    """a synchronizer using a Condition.

    Implements a readers/writer scheme: any number of asynchronous
    (read) operations may run concurrently, while a synchronous (write)
    operation requires exclusive access.
    """

    def __init__(self, identifier):
        super(ConditionSynchronizer, self).__init__()

        # counts how many asynchronous methods are executing.
        # Renamed from 'async', which became a reserved keyword in
        # Python 3.7 and made this module a syntax error there.
        self.asynchronous = 0

        # pointer to thread that is the current sync operation
        self.current_sync_operation = None

        # condition object to lock on
        self.condition = _threading.Condition(_threading.Lock())

    def do_acquire_read_lock(self, wait=True):
        self.condition.acquire()
        try:
            # see if a synchronous operation is waiting to start
            # or is already running, in which case we wait (or just
            # give up and return)
            if wait:
                while self.current_sync_operation is not None:
                    self.condition.wait()
            else:
                if self.current_sync_operation is not None:
                    return False

            self.asynchronous += 1
        finally:
            self.condition.release()

        if not wait:
            return True

    def do_release_read_lock(self):
        self.condition.acquire()
        try:
            self.asynchronous -= 1

            # check if we are the last asynchronous reader thread
            # out the door.
            if self.asynchronous == 0:
                # yes. so if a sync operation is waiting, notify_all to wake
                # it up
                if self.current_sync_operation is not None:
                    self.condition.notify_all()
            elif self.asynchronous < 0:
                raise LockError("Synchronizer error - too many "
                                "release_read_locks called")
        finally:
            self.condition.release()

    def do_acquire_write_lock(self, wait=True):
        self.condition.acquire()
        try:
            # here, we are not a synchronous reader, and after returning,
            # assuming waiting or immediate availability, we will be.

            if wait:
                # if another sync is working, wait
                while self.current_sync_operation is not None:
                    self.condition.wait()
            else:
                # if another sync is working,
                # we dont want to wait, so forget it
                if self.current_sync_operation is not None:
                    return False

            # establish ourselves as the current sync
            # this indicates to other read/write operations
            # that they should wait until this is None again
            # (current_thread instead of the deprecated currentThread)
            self.current_sync_operation = _threading.current_thread()

            # now wait again for asyncs to finish
            if self.asynchronous > 0:
                if wait:
                    # wait
                    self.condition.wait()
                else:
                    # we dont want to wait, so forget it
                    self.current_sync_operation = None
                    return False
        finally:
            self.condition.release()

        if not wait:
            return True

    def do_release_write_lock(self):
        self.condition.acquire()
        try:
            if self.current_sync_operation is not _threading.current_thread():
                raise LockError("Synchronizer error - current thread doesnt "
                                "have the write lock")

            # reset the current sync operation so
            # another can get it
            self.current_sync_operation = None

            # tell everyone to get ready
            self.condition.notify_all()
        finally:
            # everyone go !!
            self.condition.release()
diff --git a/pyload/lib/beaker/util.py b/pyload/lib/beaker/util.py
new file mode 100644
index 000000000..04c9617c5
--- /dev/null
+++ b/pyload/lib/beaker/util.py
@@ -0,0 +1,302 @@
+"""Beaker utilities"""
+
+try:
+ import thread as _thread
+ import threading as _threading
+except ImportError:
+ import dummy_thread as _thread
+ import dummy_threading as _threading
+
+from datetime import datetime, timedelta
+import os
+import string
+import types
+import weakref
+import warnings
+import sys
+
+py3k = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
+py24 = sys.version_info < (2,5)
+jython = sys.platform.startswith('java')
+
+if py3k or jython:
+ import pickle
+else:
+ import cPickle as pickle
+
+from beaker.converters import asbool
+from threading import local as _tlocal
+
+
+__all__ = ["ThreadLocal", "Registry", "WeakValuedRegistry", "SyncDict",
+ "encoded_path", "verify_directory"]
+
+
def verify_directory(dir):
    """Verify that *dir* exists, creating it (with parents) if necessary.

    Creation races with other threads/processes are tolerated: if
    ``os.makedirs`` fails (e.g. a concurrent caller created the
    directory first), existence is re-checked and the call retried,
    re-raising after more than 5 failed attempts.
    """
    tries = 0
    while not os.access(dir, os.F_OK):
        try:
            tries += 1
            os.makedirs(dir)
        except OSError:
            # Narrowed from a bare ``except``: only filesystem errors are
            # retryable; KeyboardInterrupt/SystemExit must propagate.
            if tries > 5:
                raise
+
+
def deprecated(message):
    """Build a decorator that flags the wrapped callable as deprecated.

    Each invocation of the decorated function first emits *message* as a
    DeprecationWarning (attributed to the caller), then forwards all
    arguments to the original function unchanged.
    """
    def decorate(fn):
        def shim(*args, **kargs):
            warnings.warn(message, DeprecationWarning, 2)
            return fn(*args, **kargs)
        # Propagate identity by hand (functools.wraps would also work).
        shim.__name__ = fn.__name__
        shim.__doc__ = "%s\n\n%s" % (message, fn.__doc__)
        return shim
    return decorate
+
class ThreadLocal(object):
    """A single slot whose stored value is independent per thread."""

    __slots__ = '_tlocal'

    def __init__(self):
        # One threading.local instance backs every per-thread value.
        self._tlocal = _tlocal()

    def put(self, value):
        """Bind *value* for the calling thread."""
        self._tlocal.value = value

    def has(self):
        """Return True if the calling thread has stored a value."""
        return hasattr(self._tlocal, 'value')

    def get(self, default=None):
        """Return this thread's value, or *default* if nothing was put."""
        try:
            return self._tlocal.value
        except AttributeError:
            return default

    def remove(self):
        """Discard the calling thread's value (AttributeError if unset)."""
        del self._tlocal.value
+
class SyncDict(object):
    """
    An efficient/threadsafe singleton map algorithm, a.k.a.
    "get a value based on this key, and create if not found or not
    valid" paradigm:

        exists && isvalid ? get : create

    Designed to work with weakref dictionaries to expect items
    to asynchronously disappear from the dictionary.

    Use python 2.3.3 or greater ! a major bug was just fixed in Nov.
    2003 that was driving me nuts with garbage collection/weakrefs in
    this section.

    """
    def __init__(self):
        # Plain (non-reentrant) lock guarding creation; lookups are lock-free.
        self.mutex = _thread.allocate_lock()
        self.dict = {}

    def get(self, key, createfunc, *args, **kwargs):
        """Return dict[key], creating it via *createfunc* if absent.

        Fast path reads without the mutex; the KeyError branch covers a
        value disappearing between has_key() and the lookup (weakrefs).
        """
        try:
            if self.has_key(key):
                return self.dict[key]
            else:
                return self.sync_get(key, createfunc, *args, **kwargs)
        except KeyError:
            return self.sync_get(key, createfunc, *args, **kwargs)

    def sync_get(self, key, createfunc, *args, **kwargs):
        # Slow path: re-check under the mutex so only one thread creates.
        self.mutex.acquire()
        try:
            try:
                if self.has_key(key):
                    return self.dict[key]
                else:
                    return self._create(key, createfunc, *args, **kwargs)
            except KeyError:
                return self._create(key, createfunc, *args, **kwargs)
        finally:
            self.mutex.release()

    def _create(self, key, createfunc, *args, **kwargs):
        # Store first, then return, so concurrent readers see the value.
        self[key] = obj = createfunc(*args, **kwargs)
        return obj

    def has_key(self, key):
        # NOTE(review): dict.has_key is Python-2-only, like the rest of
        # this vendored module.
        return self.dict.has_key(key)

    def __contains__(self, key):
        return self.dict.__contains__(key)
    def __getitem__(self, key):
        return self.dict.__getitem__(key)
    def __setitem__(self, key, value):
        self.dict.__setitem__(key, value)
    def __delitem__(self, key):
        return self.dict.__delitem__(key)
    def clear(self):
        self.dict.clear()
+
+
class WeakValuedRegistry(SyncDict):
    """SyncDict backed by weak references: entries vanish automatically
    once the value is no longer referenced elsewhere."""
    def __init__(self):
        # NOTE(review): uses an RLock where SyncDict uses a plain lock —
        # presumably createfunc may re-enter the registry; confirm.
        self.mutex = _threading.RLock()
        self.dict = weakref.WeakValueDictionary()
+
# Lazily-imported digest function (see encoded_path below).
sha1 = None
def encoded_path(root, identifiers, extension = ".enc", depth = 3,
                 digest_filenames=True):

    """Generate a unique file-accessible path from the given list of
    identifiers starting at the given root directory."""
    ident = "_".join(identifiers)

    # Import the digest on first use only; beaker.crypto picks the best
    # available sha1 implementation.
    global sha1
    if sha1 is None:
        from beaker.crypto import sha1

    if digest_filenames:
        if py3k:
            ident = sha1(ident.encode('utf-8')).hexdigest()
        else:
            ident = sha1(ident).hexdigest()

    # Guard against path separators sneaking into the filename.
    ident = os.path.basename(ident)

    # Fan the files out over <depth>-1 nested prefix directories
    # (e.g. a/ab/abc1234.enc) to keep directory sizes manageable.
    tokens = []
    for d in range(1, depth):
        tokens.append(ident[0:d])

    dir = os.path.join(root, *tokens)
    verify_directory(dir)

    return os.path.join(dir, ident + extension)
+
+
def verify_options(opt, types, error):
    """Coerce *opt* into one of *types*, raising ``Exception(error)`` on
    failure.

    If *opt* already is an instance of one of *types* it is returned
    unchanged (except that an all-whitespace string is rejected).
    Otherwise each candidate type is tried in order: list/tuple split a
    comma-separated string, bool goes through :func:`asbool`, anything
    else is called as a constructor.  The first successful coercion wins.
    """
    if not isinstance(opt, types):
        if not isinstance(types, tuple):
            types = (types,)
        coerced = False
        for typ in types:
            try:
                if typ in (list, tuple):
                    opt = [x.strip() for x in opt.split(',')]
                else:
                    if typ == bool:
                        typ = asbool
                    opt = typ(opt)
                coerced = True
            except Exception:
                # Narrowed from a bare ``except``: a failed coercion just
                # moves on to the next type, but KeyboardInterrupt and
                # SystemExit must not be swallowed.
                pass
            if coerced:
                break
        if not coerced:
            raise Exception(error)
    elif isinstance(opt, str) and not opt.strip():
        raise Exception("Empty strings are invalid for: %s" % error)
    return opt
+
+
def verify_rules(params, ruleset):
    """Apply verify_options to every rule whose key appears in *params*,
    replacing each value with its coerced form.  Returns *params*."""
    for rule in ruleset:
        key, types, message = rule
        if key not in params:
            continue
        params[key] = verify_options(params[key], types, message)
    return params
+
+
def coerce_session_params(params):
    """Validate and coerce beaker session configuration *params* in place.

    Each rule is (key, allowed types, error message); values are coerced
    by verify_options via verify_rules.
    """
    rules = [
        ('data_dir', (str, types.NoneType), "data_dir must be a string "
         "referring to a directory."),
        ('lock_dir', (str, types.NoneType), "lock_dir must be a string referring to a "
         "directory."),
        ('type', (str, types.NoneType), "Session type must be a string."),
        ('cookie_expires', (bool, datetime, timedelta), "Cookie expires was "
         "not a boolean, datetime, or timedelta instance."),
        ('cookie_domain', (str, types.NoneType), "Cookie domain must be a "
         "string."),
        ('id', (str,), "Session id must be a string."),
        ('key', (str,), "Session key must be a string."),
        ('secret', (str, types.NoneType), "Session secret must be a string."),
        # BUG FIX: the validate_key and encrypt_key error messages were
        # swapped, so a bad value reported the wrong option name.
        ('validate_key', (str, types.NoneType), "Session validate_key must be "
         "a string."),
        ('encrypt_key', (str, types.NoneType), "Session encrypt_key must be "
         "a string."),
        ('secure', (bool, types.NoneType), "Session secure must be a boolean."),
        ('timeout', (int, types.NoneType), "Session timeout must be an "
         "integer."),
        ('auto', (bool, types.NoneType), "Session is created if accessed."),
    ]
    return verify_rules(params, rules)
+
+
def coerce_cache_params(params):
    """Validate and coerce beaker cache configuration *params* in place.

    Same (key, types, message) rule format as coerce_session_params;
    delegated to verify_rules.  Note: ``types.NoneType`` pins this to
    Python 2, like the rest of the module.
    """
    rules = [
        ('data_dir', (str, types.NoneType), "data_dir must be a string "
         "referring to a directory."),
        ('lock_dir', (str, types.NoneType), "lock_dir must be a string referring to a "
         "directory."),
        ('type', (str,), "Cache type must be a string."),
        ('enabled', (bool, types.NoneType), "enabled must be true/false "
         "if present."),
        ('expire', (int, types.NoneType), "expire must be an integer representing "
         "how many seconds the cache is valid for"),
        # (sic: "seperated" below is a runtime string, left untouched)
        ('regions', (list, tuple, types.NoneType), "Regions must be a "
         "comma seperated list of valid regions")
    ]
    return verify_rules(params, rules)
+
+
def parse_cache_config_options(config, include_defaults=True):
    """Parse configuration options and validate for use with the
    CacheManager"""

    # Load default cache options
    if include_defaults:
        options= dict(type='memory', data_dir=None, expire=None,
                      log_file=None)
    else:
        options = {}
    # Strip the 'beaker.cache.' / 'cache.' prefixes; later keys win.
    # (iteritems: Python 2 only.)
    for key, val in config.iteritems():
        if key.startswith('beaker.cache.'):
            options[key[13:]] = val
        if key.startswith('cache.'):
            options[key[6:]] = val
    coerce_cache_params(options)

    # Set cache to enabled if not turned off
    if 'enabled' not in options:
        options['enabled'] = True

    # Configure region dict if regions are available
    regions = options.pop('regions', None)
    if regions:
        region_configs = {}
        for region in regions:
            # Setup the default cache options
            region_options = dict(data_dir=options.get('data_dir'),
                                  lock_dir=options.get('lock_dir'),
                                  type=options.get('type'),
                                  enabled=options['enabled'],
                                  expire=options.get('expire'))
            region_len = len(region) + 1
            # Move '<region>.<opt>' keys into the per-region dict.
            # NOTE(review): pops while iterating keys() — safe on py2
            # where keys() is a list snapshot; would break on py3.
            for key in options.keys():
                if key.startswith('%s.' % region):
                    region_options[key[region_len:]] = options.pop(key)
            coerce_cache_params(region_options)
            region_configs[region] = region_options
        options['cache_regions'] = region_configs
    return options
+
def func_namespace(func):
    """Generates a unique namespace for a function"""
    # im_func/im_class are Python 2 bound-method attributes; on py3 this
    # branch would never fire and plain functions fall through below.
    kls = None
    if hasattr(func, 'im_func'):
        kls = func.im_class
        func = func.im_func

    if kls:
        return '%s.%s' % (kls.__module__, kls.__name__)
    else:
        return '%s.%s' % (func.__module__, func.__name__)
diff --git a/pyload/lib/bottle.py b/pyload/lib/bottle.py
new file mode 100644
index 000000000..b00bda1c9
--- /dev/null
+++ b/pyload/lib/bottle.py
@@ -0,0 +1,3251 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Bottle is a fast and simple micro-framework for small web applications. It
+offers request dispatching (Routes) with url parameter support, templates,
+a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
+template engines - all in a single file and with no dependencies other than the
+Python Standard Library.
+
+Homepage and documentation: http://bottlepy.org/
+
+Copyright (c) 2012, Marcel Hellkamp.
+License: MIT (see LICENSE for details)
+"""
+
+from __future__ import with_statement
+
+__author__ = 'Marcel Hellkamp'
+__version__ = '0.11.4'
+__license__ = 'MIT'
+
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
    from optparse import OptionParser
    _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
    _opt = _cmd_parser.add_option
    _opt("--version", action="store_true", help="show version number.")
    _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
    _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
    _opt("-p", "--plugin", action="append", help="install additional plugin/s.")
    _opt("--debug", action="store_true", help="start server in debug mode.")
    _opt("--reload", action="store_true", help="auto-reload on file changes.")
    _cmd_options, _cmd_args = _cmd_parser.parse_args()
    # gevent monkey-patching must happen before the stdlib imports below.
    if _cmd_options.server and _cmd_options.server.startswith('gevent'):
        import gevent.monkey; gevent.monkey.patch_all()
+
+import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
+ os, re, subprocess, sys, tempfile, threading, time, urllib, warnings
+
+from datetime import date as datedate, datetime, timedelta
+from tempfile import TemporaryFile
+from traceback import format_exc, print_exc
+
+try: from json import dumps as json_dumps, loads as json_lds
+except ImportError: # pragma: no cover
+ try: from simplejson import dumps as json_dumps, loads as json_lds
+ except ImportError:
+ try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
+ except ImportError:
+ def json_dumps(data):
+ raise ImportError("JSON support requires Python 2.6 or simplejson.")
+ json_lds = json_dumps
+
+
+
+# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
+# It ain't pretty but it works... Sorry for the mess.
+
# Interpreter version flags used throughout the compatibility shims.
py = sys.version_info
py3k = py >= (3,0,0)
py25 = py < (2,6,0)
py31 = (3,1,0) <= py < (3,2,0)

# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]

# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
    _stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
    _stdout = lambda x: sys.stdout.write(x)
    _stderr = lambda x: sys.stderr.write(x)
+
+# Lots of stdlib and builtin differences.
+if py3k:
+ import http.client as httplib
+ import _thread as thread
+ from urllib.parse import urljoin, SplitResult as UrlSplitResult
+ from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
+ urlunquote = functools.partial(urlunquote, encoding='latin1')
+ from http.cookies import SimpleCookie
+ from collections import MutableMapping as DictMixin
+ import pickle
+ from io import BytesIO
+ basestring = str
+ unicode = str
+ json_loads = lambda s: json_lds(touni(s))
+ callable = lambda x: hasattr(x, '__call__')
+ imap = map
+else: # 2.x
+ import httplib
+ import thread
+ from urlparse import urljoin, SplitResult as UrlSplitResult
+ from urllib import urlencode, quote as urlquote, unquote as urlunquote
+ from Cookie import SimpleCookie
+ from itertools import imap
+ import cPickle as pickle
+ from StringIO import StringIO as BytesIO
+ if py25:
+ from UserDict import DictMixin
+ def next(it): return it.next()
+ bytes = str
+ else: # 2.6, 2.7
+ from collections import MutableMapping as DictMixin
+ json_loads = json_lds
+
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
    # to-bytes: encode text; pass anything else through bytes().
    return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
    # to-unicode: decode byte strings; stringify everything else.
    return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
# "Native" string type: text on py3, bytes on py2.
tonat = touni if py3k else tob
+
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
    from io import TextIOWrapper
    class NCTextIOWrapper(TextIOWrapper):
        # "NC" = non-closing: close() is a no-op so the underlying
        # buffer stays usable after the wrapper is discarded.
        def close(self): pass # Keep wrapped buffer open.
+
# File uploads (which are implemented as empty FieldStorage instances...)
# have a negative truth value. That makes no sense, here is a fix.
class FieldStorage(cgi.FieldStorage):
    def __nonzero__(self): return bool(self.list or self.file)
    if py3k: __bool__ = __nonzero__
+
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
    """Best-effort functools.update_wrapper: silently skip targets whose
    attributes cannot be assigned (e.g. bound methods)."""
    try:
        functools.update_wrapper(wrapper, wrapped, *a, **ka)
    except AttributeError:
        pass
+
+
+
+# These helpers are used at module level and need to be defined first.
+# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
+
def depr(message):
    """Emit *message* as a DeprecationWarning attributed to the caller's
    caller (stacklevel=3 skips bottle's own frames)."""
    warnings.warn(message, DeprecationWarning, stacklevel=3)
+
def makelist(data): # This is just too handy
    """Coerce *data* into a list: containers are converted element-wise,
    a truthy scalar becomes a one-element list, falsy values give []."""
    if isinstance(data, (tuple, list, set, dict)):
        return list(data)
    if data:
        return [data]
    return []
+
+
class DictProperty(object):
    """Descriptor that stores its value under a key of a dict-like
    instance attribute (named *attr*), computing it lazily via the
    decorated getter and optionally refusing writes/deletes."""

    def __init__(self, attr, key=None, read_only=False):
        self.attr = attr
        self.key = key
        self.read_only = read_only

    def __call__(self, func):
        # Adopt the getter's metadata; key defaults to the getter's name.
        functools.update_wrapper(self, func, updated=[])
        self.getter = func
        if not self.key:
            self.key = func.__name__
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        storage = getattr(obj, self.attr)
        if self.key not in storage:
            storage[self.key] = self.getter(obj)
        return storage[self.key]

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        del getattr(obj, self.attr)[self.key]
+
+
class cached_property(object):
    """Compute the wrapped method once per instance, then shadow the
    descriptor with a plain instance attribute holding the result.
    Deleting that attribute re-arms the property."""

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        result = self.func(obj)
        # Writing into obj.__dict__ means this descriptor (which has no
        # __set__) is bypassed on every later access.
        obj.__dict__[self.func.__name__] = result
        return result
+
+
class lazy_attribute(object):
    """Descriptor that computes its value once and caches it on the
    owning *class* (not the instance), replacing the descriptor."""

    def __init__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func

    def __get__(self, obj, cls):
        result = self.getter(cls)
        setattr(cls, self.__name__, result)
        return result
+
+
+
+
+
+
+###############################################################################
+# Exceptions and Events ########################################################
+###############################################################################
+
+
class BottleException(Exception):
    """ A base class for exceptions used by bottle. """
    pass
+
+
+
+
+
+
+###############################################################################
+# Routing ######################################################################
+###############################################################################
+
+
class RouteError(BottleException):
    """ This is a base class for all routing related exceptions """


class RouteReset(BottleException):
    """ If raised by a plugin or request handler, the route is reset and all
        plugins are re-applied. """

class RouterUnknownModeError(RouteError):
    # Raised nowhere in the visible code; kept for API compatibility.
    pass


class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router """


class RouteBuildError(RouteError):
    """ The route could not been built """
+
+
class Router(object):
    ''' A Router is an ordered collection of route->target pairs. It is used to
        efficiently match WSGI requests against a number of routes and return
        the first target that satisfies the request. The target may be anything,
        usually a string, ID or callable object. A route consists of a path-rule
        and a HTTP method.

        The path-rule is either a static path (e.g. `/contact`) or a dynamic
        path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
        and details on the matching order are described in docs:`routing`.
    '''

    default_pattern = '[^/]+'
    default_filter = 're'
    #: Sorry for the mess. It works. Trust me.
    rule_syntax = re.compile('(\\\\*)'\
        '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
        '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
        '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')

    def __init__(self, strict=False):
        self.rules = {} # A {rule: Rule} mapping
        self.builder = {} # A rule/name->build_info mapping
        self.static = {} # Cache for static routes: {path: {method: target}}
        self.dynamic = [] # Cache for dynamic routes. See _compile()
        #: If true, static routes are no longer checked first.
        self.strict_order = strict
        self.filters = {'re': self.re_filter, 'int': self.int_filter,
                        'float': self.float_filter, 'path': self.path_filter}

    # Built-in wildcard filters: each returns (regexp, to_python, to_url).
    def re_filter(self, conf):
        return conf or self.default_pattern, None, None

    def int_filter(self, conf):
        return r'-?\d+', int, lambda x: str(int(x))

    def float_filter(self, conf):
        return r'-?[\d.]+', float, lambda x: str(float(x))

    def path_filter(self, conf):
        return r'.+?', None, None

    def add_filter(self, name, func):
        ''' Add a filter. The provided function is called with the configuration
        string as parameter and must return a (regexp, to_python, to_url) tuple.
        The first element is a string, the last two are callables or None. '''
        self.filters[name] = func

    def parse_rule(self, rule):
        ''' Parses a rule into a (name, filter, conf) token stream. If mode is
            None, name contains a static rule part. '''
        offset, prefix = 0, ''
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            if len(g[0])%2: # Escaped wildcard
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix: yield prefix, None, None
            # g[1:4]: old ':name#conf#' syntax; g[4:7]: new '<name:filter:conf>'
            name, filtr, conf = g[1:4] if not g[2] is None else g[4:7]
            if not filtr: filtr = self.default_filter
            yield name, filtr, conf or None
            offset, prefix = match.end(), ''
        if offset <= len(rule) or prefix:
            yield prefix+rule[offset:], None, None

    def add(self, rule, method, target, name=None):
        ''' Add a new route or replace the target for an existing route. '''
        if rule in self.rules:
            self.rules[rule][method] = target
            if name: self.builder[name] = self.builder[rule]
            return

        target = self.rules[rule] = {method: target}

        # Build pattern and other structures for dynamic routes
        anons = 0 # Number of anonymous wildcards
        pattern = '' # Regular expression pattern
        filters = [] # Lists of wildcard input filters
        builder = [] # Data structure for the URL builder
        is_static = True
        for key, mode, conf in self.parse_rule(rule):
            if mode:
                is_static = False
                mask, in_filter, out_filter = self.filters[mode](conf)
                if key:
                    pattern += '(?P<%s>%s)' % (key, mask)
                else:
                    pattern += '(?:%s)' % mask
                    key = 'anon%d' % anons; anons += 1
                if in_filter: filters.append((key, in_filter))
                builder.append((key, out_filter or str))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))
        self.builder[rule] = builder
        if name: self.builder[name] = builder

        if is_static and not self.strict_order:
            self.static[self.build(rule)] = target
            return

        # Strip capture groups so the combined "big regex" below keeps
        # exactly one group per route (used with match.lastindex).
        def fpat_sub(m):
            return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
        flat_pattern = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, pattern)

        try:
            re_match = re.compile('^(%s)$' % pattern).match
        except re.error:
            raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))

        def match(path):
            """ Return an url-argument dictionary. """
            url_args = re_match(path).groupdict()
            for name, wildcard_filter in filters:
                try:
                    url_args[name] = wildcard_filter(url_args[name])
                except ValueError:
                    raise HTTPError(400, 'Path has wrong format.')
            return url_args

        try:
            # Pack this route into the newest combined regex if it still
            # has group capacity; otherwise start a new combined entry.
            combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern)
            self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
            self.dynamic[-1][1].append((match, target))
        except (AssertionError, IndexError): # AssertionError: Too many groups
            self.dynamic.append((re.compile('(^%s$)' % flat_pattern),
                                [(match, target)]))
        return match

    def build(self, _name, *anons, **query):
        ''' Build an URL by filling the wildcards in a rule. '''
        builder = self.builder.get(_name)
        if not builder: raise RouteBuildError("No route with that name.", _name)
        try:
            for i, value in enumerate(anons): query['anon%d'%i] = value
            url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
            # Leftover keyword arguments become the query string.
            return url if not query else url+'?'+urlencode(query)
        except KeyError:
            raise RouteBuildError('Missing URL argument: %r' % _e().args[0])

    def match(self, environ):
        ''' Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). '''
        path, targets, urlargs = environ['PATH_INFO'] or '/', None, {}
        if path in self.static:
            targets = self.static[path]
        else:
            for combined, rules in self.dynamic:
                match = combined.match(path)
                if not match: continue
                # lastindex identifies which packed route matched.
                getargs, targets = rules[match.lastindex - 1]
                urlargs = getargs(path) if getargs else {}
                break

        if not targets:
            raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
        method = environ['REQUEST_METHOD'].upper()
        if method in targets:
            return targets[method], urlargs
        # HEAD falls back to GET; 'ANY' matches every method.
        if method == 'HEAD' and 'GET' in targets:
            return targets['GET'], urlargs
        if 'ANY' in targets:
            return targets['ANY'], urlargs
        allowed = [verb for verb in targets if verb != 'ANY']
        if 'GET' in allowed and 'HEAD' not in allowed:
            allowed.append('HEAD')
        raise HTTPError(405, "Method not allowed.", Allow=",".join(allowed))
+
+
class Route(object):
    ''' This class wraps a route callback along with route specific metadata and
        configuration and applies Plugins on demand. It is also responsible for
        turning an URL path rule into a regular expression usable by the Router.
    '''

    def __init__(self, app, rule, method, callback, name=None,
                 plugins=None, skiplist=None, **config):
        #: The application this route is installed to.
        self.app = app
        #: The path-rule string (e.g. ``/wiki/:page``).
        self.rule = rule
        #: The HTTP method as a string (e.g. ``GET``).
        self.method = method
        #: The original callback with no plugins applied. Useful for introspection.
        self.callback = callback
        #: The name of the route (if specified) or ``None``.
        self.name = name or None
        #: A list of route-specific plugins (see :meth:`Bottle.route`).
        self.plugins = plugins or []
        #: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
        self.skiplist = skiplist or []
        #: Additional keyword arguments passed to the :meth:`Bottle.route`
        #: decorator are stored in this dictionary. Used for route-specific
        #: plugin configuration and meta-data.
        self.config = ConfigDict(config)

    def __call__(self, *a, **ka):
        depr("Some APIs changed to return Route() instances instead of"\
             " callables. Make sure to use the Route.call method and not to"\
             " call Route instances directly.")
        return self.call(*a, **ka)

    @cached_property
    def call(self):
        ''' The route callback with all plugins applied. This property is
            created on demand and then cached to speed up subsequent requests.'''
        return self._make_callback()

    def reset(self):
        ''' Forget any cached values. The next time :attr:`call` is accessed,
            all plugins are re-applied. '''
        # Dropping the cached_property result re-arms plugin application.
        self.__dict__.pop('call', None)

    def prepare(self):
        ''' Do all on-demand work immediately (useful for debugging).'''
        self.call

    @property
    def _context(self):
        depr('Switch to Plugin API v2 and access the Route object directly.')
        return dict(rule=self.rule, method=self.method, callback=self.callback,
                    name=self.name, app=self.app, config=self.config,
                    apply=self.plugins, skip=self.skiplist)

    def all_plugins(self):
        ''' Yield all Plugins affecting this route. '''
        unique = set()
        # Route-specific plugins take precedence over app plugins
        # (reversed: last installed is applied first here).
        for p in reversed(self.app.plugins + self.plugins):
            if True in self.skiplist: break
            name = getattr(p, 'name', False)
            if name and (name in self.skiplist or name in unique): continue
            if p in self.skiplist or type(p) in self.skiplist: continue
            if name: unique.add(name)
            yield p

    def _make_callback(self):
        callback = self.callback
        for plugin in self.all_plugins():
            try:
                if hasattr(plugin, 'apply'):
                    # Plugin API v2 gets the Route itself; v1 a context dict.
                    api = getattr(plugin, 'api', 1)
                    context = self if api > 1 else self._context
                    callback = plugin.apply(callback, context)
                else:
                    callback = plugin(callback)
            except RouteReset: # Try again with changed configuration.
                return self._make_callback()
            if not callback is self.callback:
                update_wrapper(callback, self.callback)
        return callback

    def __repr__(self):
        return '<%s %r %r>' % (self.method, self.rule, self.callback)
+
+
+
+
+
+
+###############################################################################
+# Application Object ###########################################################
+###############################################################################
+
+
+class Bottle(object):
+ """ Each Bottle object represents a single, distinct web application and
+ consists of routes, callbacks, plugins, resources and configuration.
+ Instances are callable WSGI applications.
+
+ :param catchall: If true (default), handle all exceptions. Turn off to
+ let debugging middleware handle exceptions.
+ """
+
    def __init__(self, catchall=True, autojson=True):
        #: If true, most exceptions are caught and returned as :exc:`HTTPError`
        self.catchall = catchall

        #: A :class:`ResourceManager` for application files
        self.resources = ResourceManager()

        #: A :class:`ConfigDict` for app specific configuration.
        self.config = ConfigDict()
        self.config.autojson = autojson

        self.routes = [] # List of installed :class:`Route` instances.
        self.router = Router() # Maps requests to :class:`Route` instances.
        self.error_handler = {}

        # Core plugins (hooks first so they wrap everything else).
        self.plugins = [] # List of installed plugins.
        self.hooks = HooksPlugin()
        self.install(self.hooks)
        if self.config.autojson:
            self.install(JSONPlugin())
        self.install(TemplatePlugin())
+
+
    def mount(self, prefix, app, **options):
        ''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
            URL prefix. Example::

                root_app.mount('/admin/', admin_app)

            :param prefix: path prefix or `mount-point`. If it ends in a slash,
                that slash is mandatory.
            :param app: an instance of :class:`Bottle` or a WSGI application.

            All other parameters are passed to the underlying :meth:`route` call.
        '''
        if isinstance(app, basestring):
            prefix, app = app, prefix
            depr('Parameter order of Bottle.mount() changed.') # 0.10

        segments = [p for p in prefix.split('/') if p]
        if not segments: raise ValueError('Empty path prefix.')
        path_depth = len(segments)

        def mountpoint_wrapper():
            # Shift the prefix from PATH_INFO to SCRIPT_NAME, run the
            # mounted app, and re-wrap its response; always un-shift.
            try:
                request.path_shift(path_depth)
                rs = BaseResponse([], 200)
                def start_response(status, header):
                    rs.status = status
                    for name, value in header: rs.add_header(name, value)
                    return rs.body.append
                body = app(request.environ, start_response)
                body = itertools.chain(rs.body, body)
                return HTTPResponse(body, rs.status_code, **rs.headers)
            finally:
                request.path_shift(-path_depth)

        options.setdefault('skip', True)
        options.setdefault('method', 'ANY')
        options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
        options['callback'] = mountpoint_wrapper

        self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
        if not prefix.endswith('/'):
            self.route('/' + '/'.join(segments), **options)
+
    def merge(self, routes):
        ''' Merge the routes of another :class:`Bottle` application or a list of
            :class:`Route` objects into this application. The routes keep their
            'owner', meaning that the :data:`Route.app` attribute is not
            changed. '''
        if isinstance(routes, Bottle):
            routes = routes.routes
        for route in routes:
            self.add_route(route)
+
    def install(self, plugin):
        ''' Add a plugin to the list of plugins and prepare it for being
            applied to all routes of this application. A plugin may be a simple
            decorator or an object that implements the :class:`Plugin` API.
        '''
        if hasattr(plugin, 'setup'): plugin.setup(self)
        if not callable(plugin) and not hasattr(plugin, 'apply'):
            raise TypeError("Plugins must be callable or implement .apply()")
        self.plugins.append(plugin)
        # Existing routes must re-apply plugins on next access.
        self.reset()
        return plugin
+
    def uninstall(self, plugin):
        ''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
            object to remove all plugins that match that type, a string to remove
            all plugins with a matching ``name`` attribute or ``True`` to remove all
            plugins. Return the list of removed plugins. '''
        removed, remove = [], plugin
        # Iterate a reversed snapshot so deletions don't shift indices.
        for i, plugin in list(enumerate(self.plugins))[::-1]:
            if remove is True or remove is plugin or remove is type(plugin) \
            or getattr(plugin, 'name', True) == remove:
                removed.append(plugin)
                del self.plugins[i]
                if hasattr(plugin, 'close'): plugin.close()
        if removed: self.reset()
        return removed
+
    def run(self, **kwargs):
        ''' Calls :func:`run` with the same parameters. '''
        # Delegates to the module-level run() with this app as target.
        run(self, **kwargs)
+
    def reset(self, route=None):
        ''' Reset all routes (force plugins to be re-applied) and clear all
            caches. If an ID or route object is given, only that specific route
            is affected. '''
        if route is None: routes = self.routes
        elif isinstance(route, Route): routes = [route]
        else: routes = [self.routes[route]]
        for route in routes: route.reset()
        # In debug mode, re-apply plugins eagerly so errors surface now.
        if DEBUG:
            for route in routes: route.prepare()
        self.hooks.trigger('app_reset')
+
    def close(self):
        ''' Close the application and all installed plugins. '''
        for plugin in self.plugins:
            if hasattr(plugin, 'close'): plugin.close()
        self.stopped = True
+
    def match(self, environ):
        """ Search for a matching route and return a (:class:`Route` , urlargs)
            tuple. The second value is a dictionary with parameters extracted
            from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
        return self.router.match(environ)
+
    def get_url(self, routename, **kargs):
        """ Return a string that matches a named route """
        # Prepend SCRIPT_NAME so the URL is valid from the client's view.
        scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
        location = self.router.build(routename, **kargs).lstrip('/')
        return urljoin(urljoin('/', scriptname), location)
+
    def add_route(self, route):
        ''' Add a route object, but do not change the :data:`Route.app`
            attribute.'''
        self.routes.append(route)
        self.router.add(route.rule, route.method, route, name=route.name)
        if DEBUG: route.prepare()
+
    def route(self, path=None, method='GET', callback=None, name=None,
              apply=None, skip=None, **config):
        """ A decorator to bind a function to a request URL. Example::

                @app.route('/hello/:name')
                def hello(name):
                    return 'Hello %s' % name

            The ``:name`` part is a wildcard. See :class:`Router` for syntax
            details.

            :param path: Request path or a list of paths to listen to. If no
              path is specified, it is automatically generated from the
              signature of the function.
            :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
              methods to listen to. (default: `GET`)
            :param callback: An optional shortcut to avoid the decorator
              syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
            :param name: The name for this route. (default: None)
            :param apply: A decorator or plugin or a list of plugins. These are
              applied to the route callback in addition to installed plugins.
            :param skip: A list of plugins, plugin classes or names. Matching
              plugins are not installed to this route. ``True`` skips all.

            Any additional keyword arguments are stored as route-specific
            configuration and passed to plugins (see :meth:`Plugin.apply`).
        """
        # Bare-decorator usage: @app.route with no arguments.
        if callable(path): path, callback = None, path
        plugins = makelist(apply)
        skiplist = makelist(skip)
        def decorator(callback):
            # TODO: Documentation and tests
            if isinstance(callback, basestring): callback = load(callback)
            # One Route per (path, verb) combination.
            for rule in makelist(path) or yieldroutes(callback):
                for verb in makelist(method):
                    verb = verb.upper()
                    route = Route(self, rule, verb, callback, name=name,
                                  plugins=plugins, skiplist=skiplist, **config)
                    self.add_route(route)
            return callback
        return decorator(callback) if callback else decorator
+
+ def get(self, path=None, method='GET', **options):
+ """ Equals :meth:`route`. """
+ return self.route(path, method, **options)
+
+ def post(self, path=None, method='POST', **options):
+ """ Equals :meth:`route` with a ``POST`` method parameter. """
+ return self.route(path, method, **options)
+
+ def put(self, path=None, method='PUT', **options):
+ """ Equals :meth:`route` with a ``PUT`` method parameter. """
+ return self.route(path, method, **options)
+
+ def delete(self, path=None, method='DELETE', **options):
+ """ Equals :meth:`route` with a ``DELETE`` method parameter. """
+ return self.route(path, method, **options)
+
+ def error(self, code=500):
+ """ Decorator: Register an output handler for a HTTP error code"""
+ def wrapper(handler):
+ self.error_handler[int(code)] = handler
+ return handler
+ return wrapper
+
+ def hook(self, name):
+ """ Return a decorator that attaches a callback to a hook. Three hooks
+ are currently implemented:
+
+ - before_request: Executed once before each request
+ - after_request: Executed once after each request
+ - app_reset: Called whenever :meth:`reset` is called.
+ """
+ def wrapper(func):
+ self.hooks.add(name, func)
+ return func
+ return wrapper
+
+ def handle(self, path, method='GET'):
+ """ (deprecated) Execute the first matching route callback and return
+ the result. :exc:`HTTPResponse` exceptions are caught and returned.
+ If :attr:`Bottle.catchall` is true, other exceptions are caught as
+ well and returned as :exc:`HTTPError` instances (500).
+ """
+ depr("This method will change semantics in 0.10. Try to avoid it.")
+ if isinstance(path, dict):
+ return self._handle(path)
+ return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
+
+ def default_error_handler(self, res):
+ return tob(template(ERROR_PAGE_TEMPLATE, e=res))
+
    def _handle(self, environ):
        """ Dispatch a WSGI environ to the matching route callback and
            return its raw (un-cast) result. :exc:`HTTPResponse` exceptions
            are returned as values; any other exception is wrapped in a 500
            :exc:`HTTPError` unless :attr:`catchall` is false, in which
            case it propagates. """
        try:
            # Bind the thread-local request/response objects to this request.
            environ['bottle.app'] = self
            request.bind(environ)
            response.bind()
            route, args = self.router.match(environ)
            environ['route.handle'] = route
            environ['bottle.route'] = route
            environ['route.url_args'] = args
            return route.call(**args)
        except HTTPResponse:
            # The raised response object IS the result.
            return _e()
        except RouteReset:
            # A plugin asked for this route to be rebuilt; retry once.
            # NOTE(review): assumes RouteReset is only raised from
            # route.call(); if router.match() could raise it, `route`
            # would be unbound here — confirm against Router.
            route.reset()
            return self._handle(environ)
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception:
            if not self.catchall: raise
            stacktrace = format_exc()
            environ['wsgi.errors'].write(stacktrace)
            return HTTPError(500, "Internal Server Error", _e(), stacktrace)
+
    def _cast(self, out, peek=None):
        """ Try to convert the parameter into something WSGI compatible and set
        correct HTTP headers when possible.
        Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
        iterable of strings and iterable of unicodes
        """

        # Empty output is done here
        if not out:
            if 'Content-Length' not in response:
                response['Content-Length'] = 0
            return []
        # Join lists of byte or unicode strings. Mixed lists are NOT supported
        if isinstance(out, (tuple, list))\
        and isinstance(out[0], (bytes, unicode)):
            out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
        # Encode unicode strings
        if isinstance(out, unicode):
            out = out.encode(response.charset)
        # Byte Strings are just returned
        if isinstance(out, bytes):
            if 'Content-Length' not in response:
                response['Content-Length'] = len(out)
            return [out]
        # HTTPError or HTTPException (recursive, because they may wrap anything)
        # TODO: Handle these explicitly in handle() or make them iterable.
        if isinstance(out, HTTPError):
            out.apply(response)
            # Render through the registered (or default) error handler, then
            # recurse: the handler may return any supported type.
            out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
            return self._cast(out)
        if isinstance(out, HTTPResponse):
            out.apply(response)
            return self._cast(out.body)

        # File-like objects.
        if hasattr(out, 'read'):
            if 'wsgi.file_wrapper' in request.environ:
                # Prefer the server-provided (possibly sendfile-backed) wrapper.
                return request.environ['wsgi.file_wrapper'](out)
            elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
                return WSGIFileWrapper(out)

        # Handle Iterables. We peek into them to detect their inner type.
        try:
            out = iter(out)
            first = next(out)
            # Skip leading falsy chunks (e.g. empty strings) while peeking.
            while not first:
                first = next(out)
        except StopIteration:
            return self._cast('')
        except HTTPResponse:
            first = _e()
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception:
            if not self.catchall: raise
            first = HTTPError(500, 'Unhandled exception', _e(), format_exc())

        # These are the inner types allowed in iterator or generator objects.
        if isinstance(first, HTTPResponse):
            return self._cast(first)
        if isinstance(first, bytes):
            # Re-attach the peeked element in front of the rest.
            return itertools.chain([first], out)
        if isinstance(first, unicode):
            return imap(lambda x: x.encode(response.charset),
                        itertools.chain([first], out))
        return self._cast(HTTPError(500, 'Unsupported response type: %s'\
                                         % type(first)))
+
    def wsgi(self, environ, start_response):
        """ The bottle WSGI-interface.

            Handles the request, casts the result to a WSGI-compatible
            iterable, starts the response and returns the body. As a last
            resort, failures here render a minimal HTML 500 page directly
            (bypassing the normal error-handler machinery). """
        try:
            out = self._cast(self._handle(environ))
            # rfc2616 section 4.3: these responses must not carry a body.
            if response._status_code in (100, 101, 204, 304)\
            or environ['REQUEST_METHOD'] == 'HEAD':
                if hasattr(out, 'close'): out.close()
                out = []
            start_response(response._status_line, response.headerlist)
            return out
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception:
            if not self.catchall: raise
            err = '<h1>Critical error while processing request: %s</h1>' \
                  % html_escape(environ.get('PATH_INFO', '/'))
            if DEBUG:
                # Include exception and traceback details in debug mode only.
                err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
                       '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
                       % (html_escape(repr(_e())), html_escape(format_exc()))
            environ['wsgi.errors'].write(err)
            headers = [('Content-Type', 'text/html; charset=UTF-8')]
            start_response('500 INTERNAL SERVER ERROR', headers)
            return [tob(err)]
+
+ def __call__(self, environ, start_response):
+ ''' Each instance of :class:'Bottle' is a WSGI application. '''
+ return self.wsgi(environ, start_response)
+
+
+
+
+
+
+###############################################################################
+# HTTP and WSGI Tools ##########################################################
+###############################################################################
+
+
class BaseRequest(object):
    """ A wrapper for WSGI environment dictionaries that adds a lot of
        convenient access methods and properties. Most of them are read-only.

        Adding new attributes to a request actually adds them to the environ
        dictionary (as 'bottle.request.ext.<name>'). This is the recommended
        way to store and access request-specific data.
    """

    # NOTE: a plain string here acts as a single slot name ('environ'),
    # not as a tuple of slots.
    __slots__ = ('environ')

    #: Maximum size of memory buffer for :attr:`body` in bytes.
    MEMFILE_MAX = 102400
    #: Maximum number per GET or POST parameters per request
    MAX_PARAMS  = 100

    def __init__(self, environ=None):
        """ Wrap a WSGI environ dictionary. """
        #: The wrapped WSGI environ dictionary. This is the only real attribute.
        #: All other attributes actually are read-only properties.
        self.environ = {} if environ is None else environ
        self.environ['bottle.request'] = self

    @DictProperty('environ', 'bottle.app', read_only=True)
    def app(self):
        ''' Bottle application handling this request. '''
        # Set by Bottle._handle() via environ['bottle.app']; this body only
        # runs when the request was never dispatched through an app.
        raise RuntimeError('This request is not connected to an application.')

    @property
    def path(self):
        ''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
            broken clients and avoid the "empty path" edge case). '''
        return '/' + self.environ.get('PATH_INFO','').lstrip('/')

    @property
    def method(self):
        ''' The ``REQUEST_METHOD`` value as an uppercase string. '''
        return self.environ.get('REQUEST_METHOD', 'GET').upper()

    @DictProperty('environ', 'bottle.request.headers', read_only=True)
    def headers(self):
        ''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
            HTTP request headers. '''
        return WSGIHeaderDict(self.environ)

    def get_header(self, name, default=None):
        ''' Return the value of a request header, or a given default value. '''
        return self.headers.get(name, default)

    @DictProperty('environ', 'bottle.request.cookies', read_only=True)
    def cookies(self):
        """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
            decoded. Use :meth:`get_cookie` if you expect signed cookies. """
        cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
        # Truncate to MAX_PARAMS as a denial-of-service guard.
        cookies = list(cookies.values())[:self.MAX_PARAMS]
        return FormsDict((c.key, c.value) for c in cookies)

    def get_cookie(self, key, default=None, secret=None):
        """ Return the content of a cookie. To read a `Signed Cookie`, the
            `secret` must match the one used to create the cookie (see
            :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
            cookie or wrong signature), return a default value. """
        value = self.cookies.get(key)
        if secret and value:
            dec = cookie_decode(value, secret) # (key, value) tuple or None
            return dec[1] if dec and dec[0] == key else default
        return value or default

    @DictProperty('environ', 'bottle.request.query', read_only=True)
    def query(self):
        ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters", but
            not to be confused with "URL wildcards" as they are provided by the
            :class:`Router`. '''
        # Also cached under 'bottle.get' for the GET alias.
        get = self.environ['bottle.get'] = FormsDict()
        pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
        for key, value in pairs[:self.MAX_PARAMS]:
            get[key] = value
        return get

    @DictProperty('environ', 'bottle.request.forms', read_only=True)
    def forms(self):
        """ Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
            :class:`FormsDict`. All keys and values are strings. File uploads
            are stored separately in :attr:`files`. """
        forms = FormsDict()
        for name, item in self.POST.allitems():
            # Anything with a `filename` attribute is a file upload, not a form
            # value (see :attr:`files`).
            if not hasattr(item, 'filename'):
                forms[name] = item
        return forms

    @DictProperty('environ', 'bottle.request.params', read_only=True)
    def params(self):
        """ A :class:`FormsDict` with the combined values of :attr:`query` and
            :attr:`forms`. File uploads are stored in :attr:`files`. """
        params = FormsDict()
        # Form values are added second and therefore shadow query values on
        # plain (non-multi) access.
        for key, value in self.query.allitems():
            params[key] = value
        for key, value in self.forms.allitems():
            params[key] = value
        return params

    @DictProperty('environ', 'bottle.request.files', read_only=True)
    def files(self):
        """ File uploads parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The values are instances of
            :class:`cgi.FieldStorage`. The most important attributes are:

            filename
                The filename, if specified; otherwise None; this is the client
                side filename, *not* the file name on which it is stored (that's
                a temporary file you don't deal with)
            file
                The file(-like) object from which you can read the data.
            value
                The value as a *string*; for file uploads, this transparently
                reads the file every time you request the value. Do not do this
                on big files.
        """
        files = FormsDict()
        for name, item in self.POST.allitems():
            if hasattr(item, 'filename'):
                files[name] = item
        return files

    @DictProperty('environ', 'bottle.request.json', read_only=True)
    def json(self):
        ''' If the ``Content-Type`` header is ``application/json``, this
            property holds the parsed content of the request body. Only requests
            smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
            exhaustion. '''
        if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
        and 0 < self.content_length < self.MEMFILE_MAX:
            return json_loads(self.body.read(self.MEMFILE_MAX))
        return None

    @DictProperty('environ', 'bottle.request.body', read_only=True)
    def _body(self):
        # Read the entire wsgi.input stream once, buffering small bodies in
        # memory and large ones in a temporary file, then replace wsgi.input
        # with the seekable buffer so the body can be re-read.
        maxread = max(0, self.content_length)
        stream = self.environ['wsgi.input']
        body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
        while maxread > 0:
            part = stream.read(min(maxread, self.MEMFILE_MAX))
            if not part: break
            body.write(part)
            maxread -= len(part)
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body

    @property
    def body(self):
        """ The HTTP request body as a seek-able file-like object. Depending on
            :attr:`MEMFILE_MAX`, this is either a temporary file or a
            :class:`io.BytesIO` instance. Accessing this property for the first
            time reads and replaces the ``wsgi.input`` environ variable.
            Subsequent accesses just do a `seek(0)` on the file object. """
        self._body.seek(0)
        return self._body

    #: An alias for :attr:`query`.
    GET = query

    @DictProperty('environ', 'bottle.request.post', read_only=True)
    def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        post = FormsDict()
        # We default to application/x-www-form-urlencoded for everything that
        # is not multipart and take the fast path (also: 3.1 workaround)
        if not self.content_type.startswith('multipart/'):
            maxlen = max(0, min(self.content_length, self.MEMFILE_MAX))
            pairs = _parse_qsl(tonat(self.body.read(maxlen), 'latin1'))
            for key, value in pairs[:self.MAX_PARAMS]:
                post[key] = value
            return post

        # Multipart: delegate to cgi.FieldStorage with a minimal, safe environ.
        safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
        if py31:
            # Python 3.1 FieldStorage needs a text stream; wrap without closing.
            args['fp'] = NCTextIOWrapper(args['fp'], encoding='ISO-8859-1',
                                         newline='\n')
        elif py3k:
            args['encoding'] = 'ISO-8859-1'
        data = FieldStorage(**args)
        for item in (data.list or [])[:self.MAX_PARAMS]:
            post[item.name] = item if item.filename else item.value
        return post

    @property
    def COOKIES(self):
        ''' Alias for :attr:`cookies` (deprecated). '''
        depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
        return self.cookies

    @property
    def url(self):
        """ The full request URI including hostname and scheme. If your app
            lives behind a reverse proxy or load balancer and you get confusing
            results, make sure that the ``X-Forwarded-Host`` header is set
            correctly. """
        return self.urlparts.geturl()

    @DictProperty('environ', 'bottle.request.urlparts', read_only=True)
    def urlparts(self):
        ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
            The tuple contains (scheme, host, path, query_string and fragment),
            but the fragment is always empty because it is not visible to the
            server. '''
        env = self.environ
        # Prefer reverse-proxy headers over the raw WSGI values.
        http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
        host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
        if not host:
            # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
            host = env.get('SERVER_NAME', '127.0.0.1')
            port = env.get('SERVER_PORT')
            if port and port != ('80' if http == 'http' else '443'):
                host += ':' + port
        path = urlquote(self.fullpath)
        return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')

    @property
    def fullpath(self):
        """ Request path including :attr:`script_name` (if present). """
        return urljoin(self.script_name, self.path.lstrip('/'))

    @property
    def query_string(self):
        """ The raw :attr:`query` part of the URL (everything in between ``?``
            and ``#``) as a string. """
        return self.environ.get('QUERY_STRING', '')

    @property
    def script_name(self):
        ''' The initial portion of the URL's `path` that was removed by a higher
            level (server or routing middleware) before the application was
            called. This script path is returned with leading and tailing
            slashes. '''
        script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
        return '/' + script_name + '/' if script_name else '/'

    def path_shift(self, shift=1):
        ''' Shift path segments from :attr:`path` to :attr:`script_name` and
            vice versa.

           :param shift: The number of path segments to shift. May be negative
                         to change the shift direction. (default: 1)
        '''
        script = self.environ.get('SCRIPT_NAME','/')
        # Item assignment goes through __setitem__ and invalidates caches.
        self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)

    @property
    def content_length(self):
        ''' The request body length as an integer. The client is responsible to
            set this header. Otherwise, the real length of the body is unknown
            and -1 is returned. In this case, :attr:`body` will be empty. '''
        return int(self.environ.get('CONTENT_LENGTH') or -1)

    @property
    def content_type(self):
        ''' The Content-Type header as a lowercase-string (default: empty). '''
        return self.environ.get('CONTENT_TYPE', '').lower()

    @property
    def is_xhr(self):
        ''' True if the request was triggered by a XMLHttpRequest. This only
            works with JavaScript libraries that support the `X-Requested-With`
            header (most of the popular libraries do). '''
        requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
        return requested_with.lower() == 'xmlhttprequest'

    @property
    def is_ajax(self):
        ''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
        return self.is_xhr

    @property
    def auth(self):
        """ HTTP authentication data as a (user, password) tuple. This
            implementation currently supports basic (not digest) authentication
            only. If the authentication happened at a higher level (e.g. in the
            front web-server or a middleware), the password field is None, but
            the user field is looked up from the ``REMOTE_USER`` environ
            variable. On any errors, None is returned. """
        basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
        if basic: return basic
        ruser = self.environ.get('REMOTE_USER')
        if ruser: return (ruser, None)
        return None

    @property
    def remote_route(self):
        """ A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This does only
            work if all proxies support the ``X-Forwarded-For`` header. Note
            that this information can be forged by malicious clients. """
        proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
        if proxy: return [ip.strip() for ip in proxy.split(',')]
        remote = self.environ.get('REMOTE_ADDR')
        return [remote] if remote else []

    @property
    def remote_addr(self):
        """ The client IP as a string. Note that this information can be forged
            by malicious clients. """
        route = self.remote_route
        return route[0] if route else None

    def copy(self):
        """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """
        return Request(self.environ.copy())

    # Dict-like read access delegates straight to the environ dictionary.
    def get(self, value, default=None): return self.environ.get(value, default)
    def __getitem__(self, key): return self.environ[key]
    def __delitem__(self, key): self[key] = ""; del(self.environ[key])
    def __iter__(self): return iter(self.environ)
    def __len__(self): return len(self.environ)
    def keys(self): return self.environ.keys()
    def __setitem__(self, key, value):
        """ Change an environ value and clear all caches that depend on it. """

        if self.environ.get('bottle.request.readonly'):
            raise KeyError('The environ dictionary is read-only.')

        self.environ[key] = value
        todelete = ()

        # Invalidate the cached properties derived from the changed key.
        if key == 'wsgi.input':
            todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
        elif key == 'QUERY_STRING':
            todelete = ('query', 'params')
        elif key.startswith('HTTP_'):
            todelete = ('headers', 'cookies')

        for key in todelete:
            self.environ.pop('bottle.request.'+key, None)

    def __repr__(self):
        return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)

    def __getattr__(self, name):
        ''' Search in self.environ for additional user defined attributes. '''
        try:
            var = self.environ['bottle.request.ext.%s'%name]
            # Support descriptor-style extension attributes.
            return var.__get__(self) if hasattr(var, '__get__') else var
        except KeyError:
            raise AttributeError('Attribute %r not defined.' % name)

    def __setattr__(self, name, value):
        # 'environ' is the only real attribute; everything else is stored in
        # the environ dictionary under the 'bottle.request.ext.' prefix.
        if name == 'environ': return object.__setattr__(self, name, value)
        self.environ['bottle.request.ext.%s'%name] = value
+
+
+
+
+def _hkey(s):
+ return s.title().replace('_','-')
+
+
class HeaderProperty(object):
    """ Descriptor that maps an attribute onto a single entry of the
        owner's ``headers`` dict, with optional read/write conversion
        callables and a default for missing headers. """

    def __init__(self, name, reader=None, writer=str, default=''):
        self.name, self.default = name, default
        self.reader, self.writer = reader, writer
        self.__doc__ = 'Current value of the %r header.' % name.title()

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        raw = obj.headers.get(self.name, self.default)
        if self.reader:
            return self.reader(raw)
        return raw

    def __set__(self, obj, value):
        obj.headers[self.name] = self.writer(value)

    def __delete__(self, obj):
        del obj.headers[self.name]
+
+
class BaseResponse(object):
    """ Storage class for a response body as well as headers and cookies.

        This class does support dict-like case-insensitive item-access to
        headers, but is NOT a dict. Most notably, iterating over a response
        yields parts of the body and not the headers.
    """

    default_status = 200
    default_content_type = 'text/html; charset=UTF-8'

    # Header blacklist for specific response codes
    # (rfc2616 section 10.2.3 and 10.3.5)
    bad_headers = {
        204: set(('Content-Type',)),
        304: set(('Allow', 'Content-Encoding', 'Content-Language',
                  'Content-Length', 'Content-Range', 'Content-Type',
                  'Content-Md5', 'Last-Modified'))}

    def __init__(self, body='', status=None, **headers):
        # _cookies is created lazily in set_cookie()/COOKIES.
        self._cookies = None
        # Headers are stored under Header-Case keys, each mapped to a LIST of
        # values (see add_header).
        self._headers = {'Content-Type': [self.default_content_type]}
        self.body = body
        self.status = status or self.default_status
        if headers:
            for name, value in headers.items():
                self[name] = value

    def copy(self):
        ''' Returns a copy of self. '''
        # NOTE(review): body and cookies are NOT copied — looks intentional
        # (headers/status only); confirm before relying on a full clone.
        copy = Response()
        copy.status = self.status
        copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
        return copy

    def __iter__(self):
        return iter(self.body)

    def close(self):
        if hasattr(self.body, 'close'):
            self.body.close()

    @property
    def status_line(self):
        ''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
        return self._status_line

    @property
    def status_code(self):
        ''' The HTTP status code as an integer (e.g. 404).'''
        return self._status_code

    def _set_status(self, status):
        # Accept either an int code or a full "code reason" string; derive the
        # missing half from _HTTP_STATUS_LINES or a generic '%d Unknown'.
        if isinstance(status, int):
            code, status = status, _HTTP_STATUS_LINES.get(status)
        elif ' ' in status:
            status = status.strip()
            code = int(status.split()[0])
        else:
            raise ValueError('String status line without a reason phrase.')
        if not 100 <= code <= 999: raise ValueError('Status code out of range.')
        self._status_code = code
        self._status_line = str(status or ('%d Unknown' % code))

    def _get_status(self):
        return self._status_line

    status = property(_get_status, _set_status, None,
        ''' A writeable property to change the HTTP response status. It accepts
            either a numeric code (100-999) or a string with a custom reason
            phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
            :data:`status_code` are updated accordingly. The return value is
            always a status string. ''')
    del _get_status, _set_status

    @property
    def headers(self):
        ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
            view on the response headers. '''
        hdict = HeaderDict()
        # Share the underlying storage so the view stays writable.
        hdict.dict = self._headers
        return hdict

    # Dict-like access normalizes names via _hkey; plain item access sees the
    # LAST value of multi-value headers.
    def __contains__(self, name): return _hkey(name) in self._headers
    def __delitem__(self, name):  del self._headers[_hkey(name)]
    def __getitem__(self, name): return self._headers[_hkey(name)][-1]
    def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]

    def get_header(self, name, default=None):
        ''' Return the value of a previously defined header. If there is no
            header with that name, return a default value. '''
        return self._headers.get(_hkey(name), [default])[-1]

    def set_header(self, name, value):
        ''' Create a new response header, replacing any previously defined
            headers with the same name. '''
        self._headers[_hkey(name)] = [str(value)]

    def add_header(self, name, value):
        ''' Add an additional response header, not removing duplicates. '''
        self._headers.setdefault(_hkey(name), []).append(str(value))

    def iter_headers(self):
        ''' Yield (header, value) tuples, skipping headers that are not
            allowed with the current response status code. '''
        return self.headerlist

    def wsgiheader(self):
        depr('The wsgiheader method is deprecated. See headerlist.') #0.10
        return self.headerlist

    @property
    def headerlist(self):
        ''' WSGI conform list of (header, value) tuples. '''
        out = []
        headers = self._headers.items()
        # Drop headers forbidden for this status code (see bad_headers).
        if self._status_code in self.bad_headers:
            bad_headers = self.bad_headers[self._status_code]
            headers = [h for h in headers if h[0] not in bad_headers]
        out += [(name, val) for name, vals in headers for val in vals]
        if self._cookies:
            for c in self._cookies.values():
                out.append(('Set-Cookie', c.OutputString()))
        return out

    #: Typed shortcuts for the two most common headers.
    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int)

    @property
    def charset(self):
        """ Return the charset specified in the content-type header (default: UTF-8). """
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return 'UTF-8'

    @property
    def COOKIES(self):
        """ A dict-like SimpleCookie instance. This should not be used directly.
            See :meth:`set_cookie`. """
        depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
        if not self._cookies:
            self._cookies = SimpleCookie()
        return self._cookies

    def set_cookie(self, name, value, secret=None, **options):
        ''' Create a new cookie or replace an old one. If the `secret` parameter is
            set, create a `Signed Cookie` (described below).

            :param name: the name of the cookie.
            :param value: the value of the cookie.
            :param secret: a signature key required for signed cookies.

            Additionally, this method accepts all RFC 2109 attributes that are
            supported by :class:`cookie.Morsel`, including:

            :param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp. (default: None)
            :param domain: the domain that is allowed to read the cookie.
              (default: current domain)
            :param path: limits the cookie to a given path (default: current path)
            :param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side javascript to read this cookie
              (default: off, requires Python 2.6 or newer).

            If neither `expires` nor `max_age` is set (default), the cookie will
            expire at the end of the browser session (as soon as the browser
            window is closed).

            Signed cookies may store any pickle-able object and are
            cryptographically signed to prevent manipulation. Keep in mind that
            cookies are limited to 4kb in most browsers.

            Warning: Signed cookies are not encrypted (the client can still see
            the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information at client side.
        '''
        if not self._cookies:
            self._cookies = SimpleCookie()

        if secret:
            value = touni(cookie_encode((name, value), secret))
        elif not isinstance(value, basestring):
            raise TypeError('Secret key missing for non-string Cookie.')

        # NOTE(review): message typo ("to long") kept — it is a runtime string.
        if len(value) > 4096: raise ValueError('Cookie value to long.')
        self._cookies[name] = value

        for key, value in options.items():
            if key == 'max_age':
                if isinstance(value, timedelta):
                    value = value.seconds + value.days * 24 * 3600
            if key == 'expires':
                # Normalize datetime/date objects or numeric timestamps into
                # the RFC 1123 date format cookies require.
                if isinstance(value, (datedate, datetime)):
                    value = value.timetuple()
                elif isinstance(value, (int, float)):
                    value = time.gmtime(value)
                value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
            self._cookies[name][key.replace('_', '-')] = value

    def delete_cookie(self, key, **kwargs):
        ''' Delete a cookie. Be sure to use the same `domain` and `path`
            settings as used to create the cookie. '''
        kwargs['max_age'] = -1
        kwargs['expires'] = 0
        self.set_cookie(key, '', **kwargs)

    def __repr__(self):
        out = ''
        for name, value in self.headerlist:
            out += '%s: %s\n' % (name.title(), value.strip())
        return out
+
#: Thread-local storage for :class:`LocalRequest` and :class:`LocalResponse`
#: attributes. Each worker thread sees its own set of values (see
#: :func:`local_property`).
_lctx = threading.local()
+
def local_property(name):
    """ Build a property whose storage lives on the module-level
        thread-local context object (:data:`_lctx`) under *name*, so each
        thread sees an independent value. Reading before a value was set
        raises :exc:`RuntimeError`. """
    def fget(self):
        try:
            return getattr(_lctx, name)
        except AttributeError:
            raise RuntimeError("Request context not initialized.")

    def fset(self, value):
        setattr(_lctx, name, value)

    def fdel(self):
        delattr(_lctx, name)

    doc = 'Thread-local property stored in :data:`_lctx.%s`' % name
    return property(fget, fset, fdel, doc)
+
+
class LocalRequest(BaseRequest):
    ''' A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`request`). If accessed during a
        request/response cycle, this instance always refers to the *current*
        request (even on a multithreaded server). '''
    #: Re-binding is just re-initialization with a new environ.
    bind = BaseRequest.__init__
    #: The environ dict itself is thread-local, which makes every derived
    #: attribute thread-local too.
    environ = local_property('request_environ')
+
+
class LocalResponse(BaseResponse):
    ''' A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`response`). Its attributes are used
        to build the HTTP response at the end of the request/response cycle.
    '''
    #: Re-binding is just re-initialization.
    bind = BaseResponse.__init__
    # Every piece of mutable response state is stored thread-locally.
    _status_line = local_property('response_status_line')
    _status_code = local_property('response_status_code')
    _cookies     = local_property('response_cookies')
    _headers     = local_property('response_headers')
    body         = local_property('response_body')
+
#: Backwards-compatible aliases for the base wrapper classes.
Request = BaseRequest
Response = BaseResponse
+
class HTTPResponse(Response, BottleException):
    """ A :class:`Response` that is also an exception: raising it aborts
        request handling and sends this response to the client. """

    def __init__(self, body='', status=None, header=None, **headers):
        # `header` (singular dict) and `output` are the deprecated 0.9-era
        # call signature; fold them into the new-style parameters.
        if header or 'output' in headers:
            depr('Call signature changed (for the better)')
            if header: headers.update(header)
            if 'output' in headers: body = headers.pop('output')
        super(HTTPResponse, self).__init__(body, status, **headers)

    def apply(self, response):
        # Copy this object's full state onto another response (usually the
        # thread-local one), replacing status, headers, cookies and body.
        response._status_code = self._status_code
        response._status_line = self._status_line
        response._headers = self._headers
        response._cookies = self._cookies
        response.body = self.body

    def _output(self, value=None):
        # Deprecated accessor: acts as getter when called without a value.
        depr('Use HTTPResponse.body instead of HTTPResponse.output')
        if value is None: return self.body
        self.body = value

    #: Deprecated alias for :attr:`body` (same callable serves get and set).
    output = property(_output, _output, doc='Alias for .body')
+
class HTTPError(HTTPResponse):
    """ An :class:`HTTPResponse` for error pages; additionally carries the
        original exception instance and its formatted traceback, if any. """
    default_status = 500
    def __init__(self, status=None, body=None, exception=None, traceback=None, header=None, **headers):
        #: The exception that caused this error page (may be None).
        self.exception = exception
        #: The formatted traceback string (may be None).
        self.traceback = traceback
        super(HTTPError, self).__init__(body, status, header, **headers)
+
+
+
+
+
+###############################################################################
+# Plugins ######################################################################
+###############################################################################
+
+class PluginError(BottleException): pass
+
class JSONPlugin(object):
    ''' Bottle plugin that serializes dict return values to JSON and sets
        the Content-Type header accordingly. '''
    name = 'json'
    api = 2

    def __init__(self, json_dumps=json_dumps):
        self.json_dumps = json_dumps

    def apply(self, callback, route):
        dumps = self.json_dumps
        if not dumps:
            return callback

        def wrapper(*a, **ka):
            rv = callback(*a, **ka)
            if not isinstance(rv, dict):
                return rv
            # Serialize first so a failure raises *before* the header is set.
            json_response = dumps(rv)
            response.content_type = 'application/json'
            return json_response

        return wrapper
+
+
class HooksPlugin(object):
    ''' Plugin that runs registered callbacks before and after each request.
        Routes are only wrapped while at least one request hook is
        installed; the app's route cache is reset whenever the hook set
        transitions between empty and non-empty. '''
    name = 'hooks'
    api = 2

    _names = 'before_request', 'after_request', 'app_reset'

    def __init__(self):
        self.hooks = {}
        for hook_name in self._names:
            self.hooks[hook_name] = []
        self.app = None

    def _empty(self):
        # Only the two request hooks decide whether routes need wrapping.
        has_request_hooks = (self.hooks['before_request']
                             or self.hooks['after_request'])
        return not has_request_hooks

    def setup(self, app):
        self.app = app

    def add(self, name, func):
        ''' Attach a callback to a hook. '''
        previously_empty = self._empty()
        self.hooks.setdefault(name, []).append(func)
        if self.app and previously_empty and not self._empty():
            self.app.reset()

    def remove(self, name, func):
        ''' Remove a callback from a hook. '''
        previously_empty = self._empty()
        registered = self.hooks.get(name, [])
        if func in registered:
            registered.remove(func)
        if self.app and not previously_empty and self._empty():
            self.app.reset()

    def trigger(self, name, *a, **ka):
        ''' Trigger a hook and return a list of results. '''
        callbacks = self.hooks[name]
        if ka.pop('reversed', False):
            callbacks = callbacks[::-1]
        return [callback(*a, **ka) for callback in callbacks]

    def apply(self, callback, route):
        if self._empty():
            return callback

        def wrapper(*a, **ka):
            self.trigger('before_request')
            rv = callback(*a, **ka)
            # After-hooks run in reverse registration order (LIFO).
            self.trigger('after_request', reversed=True)
            return rv

        return wrapper
+
+
class TemplatePlugin(object):
    ''' This plugin applies the :func:`view` decorator to all routes with a
        `template` config parameter. If the parameter is a tuple, the second
        element must be a dict with additional options (e.g. `template_engine`)
        or default variables for the template. '''
    name = 'template'
    api = 2

    def apply(self, callback, route):
        conf = route.config.get('template')
        is_pair = isinstance(conf, (tuple, list)) and len(conf) == 2
        if is_pair:
            tpl_name, tpl_opts = conf
            return view(tpl_name, **tpl_opts)(callback)
        if isinstance(conf, str):
            if 'template_opts' in route.config:
                depr('The `template_opts` parameter is deprecated.') #0.9
                return view(conf, **route.config['template_opts'])(callback)
            return view(conf)(callback)
        return callback
+
+
+#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
    ''' PEP 302 finder/loader that creates a virtual package whose
        submodule imports are redirected to real modules named by a
        pattern (``impmask % modname``). '''

    def __init__(self, name, impmask):
        ''' Create a virtual package that redirects imports (see PEP 302). '''
        self.name = name
        self.impmask = impmask
        # Reuse an existing module object if one is already registered.
        self.module = sys.modules.setdefault(name, imp.new_module(name))
        self.module.__dict__.update({'__file__': __file__, '__path__': [],
                                    '__all__': [], '__loader__': self})
        # Side effect: installs this object as a global import hook.
        sys.meta_path.append(self)

    def find_module(self, fullname, path=None):
        # Only claim direct submodules of the virtual package.
        if '.' not in fullname: return
        packname, modname = fullname.rsplit('.', 1)
        if packname != self.name: return
        return self

    def load_module(self, fullname):
        if fullname in sys.modules: return sys.modules[fullname]
        packname, modname = fullname.rsplit('.', 1)
        # Import the real module and alias it under the virtual name.
        realname = self.impmask % modname
        __import__(realname)
        module = sys.modules[fullname] = sys.modules[realname]
        setattr(self.module, modname, module)
        module.__loader__ = self
        return module
+
+
+
+
+
+
+###############################################################################
+# Common Utilities #############################################################
+###############################################################################
+
+
class MultiDict(DictMixin):
    """ This dict stores multiple values per key, but behaves exactly like a
        normal dict in that it returns only the newest value for any given key.
        There are special methods available to access the full list of values.
    """

    def __init__(self, *a, **k):
        # Every key maps to a *list* of values; initial values become
        # one-element lists.
        self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())

    # Mapping protocol: plain item access operates on the newest (last) value.
    def __len__(self): return len(self.dict)
    def __iter__(self): return iter(self.dict)
    def __contains__(self, key): return key in self.dict
    def __delitem__(self, key): del self.dict[key]
    def __getitem__(self, key): return self.dict[key][-1]
    def __setitem__(self, key, value): self.append(key, value)
    def keys(self): return self.dict.keys()

    # Iteration helpers differ between Python 2 and 3; `py3k` is a
    # module-level flag defined elsewhere in this file.
    if py3k:
        def values(self): return (v[-1] for v in self.dict.values())
        def items(self): return ((k, v[-1]) for k, v in self.dict.items())
        def allitems(self):
            return ((k, v) for k, vl in self.dict.items() for v in vl)
        iterkeys = keys
        itervalues = values
        iteritems = items
        iterallitems = allitems

    else:
        def values(self): return [v[-1] for v in self.dict.values()]
        def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
        def iterkeys(self): return self.dict.iterkeys()
        def itervalues(self): return (v[-1] for v in self.dict.itervalues())
        def iteritems(self):
            return ((k, v[-1]) for k, v in self.dict.iteritems())
        def iterallitems(self):
            return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
        def allitems(self):
            return [(k, v) for k, vl in self.dict.iteritems() for v in vl]

    def get(self, key, default=None, index=-1, type=None):
        ''' Return the most recent value for a key.

            :param default: The default value to be returned if the key is not
                   present or the type conversion fails.
            :param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                into a specific type. Exception are suppressed and result in
                the default value to be returned.
        '''
        try:
            val = self.dict[key][index]
            return type(val) if type else val
        except Exception:
            # Missing key, out-of-range index or a failed cast all fall
            # back to the default value.
            pass
        return default

    def append(self, key, value):
        ''' Add a new value to the list of values for this key. '''
        self.dict.setdefault(key, []).append(value)

    def replace(self, key, value):
        ''' Replace the list of values with a single value. '''
        self.dict[key] = [value]

    def getall(self, key):
        ''' Return a (possibly empty) list of values for a key. '''
        return self.dict.get(key) or []

    #: Aliases for WTForms to mimic other multi-dict APIs (Django)
    getone = get
    getlist = getall
+
+
+
class FormsDict(MultiDict):
    ''' This :class:`MultiDict` subclass is used to store request form data.
        Additionally to the normal dict-like item access methods (which return
        unmodified data as native strings), this container also supports
        attribute-like access to its values. Attributes are automatically de-
        or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
        attributes default to an empty string. '''

    #: Encoding used for attribute values.
    input_encoding = 'utf8'
    #: If true (default), unicode strings are first encoded with `latin1`
    #: and then decoded to match :attr:`input_encoding`.
    recode_unicode = True

    def _fix(self, s, encoding=None):
        # WSGI hands over text decoded as latin1; re-encode and decode it
        # with the real input encoding. `unicode` is the py2/py3 alias
        # defined elsewhere in this file.
        if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
            s = s.encode('latin1')
        if isinstance(s, bytes): # Python 2 WSGI
            return s.decode(encoding or self.input_encoding)
        return s

    def decode(self, encoding=None):
        ''' Returns a copy with all keys and values de- or recoded to match
            :attr:`input_encoding`. Some libraries (e.g. WTForms) want a
            unicode dictionary. '''
        copy = FormsDict()
        enc = copy.input_encoding = encoding or self.input_encoding
        # The copy already holds decoded text, so disable re-recoding on it.
        copy.recode_unicode = False
        for key, value in self.allitems():
            copy.append(self._fix(key, enc), self._fix(value, enc))
        return copy

    def getunicode(self, name, default=None, encoding=None):
        # Like __getitem__, but decoded; falls back to `default` on a
        # missing key or a decode failure.
        try:
            return self._fix(self[name], encoding)
        except (UnicodeError, KeyError):
            return default

    def __getattr__(self, name, default=unicode()):
        # Without this guard, pickle generates a cryptic TypeError:
        if name.startswith('__') and name.endswith('__'):
            return super(FormsDict, self).__getattr__(name)
        return self.getunicode(name, default=default)
+
+
class HeaderDict(MultiDict):
    """ A case-insensitive version of :class:`MultiDict` that defaults to
        replace the old value instead of appending it. """

    def __init__(self, *a, **ka):
        self.dict = {}
        if a or ka: self.update(*a, **ka)

    # Every key passes through _hkey() (defined elsewhere in this file),
    # which presumably normalizes header-name capitalization -- TODO confirm.
    def __contains__(self, key): return _hkey(key) in self.dict
    def __delitem__(self, key): del self.dict[_hkey(key)]
    def __getitem__(self, key): return self.dict[_hkey(key)][-1]
    # Unlike MultiDict, plain assignment *replaces* all stored values,
    # and values are coerced to str.
    def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
    def append(self, key, value):
        self.dict.setdefault(_hkey(key), []).append(str(value))
    def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
    def getall(self, key): return self.dict.get(_hkey(key)) or []
    def get(self, key, default=None, index=-1):
        return MultiDict.get(self, _hkey(key), default, index)
    def filter(self, names):
        # Remove every listed header that is present; absent names are
        # silently ignored.
        for name in [_hkey(n) for n in names]:
            if name in self.dict:
                del self.dict[name]
+
+
class WSGIHeaderDict(DictMixin):
    ''' This dict-like class wraps a WSGI environ dict and provides convenient
        access to HTTP_* fields. Keys and values are native strings
        (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
        environment contains non-native string values, these are de- or encoded
        using a lossless 'latin1' character set.

        The API will remain stable even on changes to the relevant PEPs.
        Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
        that uses non-native strings.)
    '''
    #: List of keys that do not have a ``HTTP_`` prefix.
    cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')

    def __init__(self, environ):
        # The environ is wrapped, not copied: reads always reflect its
        # current state.
        self.environ = environ

    def _ekey(self, key):
        ''' Translate header field name to CGI/WSGI environ key. '''
        key = key.replace('-','_').upper()
        if key in self.cgikeys:
            return key
        return 'HTTP_' + key

    def raw(self, key, default=None):
        ''' Return the header value as is (may be bytes or unicode). '''
        return self.environ.get(self._ekey(key), default)

    def __getitem__(self, key):
        # tonat() (defined elsewhere in this file) converts to the native
        # string type using latin1.
        return tonat(self.environ[self._ekey(key)], 'latin1')

    # The wrapper is read-only: mutations must go through the environ itself.
    def __setitem__(self, key, value):
        raise TypeError("%s is read-only." % self.__class__)

    def __delitem__(self, key):
        raise TypeError("%s is read-only." % self.__class__)

    def __iter__(self):
        # Yield header names in HTTP style (e.g. 'Content-Type').
        for key in self.environ:
            if key[:5] == 'HTTP_':
                yield key[5:].replace('_', '-').title()
            elif key in self.cgikeys:
                yield key.replace('_', '-').title()

    def keys(self): return [x for x in self]
    def __len__(self): return len(self.keys())
    def __contains__(self, key): return self._ekey(key) in self.environ
+
+
class ConfigDict(dict):
    ''' A dict-subclass with some extras: keys can be read and written as
        attributes, uppercase attribute names auto-create nested
        ConfigDicts that act as name-spaces, other missing attributes
        return None, and calling an instance updates its values and
        returns the instance itself.

        >>> cfg = ConfigDict()
        >>> cfg.Namespace.value = 5
        >>> cfg.OtherNamespace(a=1, b=2)
        >>> cfg
        {'Namespace': {'value': 5}, 'OtherNamespace': {'a': 1, 'b': 2}}
    '''

    def __getattr__(self, key):
        # Uppercase names auto-create a nested namespace on first access.
        if key[0].isupper() and key not in self:
            self[key] = ConfigDict()
        return self.get(key)

    def __setattr__(self, key, value):
        # Names defined on dict itself (keys, items, ...) are off limits.
        if hasattr(dict, key):
            raise AttributeError('Read-only attribute.')
        # Never silently clobber a populated namespace.
        if isinstance(self.get(key), ConfigDict) and self[key]:
            raise AttributeError('Non-empty namespace attribute.')
        self[key] = value

    def __delattr__(self, key):
        if key in self:
            del self[key]

    def __call__(self, *a, **ka):
        for key, value in dict(*a, **ka).items():
            setattr(self, key, value)
        return self
+
+
class AppStack(list):
    """ A stack-like list. Calling it returns the head of the stack. """

    def __call__(self):
        """ Return the current default application. """
        return self[-1]

    def push(self, value=None):
        """ Add a new :class:`Bottle` instance to the stack """
        instance = value if isinstance(value, Bottle) else Bottle()
        self.append(instance)
        return instance
+
+
class WSGIFileWrapper(object):
    ''' Iterate a file-like object in fixed-size chunks, delegating the
        common file methods to the wrapped object when available. '''

    def __init__(self, fp, buffer_size=1024*64):
        self.fp = fp
        self.buffer_size = buffer_size
        # Expose the wrapped object's file API directly on the wrapper.
        for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))

    def __iter__(self):
        read, size = self.read, self.buffer_size
        chunk = read(size)
        while chunk:
            yield chunk
            chunk = read(size)
+
+
class ResourceManager(object):
    ''' This class manages a list of search paths and helps to find and open
        application-bound resources (files).

        :param base: default value for :meth:`add_path` calls.
        :param opener: callable used to open resources.
        :param cachemode: controls which lookups are cached. One of 'all',
                          'found' or 'none'.
    '''

    def __init__(self, base='./', opener=open, cachemode='all'):
        # BUGFIX: the `opener` parameter used to be ignored (the attribute
        # was hard-wired to the builtin `open`).
        self.opener = opener
        self.base = base
        self.cachemode = cachemode

        #: A list of search paths. See :meth:`add_path` for details.
        self.path = []
        #: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
        self.cache = {}

    def add_path(self, path, base=None, index=None, create=False):
        ''' Add a new path to the list of search paths. Return False if the
            path does not exist.

            :param path: The new search path. Relative paths are turned into
                an absolute and normalized form. If the path looks like a file
                (not ending in `/`), the filename is stripped off.
            :param base: Path used to absolutize relative search paths.
                Defaults to :attr:`base` which defaults to ``os.getcwd()``.
            :param index: Position within the list of search paths. Defaults
                to last index (appends to the list).
            :param create: If true, create the directory if it is missing.

            The `base` parameter makes it easy to reference files installed
            along with a python module or package::

                res.add_path('./resources/', __file__)
        '''
        base = os.path.abspath(os.path.dirname(base or self.base))
        path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
        path += os.sep
        # Re-adding an existing path moves it to the requested position.
        if path in self.path:
            self.path.remove(path)
        if create and not os.path.isdir(path):
            os.makedirs(path)
        if index is None:
            self.path.append(path)
        else:
            self.path.insert(index, path)
        # Any change to the search order invalidates cached lookups.
        self.cache.clear()
        return os.path.exists(path)

    def __iter__(self):
        ''' Iterate over all existing files in all registered paths. '''
        search = self.path[:]
        while search:
            path = search.pop()
            if not os.path.isdir(path): continue
            for name in os.listdir(path):
                full = os.path.join(path, name)
                if os.path.isdir(full): search.append(full)
                else: yield full

    def lookup(self, name):
        ''' Search for a resource and return an absolute file path, or `None`.

            The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
            future lookups. '''
        if name not in self.cache or DEBUG:
            for path in self.path:
                fpath = os.path.join(path, name)
                if os.path.isfile(fpath):
                    if self.cachemode in ('all', 'found'):
                        self.cache[name] = fpath
                    return fpath
            if self.cachemode == 'all':
                self.cache[name] = None
        # BUGFIX: use .get() so a miss with cachemode 'found'/'none' returns
        # None (as documented) instead of raising KeyError.
        return self.cache.get(name)

    def open(self, name, mode='r', *args, **kwargs):
        ''' Find a resource and return a file object, or raise IOError. '''
        fname = self.lookup(name)
        if not fname:
            raise IOError("Resource %r not found." % name)
        # BUGFIX: open the resolved absolute path, not the raw name.
        return self.opener(fname, mode=mode, *args, **kwargs)
+
+
+
+
+
+
+###############################################################################
+# Application Helper ###########################################################
+###############################################################################
+
+
def abort(code=500, text='Unknown Error: Application stopped.'):
    """ Aborts execution and causes a HTTP error.

        :param code: HTTP status code (default 500).
        :param text: body text passed to the raised :exc:`HTTPError`.
    """
    raise HTTPError(code, text)
+
+
def redirect(url, code=None):
    """ Aborts execution and causes a 303 or 302 redirect, depending on
        the HTTP protocol version. """
    # HTTP/1.1 clients get 303 (See Other); older clients get 302.
    if code is None:
        code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
    # Resolve relative targets against the current request URL.
    location = urljoin(request.url, url)
    res = HTTPResponse("", status=code, Location=location)
    # Keep any cookies already set on the thread-local response so they
    # are not lost by raising a fresh response object.
    if response._cookies:
        res._cookies = response._cookies
    raise res
+
+
+def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
+ ''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
+ fp.seek(offset)
+ while bytes > 0:
+ part = fp.read(min(bytes, maxread))
+ if not part: break
+ bytes -= len(part)
+ yield part
+
+
def static_file(filename, root, mimetype='auto', download=False):
    """ Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 206, 304, 403 or 404. Set Content-Type, Content-Encoding,
        Content-Length and Last-Modified header. Obey If-Modified-Since header
        and HEAD requests.
    """
    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    headers = dict()

    # Path-traversal guard: the resolved path must stay inside root.
    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")

    if mimetype == 'auto':
        # guess_type also reports a transfer encoding (e.g. for .gz files).
        mimetype, encoding = mimetypes.guess_type(filename)
        if mimetype: headers['Content-Type'] = mimetype
        if encoding: headers['Content-Encoding'] = encoding
    elif mimetype:
        headers['Content-Type'] = mimetype

    if download:
        # download=True -> use the real basename; a string -> custom name.
        download = os.path.basename(filename if download == True else download)
        headers['Content-Disposition'] = 'attachment; filename="%s"' % download

    stats = os.stat(filename)
    headers['Content-Length'] = clen = stats.st_size
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    headers['Last-Modified'] = lm

    # Conditional GET: answer 304 if the client copy is still fresh.
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(";")[0].strip())
        if ims is not None and ims >= int(stats.st_mtime):
            headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
            return HTTPResponse(status=304, **headers)

    # HEAD requests get headers only; GET gets the open file object.
    body = '' if request.method == 'HEAD' else open(filename, 'rb')

    headers["Accept-Ranges"] = "bytes"
    # NOTE(review): this assignment is immediately overwritten below and
    # appears to be redundant.
    ranges = request.environ.get('HTTP_RANGE')
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
        if not ranges:
            return HTTPError(416, "Requested Range Not Satisfiable")
        # Only the first requested range is served (no multipart ranges).
        offset, end = ranges[0]
        headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
        headers["Content-Length"] = str(end-offset)
        if body: body = _file_iter_range(body, offset, end-offset)
        return HTTPResponse(body, status=206, **headers)
    return HTTPResponse(body, **headers)
+
+
+
+
+
+
+###############################################################################
+# HTTP Utilities and MISC (TODO) ###############################################
+###############################################################################
+
+
def debug(mode=True):
    """ Change the debug level.
        There is only one debug level supported at the moment.

        :param mode: truthy enables debug mode, falsy disables it.
    """
    # Mutates the module-level DEBUG flag read elsewhere in this file.
    global DEBUG
    DEBUG = bool(mode)
+
+
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch.
        Returns None for unparseable input. """
    try:
        parts = email.utils.parsedate_tz(ims)
        # mktime() works in local time; subtract the timestamp's own UTC
        # offset and the local timezone offset to get a UTC epoch.
        local_epoch = time.mktime(parts[:8] + (0,))
        return local_epoch - (parts[9] or 0) - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        return None
+
+
def parse_auth(header):
    """ Parse rfc2617 HTTP authentication header string (basic) and return
        a (user, pass) tuple or None. """
    try:
        method, data = header.split(None, 1)
        if method.lower() == 'basic':
            # Credentials are base64 of "user:password"; only split on the
            # first colon so passwords may contain colons.
            user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
            return user, pwd
        return None
    except (KeyError, ValueError):
        return None
+
def parse_range_header(header, maxlen=0):
    ''' Yield (start, end) ranges parsed from a HTTP Range header. Skip
        unsatisfiable ranges. The end index is non-inclusive.'''
    if not header or header[:6] != 'bytes=':
        return
    for spec in header[6:].split(','):
        if '-' not in spec:
            continue
        start, end = spec.split('-', 1)
        try:
            if not start:           # bytes=-100 -> last 100 bytes
                lo, hi = max(0, maxlen - int(end)), maxlen
            elif not end:           # bytes=100- -> all but the first 99 bytes
                lo, hi = int(start), maxlen
            else:                   # bytes=100-200 -> bytes 100-200 (inclusive)
                lo, hi = int(start), min(int(end) + 1, maxlen)
            if 0 <= lo < hi <= maxlen:
                yield lo, hi
        except ValueError:
            # Non-numeric bounds are simply skipped.
            pass
+
def _parse_qsl(qs):
    ''' Parse a query string into a list of (key, value) pairs. Both '&'
        and ';' act as pair separators; '+' decodes to a space. '''
    pairs = []
    for token in qs.replace(';', '&').split('&'):
        if not token:
            continue
        nv = token.split('=', 1)
        if len(nv) != 2:
            # A bare key gets an empty value.
            nv.append('')
        key = urlunquote(nv[0].replace('+', ' '))
        value = urlunquote(nv[1].replace('+', ' '))
        pairs.append((key, value))
    return pairs
+
+def _lscmp(a, b):
+ ''' Compares two strings in a cryptographically safe way:
+ Runtime is not affected by length of common prefix. '''
+ return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
+
+
def cookie_encode(data, key):
    ''' Encode and sign a pickle-able object. Return a (byte) string.
        Format: ``!<base64 hmac signature>?<base64 pickle payload>``. '''
    msg = base64.b64encode(pickle.dumps(data, -1))
    # NOTE(review): hmac.new() without an explicit digestmod relies on the
    # historical MD5 default; Python 3.8+ requires digestmod -- confirm the
    # target interpreter before relying on this.
    sig = base64.b64encode(hmac.new(tob(key), msg).digest())
    return tob('!') + sig + tob('?') + msg
+
+
def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None.'''
    data = tob(data)
    if cookie_is_encoded(data):
        sig, msg = data.split(tob('?'), 1)
        # Constant-time comparison of the stored and recomputed signatures.
        if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
            # NOTE(review): pickle.loads on cookie data is only safe because
            # the HMAC check precedes it; anyone holding `key` can execute
            # arbitrary code via a crafted cookie.
            return pickle.loads(base64.b64decode(msg))
    return None
+
+
def cookie_is_encoded(data):
    ''' Return True if the argument looks like a encoded cookie (starts
        with '!' and contains the '?' signature/payload separator).'''
    return bool(data.startswith(tob('!')) and tob('?') in data)
+
+
def html_escape(string):
    ''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
    # '&' must be escaped first so later entities are not double-escaped.
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                         ('"', '&quot;'), ("'", '&#039;')):
        string = string.replace(char, entity)
    return string
+
+
def html_quote(string):
    ''' Escape and quote a string to be used as an HTTP attribute.

        Newlines, carriage returns and tabs are replaced with their numeric
        HTML character references so they survive inside a quoted value. '''
    # BUGFIX: the newline entity was misspelled '%#10;'; the correct numeric
    # character reference is '&#10;' (matching &#13; and &#9; below).
    return '"%s"' % html_escape(string).replace('\n', '&#10;')\
                .replace('\r', '&#13;').replace('\t', '&#9;')
+
+
def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
        of the func parameter. This may yield more than one route if the
        function takes optional keyword arguments. The output is best
        described by example::

        a() -> '/a'
        b(x, y) -> '/b/:x/:y'
        c(x, y=5) -> '/c/:x' and '/c/:x/:y'
        d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
    """
    import inspect # Expensive module. Only import if necessary.
    path = '/' + func.__name__.replace('__','/').lstrip('/')
    # BUGFIX: inspect.getargspec was removed in Python 3.11. Prefer
    # getfullargspec (same first four fields) and keep getargspec as the
    # Python 2 fallback.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    spec = getspec(func)
    argc = len(spec[0]) - len(spec[3] or [])
    path += ('/:%s' * argc) % tuple(spec[0][:argc])
    yield path
    # Each optional argument adds one more, longer route variant.
    for arg in spec[0][argc:]:
        path += '/:%s' % arg
        yield path
+
+
def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative
                      to change the shift direction. (default: 1)
        :raises AssertionError: if there are not enough fragments to shift.
    '''
    if shift == 0:
        return script_name, path_info
    fragments = path_info.strip('/').split('/')
    if fragments and fragments[0] == '':
        fragments = []
    prefix = script_name.strip('/').split('/')
    if prefix and prefix[0] == '':
        prefix = []
    if 0 < shift <= len(fragments):
        # Move leading PATH_INFO fragments onto SCRIPT_NAME.
        prefix = prefix + fragments[:shift]
        fragments = fragments[shift:]
    elif 0 > shift >= -len(prefix):
        # Move trailing SCRIPT_NAME fragments back onto PATH_INFO.
        fragments = prefix[shift:] + fragments
        prefix = prefix[:shift]
    else:
        missing = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % missing)
    new_script_name = '/' + '/'.join(prefix)
    new_path_info = '/' + '/'.join(fragments)
    # Preserve a trailing slash on a non-empty PATH_INFO.
    if path_info.endswith('/') and fragments:
        new_path_info += '/'
    return new_script_name, new_path_info
+
+
def validate(**vkargs):
    """
    Validates and manipulates keyword arguments by user defined callables.
    Handles ValueError and missing arguments by raising HTTPError(403).

    Each keyword maps a route-argument name to a callable that converts
    (and thereby validates) the raw value before the wrapped handler runs.
    """
    # Deprecated in favor of route wildcard filters.
    depr('Use route wildcard filters instead.')
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kargs):
            for key, value in vkargs.items():
                if key not in kargs:
                    abort(403, 'Missing parameter: %s' % key)
                try:
                    # Replace the raw value with the converted one.
                    kargs[key] = value(kargs[key])
                except ValueError:
                    abort(403, 'Wrong parameter format for: %s' % key)
            return func(*args, **kargs)
        return wrapper
    return decorator
+
+
def auth_basic(check, realm="private", text="Access denied"):
    ''' Callback decorator to require HTTP auth (basic).
        TODO: Add route(check_auth=...) parameter.

        :param check: callable(user, password) returning True on success.
        :param realm: realm name advertised in the WWW-Authenticate header.
        :param text: body text for the 401 error response.
    '''
    def decorator(func):
        def wrapper(*a, **ka):
            # request.auth yields (user, password) or None if absent.
            user, password = request.auth or (None, None)
            if user is None or not check(user, password):
                response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
                return HTTPError(401, text)
            return func(*a, **ka)
        return wrapper
    return decorator
+
+
+# Shortcuts for common Bottle methods.
+# They all refer to the current default application.
+
def make_default_app_wrapper(name):
    ''' Return a callable that relays calls to the current default app.

        The target app is resolved via app() on every call, so the wrapper
        always addresses whatever application is on top of the stack. '''
    @functools.wraps(getattr(Bottle, name))
    def wrapper(*a, **ka):
        return getattr(app(), name)(*a, **ka)
    return wrapper
+
# Module-level shortcuts: each one delegates to the current default app
# at call time (see make_default_app_wrapper above).
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
+
+
+
+
+
+
+
+###############################################################################
+# Server Adapter ###############################################################
+###############################################################################
+
+
class ServerAdapter(object):
    ''' Base class for pluggable WSGI server back-ends. Subclasses override
        :meth:`run` to start the actual server. '''

    #: If true, adapters suppress their own request logging where possible.
    quiet = False

    def __init__(self, host='127.0.0.1', port=8080, **config):
        self.options = config
        self.host = host
        self.port = int(port)

    def run(self, handler): # pragma: no cover
        pass

    def __repr__(self):
        args = ', '.join('%s=%s' % (k, repr(v))
                         for k, v in self.options.items())
        return "%s(%s)" % (self.__class__.__name__, args)
+
+
class CGIServer(ServerAdapter):
    """ Run the application as a one-shot CGI script via wsgiref. """
    quiet = True
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        def fixed_environ(environ, start_response):
            # Some CGI environments omit PATH_INFO; WSGI apps expect the key.
            environ.setdefault('PATH_INFO', '')
            return handler(environ, start_response)
        CGIHandler().run(fixed_environ)
+
+
class FlupFCGIServer(ServerAdapter):
    """ FastCGI adapter based on flup; binds to (host, port) unless the
        caller supplies an explicit ``bindAddress`` option. """
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        self.options.setdefault('bindAddress', (self.host, self.port))
        flup.server.fcgi.WSGIServer(handler, **self.options).run()
+
+
class WSGIRefServer(ServerAdapter):
    """ WSGI reference server from the standard-library wsgiref module. """
    def run(self, handler): # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            # Replace the handler class to silence per-request stderr logging.
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw): pass
            self.options['handler_class'] = QuietHandler
        srv = make_server(self.host, self.port, handler, **self.options)
        srv.serve_forever()
+
+
class CherryPyServer(ServerAdapter):
    """ Multi-threaded production server from the CherryPy project. """
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
        # Always release sockets/threads, even if start() raises.
        try:
            server.start()
        finally:
            server.stop()
+
+
class WaitressServer(ServerAdapter):
    """ Pure-python production server from the Pylons project (waitress). """
    def run(self, handler):
        from waitress import serve
        serve(handler, host=self.host, port=self.port)
+
+
class PasteServer(ServerAdapter):
    """ Server from the Paste project. """
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        if not self.quiet:
            # TransLogger wraps the app to emit Apache-style request logs.
            from paste.translogger import TransLogger
            handler = TransLogger(handler)
        httpserver.serve(handler, host=self.host, port=str(self.port),
                         **self.options)
+
+
class MeinheldServer(ServerAdapter):
    """ Server based on the meinheld C extension. """
    def run(self, handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)
+
+
class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        # NOTE(review): assumes the last two characters of SERVER_IDENT hold
        # the minor version number -- confirm against fapws releases.
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            _stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
            _stderr(" (Fapws3 breaks python thread support)\n")
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()
+
+
class TornadoServer(ServerAdapter):
    """ The super hyped asynchronous server by facebook. Untested. """
    def run(self, handler): # pragma: no cover
        import tornado.wsgi, tornado.httpserver, tornado.ioloop
        # WSGIContainer bridges the WSGI app into tornado's async HTTP server.
        container = tornado.wsgi.WSGIContainer(handler)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(port=self.port)
        tornado.ioloop.IOLoop.instance().start()
+
+
class AppEngineServer(ServerAdapter):
    """ Adapter for Google App Engine. """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Lets makes sure it is there. This _really_ improves performance.
        module = sys.modules.get('__main__')
        if module and not hasattr(module, 'main'):
            module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)
+
+
class TwistedServer(ServerAdapter):
    """ Untested. Runs the WSGI app on twisted's reactor with a dedicated
        thread pool. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        # Stop the pool when the reactor shuts down.
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
        reactor.listenTCP(self.port, factory, interface=self.host)
        reactor.run()
+
+
class DieselServer(ServerAdapter):
    """ Untested. Adapter for the diesel greenlet-based framework.
        NOTE(review): self.host is not passed to WSGIApplication here. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        app = WSGIApplication(handler, port=self.port)
        app.run()
+
+
class GeventServer(ServerAdapter):
    """ Untested. Options:

        * `fast` (default: False) uses libevent's http server, but has some
          issues: No streaming, no pipelining, no SSL.
    """
    def run(self, handler):
        from gevent import wsgi, pywsgi, local
        # _lctx is the module-level thread/greenlet-local context defined
        # elsewhere in this file; if it is not a gevent local, monkey
        # patching was not applied early enough.
        if not isinstance(_lctx, local.local):
            msg = "Bottle requires gevent.monkey.patch_all() (before import)"
            raise RuntimeError(msg)
        if not self.options.get('fast'): wsgi = pywsgi
        log = None if self.quiet else 'default'
        wsgi.WSGIServer((self.host, self.port), handler, log=log).serve_forever()
+
+
class GunicornServer(ServerAdapter):
    """ Untested. See http://gunicorn.org/configure.html for options. """
    def run(self, handler):
        from gunicorn.app.base import Application

        # Adapter options are forwarded as gunicorn configuration values.
        config = {'bind': "%s:%d" % (self.host, int(self.port))}
        config.update(self.options)

        class GunicornApplication(Application):
            def init(self, parser, opts, args):
                return config

            def load(self):
                return handler

        GunicornApplication().run()
+
+
class EventletServer(ServerAdapter):
    """ Untested """
    def run(self, handler):
        from eventlet import wsgi, listen
        # Bind exactly once. The previous code called listen() again in the
        # fallback branch, which tried to rebind an address that the first
        # (still open) socket already occupied.
        sock = listen((self.host, self.port))
        try:
            wsgi.server(sock, handler, log_output=(not self.quiet))
        except TypeError:
            # Fallback for old eventlet versions that do not accept the
            # log_output keyword argument.
            wsgi.server(sock, handler)
+
+
class RocketServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from rocket import Rocket
        # Hand the WSGI callable to Rocket and block in its accept loop.
        srv = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
        srv.start()
+
+
class BjoernServer(ServerAdapter):
    """ Fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        import bjoern
        bjoern.run(handler, self.host, self.port)
+
+
class AutoServer(ServerAdapter):
    """ Untested. """
    #: Candidate adapters, tried in order of preference.
    adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
    def run(self, handler):
        for sa in self.adapters:
            try:
                # The adapters import their backend lazily inside run(), so
                # ImportError can be raised by run() as well as by the
                # constructor; either way, fall through to the next candidate.
                return sa(self.host, self.port, **self.options).run(handler)
            except ImportError:
                pass
+
#: Maps the `server` names accepted by :func:`run` to adapter classes.
server_names = {
    'cgi': CGIServer,
    'flup': FlupFCGIServer,
    'wsgiref': WSGIRefServer,
    'waitress': WaitressServer,
    'cherrypy': CherryPyServer,
    'paste': PasteServer,
    'fapws3': FapwsServer,
    'tornado': TornadoServer,
    'gae': AppEngineServer,
    'twisted': TwistedServer,
    'diesel': DieselServer,
    'meinheld': MeinheldServer,
    'gunicorn': GunicornServer,
    'eventlet': EventletServer,
    'gevent': GeventServer,
    'rocket': RocketServer,
    'bjoern' : BjoernServer,
    'auto': AutoServer,
}
+
+
+
+
+
+
+###############################################################################
+# Application Control ##########################################################
+###############################################################################
+
+
def load(target, **namespace):
    """ Import a module or fetch an object from a module.

    * ``package.module`` returns `module` as a module object.
    * ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
    * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.

    The last form accepts not only function calls, but any type of
    expression. Keyword arguments passed to this function are available as
    local variables. Example: ``load('re:compile(x)', x='[a-z]')``
    """
    if ':' in target:
        module, expr = target.split(":", 1)
    else:
        module, expr = target, None
    if module not in sys.modules:
        __import__(module)
    if not expr:
        return sys.modules[module]
    if expr.isalnum():
        # Plain attribute name: a simple getattr is enough.
        return getattr(sys.modules[module], expr)
    # Anything else is evaluated as an expression rooted at the package.
    package_name = module.split('.')[0]
    namespace[package_name] = sys.modules[package_name]
    return eval('%s.%s' % (module, expr), namespace)
+
+
def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    # NORUN suppresses any run() calls executed as a side effect of the import.
    global NORUN; NORUN, nr_old = True, NORUN
    try:
        tmp = default_app.push() # Create a new "default application"
        rv = load(target) # Import the target module
        return rv if callable(rv) else tmp
    finally:
        # NOTE(review): if push() itself raised, 'tmp' would be unbound here.
        default_app.remove(tmp) # Remove the temporary added default application
        NORUN = nr_old
+
_debug = debug  # module-level alias: run()'s `debug` parameter shadows the name
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, plugins=None,
        debug=False, **kargs):
    """ Start a server instance. This method blocks until the server terminates.

        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param options: Options passed to the server adapter.
     """
    if NORUN: return
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        # Parent process of the auto-reloader: spawn a child (with
        # BOTTLE_CHILD set) and respawn it whenever it exits with status 3,
        # which the child uses to request a reload.
        try:
            lockfile = None
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    # Touch the lockfile as a heartbeat so the child's
                    # FileCheckerThread knows this parent is still alive.
                    os.utime(lockfile, None) # I am alive!
                    time.sleep(interval)
                if p.poll() != 3:
                    # Any exit status other than 3 terminates the reloader.
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return

    try:
        _debug(debug)
        app = app or default_app()
        # Accept a 'module:app' target string as well as an app object.
        if isinstance(app, basestring):
            app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)

        for plugin in plugins or []:
            app.install(plugin)

        # Resolve the server argument: name -> class -> instance.
        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)

        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")

        if reloader:
            # Child process: watch module files and the parent's lockfile;
            # exit with status 3 to ask the parent for a restart.
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet):
            print_exc()
        time.sleep(interval)
        sys.exit(3)
+
+
+
class FileCheckerThread(threading.Thread):
    ''' Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets to old. '''

    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None

    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        files = dict()

        # Snapshot the modification time of every currently loaded module.
        for module in list(sys.modules.values()):
            path = getattr(module, '__file__', '')
            # Watch the .py source, not the compiled .pyc/.pyo next to it.
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)

        while not self.status:
            # A missing or stale lockfile means the parent process is gone
            # (the parent touches it every `interval` seconds as a heartbeat).
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            for path, lmtime in list(files.items()):
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.status: self.status = 'exit' # silent exit
        self.join()
        # Swallow the KeyboardInterrupt raised by interrupt_main().
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
+
+
+
+
+
+###############################################################################
+# Template Adapters ############################################################
+###############################################################################
+
+
class TemplateError(HTTPError):
    """ Raised on template lookup or compilation failures; maps to HTTP 500. """
    def __init__(self, message):
        HTTPError.__init__(self, 500, message)
+
+
class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extensions = ['tpl','html','thtml','stpl']
    settings = {} #used in prepare()
    defaults = {} #used in render()

    def __init__(self, source=None, name=None, lookup=None, encoding='utf8', **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.
        """
        self.name = name
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        # `lookup=None` replaces the old mutable `[]` default; behavior for
        # callers that omit the argument is unchanged.
        self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
        self.encoding = encoding
        self.settings = self.settings.copy() # Copy from class variable
        self.settings.update(settings) # Apply
        if not self.source and self.name:
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)

    @classmethod
    def search(cls, name, lookup=None):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit. """
        # `lookup=None` replaces the old mutable `[]` default; both fall
        # through the `not lookup` branch below.
        if not lookup:
            depr('The template lookup path list should not be empty.')
            lookup = ['.']

        if os.path.isabs(name) and os.path.isfile(name):
            depr('Absolute template path names are deprecated.')
            return os.path.abspath(name)

        for spath in lookup:
            spath = os.path.abspath(spath) + os.sep
            fname = os.path.abspath(os.path.join(spath, name))
            # Refuse names that escape the lookup directory (e.g. '../x').
            if not fname.startswith(spath): continue
            if os.path.isfile(fname): return fname
            for ext in cls.extensions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)

    @classmethod
    def global_config(cls, key, *args):
        ''' This reads or sets the global settings stored in class.settings. '''
        if args:
            cls.settings = cls.settings.copy() # Make settings local to class
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]

    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError

    def render(self, *args, **kwargs):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        Local variables may be provided in dictionaries (*args)
        or directly, as keywords (**kwargs).
        """
        raise NotImplementedError
+
+
class MakoTemplate(BaseTemplate):
    """ Adapter for the Mako template engine. """
    def prepare(self, **options):
        from mako.template import Template
        from mako.lookup import TemplateLookup
        options.update({'input_encoding':self.encoding})
        options.setdefault('format_exceptions', bool(DEBUG))
        lookup = TemplateLookup(directories=self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=lookup, **options)
        else:
            self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)

    def render(self, *args, **kwargs):
        # Merge positional dicts and keyword args into one variable set,
        # layered on top of the class-level defaults.
        for dictarg in args: kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)
+
+
class CheetahTemplate(BaseTemplate):
    """ Adapter for the Cheetah template engine. """
    def prepare(self, **options):
        from Cheetah.Template import Template
        # Template variables are injected through a thread-local searchList
        # entry rather than re-compiling per render.
        # NOTE(review): self.context.vars is only created in the thread that
        # runs prepare(); renders from other threads would hit an unset
        # thread-local -- confirm intended usage.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)

    def render(self, *args, **kwargs):
        # Merge positional dicts and keyword args into one variable set.
        for dictarg in args: kwargs.update(dictarg)
        self.context.vars.update(self.defaults)
        self.context.vars.update(kwargs)
        out = str(self.tpl)
        # Clear the shared dict so variables do not leak into the next render.
        self.context.vars.clear()
        return out
+
+
class Jinja2Template(BaseTemplate):
    """ Adapter for the Jinja2 template engine. """
    def prepare(self, filters=None, tests=None, **kwargs):
        from jinja2 import Environment, FunctionLoader
        if 'prefix' in kwargs: # TODO: to be removed after a while
            raise RuntimeError('The keyword argument `prefix` has been removed. '
                'Use the full jinja2 environment name line_statement_prefix instead.')
        # FunctionLoader delegates template lookup to self.loader below, so
        # Jinja2 uses bottle's lookup path and encoding.
        self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters: self.env.filters.update(filters)
        if tests: self.env.tests.update(tests)
        if self.source:
            self.tpl = self.env.from_string(self.source)
        else:
            self.tpl = self.env.get_template(self.filename)

    def render(self, *args, **kwargs):
        # Merge positional dicts and keyword args on top of the defaults.
        for dictarg in args: kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)

    def loader(self, name):
        # Resolve `name` against the lookup path and return decoded source,
        # or None so Jinja2 reports the template as missing.
        fname = self.search(name, self.lookup)
        if not fname: return
        with open(fname, "rb") as f:
            return f.read().decode(self.encoding)
+
+
class SimpleTALTemplate(BaseTemplate):
    ''' Deprecated, do not use. '''
    def prepare(self, **options):
        depr('The SimpleTAL template handler is deprecated'\
             ' and will be removed in 0.12')
        from simpletal import simpleTAL
        if self.source:
            self.tpl = simpleTAL.compileHTMLTemplate(self.source)
        else:
            with open(self.filename, 'rb') as fp:
                self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))

    def render(self, *args, **kwargs):
        from simpletal import simpleTALES
        # Merge positional dicts and keyword args into one variable set.
        for dictarg in args: kwargs.update(dictarg)
        # SimpleTAL receives variables through a Context object; later
        # addGlobal calls (kwargs) override the class-level defaults.
        context = simpleTALES.Context()
        for k,v in self.defaults.items():
            context.addGlobal(k, v)
        for k,v in kwargs.items():
            context.addGlobal(k, v)
        output = StringIO()
        self.tpl.expand(context, output)
        return output.getvalue()
+
+
class SimpleTemplate(BaseTemplate):
    """ Bottle's built-in template engine: compiles '%'-prefixed python lines
    and ``{{expr}}`` substitutions into python code executed by render(). """
    #: Python keywords that open an indented block in template code.
    blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
              'with', 'def', 'class')
    #: Block keywords that continue (and therefore dedent) an open block.
    dedent_blocks = ('elif', 'else', 'except', 'finally')

    @lazy_attribute
    def re_pytokens(cls):
        ''' This matches comments and all kinds of quoted strings but does
            NOT match comments (#...) within quoted strings. (trust me) '''
        return re.compile(r'''
            (''(?!')|""(?!")|'{6}|"{6}    # Empty strings (all 4 types)
             |'(?:[^\\']|\\.)+?'          # Single quotes (')
             |"(?:[^\\"]|\\.)+?"          # Double quotes (")
             |'{3}(?:[^\\]|\\.|\n)+?'{3}  # Triple-quoted strings (')
             |"{3}(?:[^\\]|\\.|\n)+?"{3}  # Triple-quoted strings (")
             |\#.*                        # Comments
            )''', re.VERBOSE)

    def prepare(self, escape_func=html_escape, noescape=False, **kwargs):
        self.cache = {}
        enc = self.encoding
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        if noescape:
            # Swap the two so {{x}} is raw and {{!x}} is escaped.
            self._str, self._escape = self._escape, self._str

    @classmethod
    def split_comment(cls, code):
        """ Removes comments (#...) from python code. """
        if '#' not in code: return code
        #: Remove comments only (leave quoted strings as they are)
        subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
        return re.sub(cls.re_pytokens, subf, code)

    @cached_property
    def co(self):
        # Compiled code object of the generated python source (cached).
        return compile(self.code, self.filename or '<string>', 'exec')

    @cached_property
    def code(self):
        # Translate the template source into python source code (cached).
        stack = [] # Current Code indentation
        lineno = 0 # Current line of code
        ptrbuffer = [] # Buffer for printable strings and token tuple instances
        codebuffer = [] # Buffer for generated python code
        multiline = dedent = oneline = False
        template = self.source or open(self.filename, 'rb').read()

        def yield_tokens(line):
            # Split a text line into TXT/RAW/CMD tokens at {{...}} markers.
            for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
                if i % 2:
                    if part.startswith('!'): yield 'RAW', part[1:]
                    else: yield 'CMD', part
                else: yield 'TXT', part

        def flush(): # Flush the ptrbuffer
            if not ptrbuffer: return
            cline = ''
            for line in ptrbuffer:
                for token, value in line:
                    if token == 'TXT': cline += repr(value)
                    elif token == 'RAW': cline += '_str(%s)' % value
                    elif token == 'CMD': cline += '_escape(%s)' % value
                    cline += ', '
                cline = cline[:-2] + '\\\n'
            cline = cline[:-2]
            if cline[:-1].endswith('\\\\\\\\\\n'):
                cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
            cline = '_printlist([' + cline + '])'
            del ptrbuffer[:] # Do this before calling code() again
            code(cline)

        def code(stmt):
            # Emit python code at the current indentation level.
            for line in stmt.splitlines():
                codebuffer.append('  ' * len(stack) + line.strip())

        for line in template.splitlines(True):
            lineno += 1
            line = touni(line, self.encoding)
            sline = line.lstrip()
            if lineno <= 2:
                # Honour a PEP-263 style coding declaration on the first lines.
                m = re.match(r"%\s*#.*coding[:=]\s*([-\w.]+)", sline)
                if m: self.encoding = m.group(1)
                if m: line = line.replace('coding','coding (removed)')
            if sline and sline[0] == '%' and sline[:2] != '%%':
                line = line.split('%',1)[1].lstrip() # Full line following the %
                cline = self.split_comment(line).strip()
                cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
                flush() # You are actually reading this? Good luck, it's a mess :)
                if cmd in self.blocks or multiline:
                    cmd = multiline or cmd
                    dedent = cmd in self.dedent_blocks # "else:"
                    if dedent and not oneline and not multiline:
                        cmd = stack.pop()
                    code(line)
                    oneline = not cline.endswith(':') # "if 1: pass"
                    multiline = cmd if cline.endswith('\\') else False
                    if not oneline and not multiline:
                        stack.append(cmd)
                elif cmd == 'end' and stack:
                    code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
                elif cmd == 'include':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
                    elif p:
                        code("_=_include(%s, _stdout)" % repr(p[0]))
                    else: # Empty %include -> reverse of %rebase
                        code("_printlist(_base)")
                elif cmd == 'rebase':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
                    elif p:
                        code("globals()['_rebase']=(%s, {})" % repr(p[0]))
                else:
                    code(line)
            else: # Line starting with text (not '%') or '%%' (escaped)
                if line.strip().startswith('%%'):
                    line = line.replace('%%', '%', 1)
                ptrbuffer.append(yield_tokens(line))
        flush()
        return '\n'.join(codebuffer) + '\n'

    def subtemplate(self, _name, _stdout, *args, **kwargs):
        # Render an included template, compiling and caching it on first use.
        for dictarg in args: kwargs.update(dictarg)
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(_stdout, kwargs)

    def execute(self, _stdout, *args, **kwargs):
        # Run the compiled template; output fragments accumulate in _stdout.
        for dictarg in args: kwargs.update(dictarg)
        env = self.defaults.copy()
        env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
               '_include': self.subtemplate, '_str': self._str,
               '_escape': self._escape, 'get': env.get,
               'setdefault': env.setdefault, 'defined': env.__contains__})
        env.update(kwargs)
        eval(self.co, env)
        if '_rebase' in env:
            # %rebase: re-render through the parent template, exposing the
            # current output as _base.
            subtpl, rargs = env['_rebase']
            rargs['_base'] = _stdout[:] #copy stdout
            del _stdout[:] # clear stdout
            return self.subtemplate(subtpl,_stdout,rargs)
        return env

    def render(self, *args, **kwargs):
        """ Render the template using keyword arguments as local variables. """
        for dictarg in args: kwargs.update(dictarg)
        stdout = []
        self.execute(stdout, kwargs)
        return ''.join(stdout)
+
+
def template(*args, **kwargs):
    '''
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    '''
    tpl = args[0] if args else None
    adapter = kwargs.pop('template_adapter', SimpleTemplate)
    lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
    tplid = (id(lookup), tpl)
    # Compile-and-cache; DEBUG disables the cache so edits show up instantly.
    if tplid not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        if isinstance(tpl, adapter):
            # Already a template instance; optionally re-prepare it.
            TEMPLATES[tplid] = tpl
            if settings: TEMPLATES[tplid].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Heuristic: looks like template source code, not a file name.
            TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
        else:
            TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tplid]:
        abort(500, 'Template (%s) not found' % tpl)
    # Remaining positional dicts merge into the keyword variables.
    for dictarg in args[1:]: kwargs.update(dictarg)
    return TEMPLATES[tplid].render(kwargs)
+
#: Shortcuts that pre-select a template engine for :func:`template`.
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
+
+
def view(tpl_name, **defaults):
    ''' Decorator: renders a template for a handler.
        The handler can control its behavior like that:

        - return a dict of template vars to fill out the template
        - return something other than a dict and the view decorator will not
          process the template, but return the handler result as is.
          This includes returning a HTTPResponse(dict) to get,
          for instance, JSON with autojson or other castfilters.
    '''
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            if not isinstance(result, (dict, DictMixin)):
                # Non-dict results bypass template rendering entirely.
                return result
            tplvars = defaults.copy()
            tplvars.update(result)
            return template(tpl_name, **tplvars)
        return wrapper
    return decorator
+
#: Shortcuts that pre-select a template engine for :func:`view`.
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
+
+
+
+
+
+
+###############################################################################
+# Constants and Globals ########################################################
+###############################################################################
+
+
#: Directories searched for template files (see :meth:`BaseTemplate.search`).
TEMPLATE_PATH = ['./', './views/']
#: Cache of compiled templates, keyed by (id(lookup), name-or-source).
TEMPLATES = {}
#: Global debug flag; enables tracebacks in error pages, disables caching.
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()

#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
#: Pre-rendered '<code> <phrase>' strings for HTTP response status lines.
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
+
+#: The default template used for error pages. Override with @error()
+ERROR_PAGE_TEMPLATE = """
+%%try:
+ %%from %s import DEBUG, HTTP_CODES, request, touni
+ <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
+ <html>
+ <head>
+ <title>Error: {{e.status}}</title>
+ <style type="text/css">
+ html {background-color: #eee; font-family: sans;}
+ body {background-color: #fff; border: 1px solid #ddd;
+ padding: 15px; margin: 15px;}
+ pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
+ </style>
+ </head>
+ <body>
+ <h1>Error: {{e.status}}</h1>
+ <p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
+ caused an error:</p>
+ <pre>{{e.body}}</pre>
+ %%if DEBUG and e.exception:
+ <h2>Exception:</h2>
+ <pre>{{repr(e.exception)}}</pre>
+ %%end
+ %%if DEBUG and e.traceback:
+ <h2>Traceback:</h2>
+ <pre>{{e.traceback}}</pre>
+ %%end
+ </body>
+ </html>
+%%except ImportError:
+ <b>ImportError:</b> Could not generate the error page. Please add bottle to
+ the import path.
+%%end
+""" % __name__
+
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()

#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()

#: A thread-safe namespace. Not used by Bottle.
local = threading.local()

# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()

#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
+
if __name__ == '__main__':
    # Command-line front end: `python bottle.py [options] package.module:app`
    opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
    if opt.version:
        _stdout('Bottle %s\n'%__version__)
        sys.exit(0)
    if not args:
        parser.print_help()
        _stderr('\nError: No application specified.\n')
        sys.exit(1)

    # Let the target module be found relative to the working directory, and
    # let it `import bottle` even while this file runs as __main__.
    sys.path.insert(0, '.')
    sys.modules.setdefault('bottle', sys.modules['__main__'])

    host, port = (opt.bind or 'localhost'), 8080
    if ':' in host:
        # NOTE(review): after this split `port` is a string; presumably the
        # ServerAdapter constructor casts it with int() -- confirm upstream.
        host, port = host.rsplit(':', 1)

    run(args[0], host=host, port=port, server=opt.server,
        reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
+
+
+
+
+# THE END
diff --git a/pyload/lib/forwarder.py b/pyload/lib/forwarder.py
new file mode 100644
index 000000000..eacb33c2b
--- /dev/null
+++ b/pyload/lib/forwarder.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from sys import argv
+from sys import exit
+
+import socket
+import thread
+
+from traceback import print_exc
+
class Forwarder():
    """ Forwards the local pyLoad remote port (9666) to extip:extport.

    NOTE(review): all work happens in the constructor -- proxy() loops
    forever, so instantiating this class never returns.
    """

    def __init__(self, extip, extport=9666):
        print "Start portforwarding to %s:%s" % (extip, extport)
        proxy(extip, extport, 9666)
+
+
def proxy(*settings):
    # Restart the listener forever: server() only returns after an
    # unhandled socket error, so the forwarder keeps coming back up.
    while True:
        server(*settings)
+
def server(*settings):
    """ Accept loop: listen on 127.0.0.1:settings[2] and pump every client
    connection to settings[0]:settings[1], one thread per direction.
    """
    try:
        dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        dock_socket.bind(("127.0.0.1", settings[2]))
        dock_socket.listen(5)
        while True:
            client_socket = dock_socket.accept()[0]
            server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_socket.connect((settings[0], settings[1]))
            # One pump per direction so the two streams cannot block each other.
            thread.start_new_thread(forward, (client_socket, server_socket))
            thread.start_new_thread(forward, (server_socket, client_socket))
    except Exception:
        # NOTE(review): the listening socket is never closed here, so the
        # immediate restart by proxy() may fail to rebind until the OS
        # releases the port.
        print_exc()
+
+
def forward(source, destination):
    """ Pump bytes from source to destination until the source reaches EOF. """
    while True:
        data = source.recv(1024)
        if not data:
            # Peer closed its sending side: propagate the half-close so the
            # destination sees EOF, then stop pumping.
            #source.shutdown(socket.SHUT_RD)
            destination.shutdown(socket.SHUT_WR)
            break
        destination.sendall(data)
+
if __name__ == "__main__":
    args = argv[1:]
    if not args:
        print "Usage: forwarder.py <remote ip> <remote port>"
        exit()
    if len(args) == 1:
        # Default to pyLoad's standard remote port.
        args.append(9666)

    f = Forwarder(args[0], int(args[1]))
+ \ No newline at end of file
diff --git a/pyload/lib/hg_tool.py b/pyload/lib/hg_tool.py
new file mode 100644
index 000000000..cd97833df
--- /dev/null
+++ b/pyload/lib/hg_tool.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from subprocess import Popen, PIPE
+from time import time, gmtime, strftime
+
#: Maps misspelled or alternate commit names to the canonical author name.
aliases = {"zoidber": "zoidberg", "zoidberg10": "zoidberg", "webmaster": "dhmh", "mast3rranan": "ranan",
           "ranan2": "ranan"}
#: Paths excluded from all statistics (passed to hg via -X).
exclude = ["locale/*", "module/lib/*"]
#: Date format handed to `hg churn -d`.
date_format = "%Y-%m-%d"
#: Matches the changed-line count column in `hg churn` output.
line_re = re.compile(r" (\d+) \**", re.I)
+
def add_exclude_flags(args, patterns=None):
    """ Append one ``-X <pattern>`` pair per exclude pattern to *args*.

    :param args: mutable list of hg command-line arguments (modified in place).
    :param patterns: iterable of glob patterns; defaults to the module-level
        ``exclude`` list.
    """
    if patterns is None:
        patterns = exclude
    # Loop variable renamed: the old code shadowed the builtin dir().
    for pattern in patterns:
        args.extend(["-X", pattern])
+
def wipe(data, perc=1):
    """ Remove entries contributing less than *perc* percent of the total.

    Modifies *data* in place and returns it for convenience.
    """
    threshold = (sum(data.values()) * perc) / 100
    # Iterate over a snapshot: deleting keys while iterating the live
    # items() view raises RuntimeError on Python 3.
    for key, count in list(data.items()):
        if count < threshold:
            del data[key]

    return data
+
def de_alias(data, alias_map=None):
    """ Merge counts recorded under alternate names into the canonical name.

    :param data: mapping author -> count, modified in place and returned.
    :param alias_map: mapping alias -> canonical name; defaults to the
        module-level ``aliases`` dict.
    """
    if alias_map is None:
        alias_map = aliases
    # items() instead of the Python-2-only iteritems(); we only mutate
    # `data` here, never `alias_map`, so iteration is safe.
    for alias, canonical in alias_map.items():
        if alias not in data:
            continue
        data[canonical] = data.get(canonical, 0) + data.pop(alias)

    return data
+
+
def output(data):
    """ Print per-author line counts with percentages, then a blank line. """
    # float() so the percentage division below is not integer division.
    s = float(sum(data.values()))
    print "Total Lines: %d" % s
    for k, v in data.iteritems():
        print "%15s: %.1f%% | %d" % (k, (v * 100) / s, v)
    print
+
+
def file_list():
    """ Return tracked files (clean/modified/added) from `hg status -A`,
    honouring the module-level exclude list.
    """
    args = ["hg", "status", "-A"]
    add_exclude_flags(args)
    p = Popen(args, stdout=PIPE)
    out, err = p.communicate()
    # Status letters: C = clean, M = modified, A = added.
    return [x.split()[1] for x in out.splitlines() if x.split()[0] in "CMA"]
+
+
def hg_annotate(path):
    """ Count non-blank lines per author for one file via `hg annotate -u`,
    with aliases merged. Returns {} for files hg cannot annotate.
    """
    args = ["hg", "annotate", "-u", path]
    p = Popen(args, stdout=PIPE)
    out, err = p.communicate()

    data = {}

    for line in out.splitlines():
        # NOTE(review): splits on the first ':'; assumes author names
        # contain no colon -- confirm against the repository history.
        author, non, line = line.partition(":")

        # probably binary file
        if author == path: return {}

        author = author.strip().lower()
        if not line.strip(): continue # don't count blank lines

        if author in data: data[author] += 1
        else: data[author] = 1

    return de_alias(data)
+
+
def hg_churn(days=None):
    """ Aggregate changed-line counts per author from `hg churn`.

    :param days: if given, only count changes within the last *days* days.
    """
    args = ["hg", "churn"]
    if days:
        # Restrict churn to the date range [now - days, today].
        args.append("-d")
        t = time() - 60 * 60 * 24 * days
        args.append("%s to %s" % (strftime(date_format, gmtime(t)), strftime(date_format)))

    add_exclude_flags(args)
    p = Popen(args, stdout=PIPE)
    out, err = p.communicate()

    data = {}

    for line in out.splitlines():
        # NOTE(review): if line_re does not match a line, m is None and
        # m.group(1) below raises AttributeError -- confirm churn output.
        m = line_re.search(line)
        author = line.split()[0]
        lines = int(m.group(1))

        # Strip the domain part of e-mail style author names.
        if "@" in author:
            author, n, email = author.partition("@")

        author = author.strip().lower()

        if author in data: data[author] += lines
        else: data[author] = lines

    return de_alias(data)
+
+
def complete_annotate():
    """ Sum per-author line counts over every tracked file. """
    files = file_list()
    data = {}
    for f in files:
        tmp = hg_annotate(f)
        for k, v in tmp.iteritems():
            if k in data: data[k] += v
            else: data[k] = v

    return data
+
+
if __name__ == "__main__":
    # Recent activity over three sliding windows, small contributors wiped.
    for d in (30, 90, 180):
        c = wipe(hg_churn(d))
        print "Changes in %d days:" % d
        output(c)

    # All-time churn statistics.
    c = wipe(hg_churn())
    print "Total changes:"
    output(c)

    # Ownership of the current working copy, line by line.
    print "Current source code version:"
    data = wipe(complete_annotate())
    output(data)
diff --git a/pyload/lib/mod_pywebsocket/COPYING b/pyload/lib/mod_pywebsocket/COPYING
new file mode 100644
index 000000000..989d02e4c
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/COPYING
@@ -0,0 +1,28 @@
+Copyright 2012, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pyload/lib/mod_pywebsocket/__init__.py b/pyload/lib/mod_pywebsocket/__init__.py
new file mode 100644
index 000000000..454ae0c45
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/__init__.py
@@ -0,0 +1,197 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket extension for Apache HTTP Server.
+
+mod_pywebsocket is a WebSocket extension for Apache HTTP Server
+intended for testing or experimental purposes. mod_python is required.
+
+
+Installation
+============
+
+0. Prepare an Apache HTTP Server for which mod_python is enabled.
+
+1. Specify the following Apache HTTP Server directives to suit your
+ configuration.
+
+ If mod_pywebsocket is not in the Python path, specify the following.
+ <websock_lib> is the directory where mod_pywebsocket is installed.
+
+ PythonPath "sys.path+['<websock_lib>']"
+
+ Always specify the following. <websock_handlers> is the directory where
+ user-written WebSocket handlers are placed.
+
+ PythonOption mod_pywebsocket.handler_root <websock_handlers>
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+
+ To limit the search for WebSocket handlers to a directory <scan_dir>
+ under <websock_handlers>, configure as follows:
+
+ PythonOption mod_pywebsocket.handler_scan <scan_dir>
+
+ <scan_dir> is useful in saving scan time when <websock_handlers>
+ contains many non-WebSocket handler files.
+
+ If you want to allow handlers whose canonical path is not under the root
+ directory (i.e. symbolic link is in root directory but its target is not),
+ configure as follows:
+
+ PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On
+
+ Example snippet of httpd.conf:
+ (mod_pywebsocket is in /websock_lib, WebSocket handlers are in
+ /websock_handlers, port is 80 for ws, 443 for wss.)
+
+ <IfModule python_module>
+ PythonPath "sys.path+['/websock_lib']"
+ PythonOption mod_pywebsocket.handler_root /websock_handlers
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+ </IfModule>
+
+2. Tune Apache parameters for serving WebSocket. We'd like to note that at
+ least TimeOut directive from core features and RequestReadTimeout
+ directive from mod_reqtimeout should be modified not to kill connections
+ in only a few seconds of idle time.
+
+3. Verify installation. You can use example/console.html to poke the server.
+
+
+Writing WebSocket handlers
+==========================
+
+When a WebSocket request comes in, the resource name
+specified in the handshake is considered as if it is a file path under
+<websock_handlers> and the handler defined in
+<websock_handlers>/<resource_name>_wsh.py is invoked.
+
+For example, if the resource name is /example/chat, the handler defined in
+<websock_handlers>/example/chat_wsh.py is invoked.
+
+A WebSocket handler is composed of the following three functions:
+
+ web_socket_do_extra_handshake(request)
+ web_socket_transfer_data(request)
+ web_socket_passive_closing_handshake(request)
+
+where:
+ request: mod_python request.
+
+web_socket_do_extra_handshake is called during the handshake after the
+headers are successfully parsed and WebSocket properties (ws_location,
+ws_origin, and ws_resource) are added to request. A handler
+can reject the request by raising an exception.
+
+A request object has the following properties that you can use during the
+extra handshake (web_socket_do_extra_handshake):
+- ws_resource
+- ws_origin
+- ws_version
+- ws_location (HyBi 00 only)
+- ws_extensions (HyBi 06 and later)
+- ws_deflate (HyBi 06 and later)
+- ws_protocol
+- ws_requested_protocols (HyBi 06 and later)
+
+The last two are a bit tricky. See the next subsection.
+
+
+Subprotocol Negotiation
+-----------------------
+
+For HyBi 06 and later, ws_protocol is always set to None when
+web_socket_do_extra_handshake is called. If ws_requested_protocols is not
+None, you must choose one subprotocol from this list and set it to
+ws_protocol.
+
+For HyBi 00, when web_socket_do_extra_handshake is called,
+ws_protocol is set to the value given by the client in
+Sec-WebSocket-Protocol header or None if
+such header was not found in the opening handshake request. Finish extra
+handshake with ws_protocol untouched to accept the request subprotocol.
+Then, Sec-WebSocket-Protocol header will be sent to
+the client in response with the same value as requested. Raise an exception
+in web_socket_do_extra_handshake to reject the requested subprotocol.
+
+
+Data Transfer
+-------------
+
+web_socket_transfer_data is called after the handshake completed
+successfully. A handler can receive/send messages from/to the client
+using request. mod_pywebsocket.msgutil module provides utilities
+for data transfer.
+
+You can receive a message by the following statement.
+
+ message = request.ws_stream.receive_message()
+
+This call blocks until any complete text frame arrives, and the payload data
+of the incoming frame will be stored into message. When you're using IETF
+HyBi 00 or later protocol, receive_message() will return None on receiving
+client-initiated closing handshake. When any error occurs, receive_message()
+will raise some exception.
+
+You can send a message by the following statement.
+
+ request.ws_stream.send_message(message)
+
+
+Closing Connection
+------------------
+
+Executing the following statement or just return-ing from
+web_socket_transfer_data cause connection close.
+
+ request.ws_stream.close_connection()
+
+close_connection will wait
+for closing handshake acknowledgement coming from the client. When it
+couldn't receive a valid acknowledgement, raises an exception.
+
+web_socket_passive_closing_handshake is called after the server receives
+incoming closing frame from the client peer immediately. You can specify
+code and reason by return values. They are sent as an outgoing closing frame
+from the server. A request object has the following properties that you can
+use in web_socket_passive_closing_handshake.
+- ws_close_code
+- ws_close_reason
+
+
+Threading
+---------
+
+A WebSocket handler must be thread-safe if the server (Apache or
+standalone.py) is configured to use threads.
+"""
+
+
+# vi:sts=4 sw=4 et tw=72
diff --git a/pyload/lib/mod_pywebsocket/_stream_base.py b/pyload/lib/mod_pywebsocket/_stream_base.py
new file mode 100644
index 000000000..60fb33d2c
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/_stream_base.py
@@ -0,0 +1,165 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Base stream class.
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# mod_python document says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw bytes
+# writing/reading.
+
+
+from mod_pywebsocket import util
+
+
+# Exceptions
+
+
+class ConnectionTerminatedException(Exception):
+ """This exception will be raised when a connection is terminated
+ unexpectedly.
+ """
+
+ pass
+
+
+class InvalidFrameException(ConnectionTerminatedException):
+ """This exception will be raised when we received an invalid frame we
+ cannot parse.
+ """
+
+ pass
+
+
+class BadOperationException(Exception):
+ """This exception will be raised when send_message() is called on
+ server-terminated connection or receive_message() is called on
+ client-terminated connection.
+ """
+
+ pass
+
+
+class UnsupportedFrameException(Exception):
+ """This exception will be raised when we receive a frame with flag, opcode
+ we cannot handle. Handlers can just catch and ignore this exception and
+ call receive_message() again to continue processing the next frame.
+ """
+
+ pass
+
+
+class InvalidUTF8Exception(Exception):
+ """This exception will be raised when we receive a text frame which
+ contains invalid UTF-8 strings.
+ """
+
+ pass
+
+
+class StreamBase(object):
+ """Base stream class."""
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+
+ def _read(self, length):
+ """Reads length bytes from connection. In case we catch any exception,
+ prepends remote address to the exception message and raise again.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ bytes = self._request.connection.read(length)
+ if not bytes:
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. Peer (%r) closed connection' %
+ (length, (self._request.connection.remote_addr,)))
+ return bytes
+
+ def _write(self, bytes):
+ """Writes given bytes to connection. In case we catch any exception,
+ prepends remote address to the exception message and raise again.
+ """
+
+ try:
+ self._request.connection.write(bytes)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to send message to %r: ' %
+ (self._request.connection.remote_addr,),
+ e)
+ raise
+
+ def receive_bytes(self, length):
+ """Receives multiple bytes. Retries read when we couldn't receive the
+ specified amount.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ bytes = []
+ while length > 0:
+ new_bytes = self._read(length)
+ bytes.append(new_bytes)
+ length -= len(new_bytes)
+ return ''.join(bytes)
+
+ def _read_until(self, delim_char):
+ """Reads bytes until we encounter delim_char. The result will not
+ contain delim_char.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ bytes = []
+ while True:
+ ch = self._read(1)
+ if ch == delim_char:
+ break
+ bytes.append(ch)
+ return ''.join(bytes)
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/_stream_hixie75.py b/pyload/lib/mod_pywebsocket/_stream_hixie75.py
new file mode 100644
index 000000000..94cf5b31b
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/_stream_hixie75.py
@@ -0,0 +1,229 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides a class for parsing/building frames of the WebSocket
+protocol version HyBi 00 and Hixie 75.
+
+Specification:
+- HyBi 00 http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+- Hixie 75 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket import util
+
+
+class StreamHixie75(StreamBase):
+ """A class for parsing/building frames of the WebSocket protocol version
+ HyBi 00 and Hixie 75.
+ """
+
+ def __init__(self, request, enable_closing_handshake=False):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ enable_closing_handshake: to let StreamHixie75 perform closing
+ handshake as specified in HyBi 00, set
+ this option to True.
+ """
+
+ StreamBase.__init__(self, request)
+
+ self._logger = util.get_class_logger(self)
+
+ self._enable_closing_handshake = enable_closing_handshake
+
+ self._request.client_terminated = False
+ self._request.server_terminated = False
+
+ def send_message(self, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+ message: unicode string to send.
+ binary: not used in hixie75.
+
+ Raises:
+ BadOperationException: when called on a server-terminated
+ connection.
+ """
+
+ if not end:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with end=False')
+
+ if binary:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with binary=True')
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ self._write(''.join(['\x00', message.encode('utf-8'), '\xff']))
+
+ def _read_payload_length_hixie75(self):
+ """Reads a length header in a Hixie75 version frame with length.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ length = 0
+ while True:
+ b_str = self._read(1)
+ b = ord(b_str)
+ length = length * 128 + (b & 0x7f)
+ if (b & 0x80) == 0:
+ break
+ return length
+
+ def receive_message(self):
+        """Receive a WebSocket frame and return its payload as a unicode string.
+
+ Returns:
+ payload unicode string in a WebSocket frame.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty
+ string.
+ BadOperationException: when called on a client-terminated
+ connection.
+ """
+
+ if self._request.client_terminated:
+ raise BadOperationException(
+ 'Requested receive_message after receiving a closing '
+ 'handshake')
+
+ while True:
+ # Read 1 byte.
+ # mp_conn.read will block if no bytes are available.
+ # Timeout is controlled by TimeOut directive of Apache.
+ frame_type_str = self.receive_bytes(1)
+ frame_type = ord(frame_type_str)
+ if (frame_type & 0x80) == 0x80:
+ # The payload length is specified in the frame.
+ # Read and discard.
+ length = self._read_payload_length_hixie75()
+ if length > 0:
+ _ = self.receive_bytes(length)
+ # 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the
+ # /client terminated/ flag and abort these steps.
+ if not self._enable_closing_handshake:
+ continue
+
+ if frame_type == 0xFF and length == 0:
+ self._request.client_terminated = True
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Received ack for server-initiated closing '
+ 'handshake')
+ return None
+
+ self._logger.debug(
+ 'Received client-initiated closing handshake')
+
+ self._send_closing_handshake()
+ self._logger.debug(
+ 'Sent ack for client-initiated closing handshake')
+ return None
+ else:
+ # The payload is delimited with \xff.
+ bytes = self._read_until('\xff')
+ # The WebSocket protocol section 4.4 specifies that invalid
+ # characters must be replaced with U+fffd REPLACEMENT
+ # CHARACTER.
+ message = bytes.decode('utf-8', 'replace')
+ if frame_type == 0x00:
+ return message
+ # Discard data of other types.
+
+ def _send_closing_handshake(self):
+ if not self._enable_closing_handshake:
+ raise BadOperationException(
+ 'Closing handshake is not supported in Hixie 75 protocol')
+
+ self._request.server_terminated = True
+
+ # 5.3 the server may decide to terminate the WebSocket connection by
+ # running through the following steps:
+ # 1. send a 0xFF byte and a 0x00 byte to the client to indicate the
+ # start of the closing handshake.
+ self._write('\xff\x00')
+
+ def close_connection(self, unused_code='', unused_reason=''):
+ """Closes a WebSocket connection.
+
+ Raises:
+ ConnectionTerminatedException: when closing handshake was
+                                           not successful.
+ """
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Requested close_connection but server is already terminated')
+ return
+
+ if not self._enable_closing_handshake:
+ self._request.server_terminated = True
+ self._logger.debug('Connection closed')
+ return
+
+ self._send_closing_handshake()
+ self._logger.debug('Sent server-initiated closing handshake')
+
+ # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+ # or until a server-defined timeout expires.
+ #
+ # For now, we expect receiving closing handshake right after sending
+ # out closing handshake, and if we couldn't receive non-handshake
+ # frame, we take it as ConnectionTerminatedException.
+ message = self.receive_message()
+ if message is not None:
+ raise ConnectionTerminatedException(
+ 'Didn\'t receive valid ack for closing handshake')
+ # TODO: 3. close the WebSocket connection.
+ # note: mod_python Connection (mp_conn) doesn't have close method.
+
+ def send_ping(self, body):
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_ping')
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/_stream_hybi.py b/pyload/lib/mod_pywebsocket/_stream_hybi.py
new file mode 100644
index 000000000..bd158fa6b
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/_stream_hybi.py
@@ -0,0 +1,915 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for parsing/building frames
+of the WebSocket protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+from collections import deque
+import logging
+import os
+import struct
+import time
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+_NOOP_MASKER = util.NoopMasker()
+
+
+class Frame(object):
+
+ def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
+ opcode=None, payload=''):
+ self.fin = fin
+ self.rsv1 = rsv1
+ self.rsv2 = rsv2
+ self.rsv3 = rsv3
+ self.opcode = opcode
+ self.payload = payload
+
+
+# Helper functions made public to be used for writing unittests for WebSocket
+# clients.
+
+
+def create_length_header(length, mask):
+ """Creates a length header.
+
+ Args:
+ length: Frame length. Must be less than 2^63.
+ mask: Mask bit. Must be boolean.
+
+ Raises:
+ ValueError: when bad data is given.
+ """
+
+ if mask:
+ mask_bit = 1 << 7
+ else:
+ mask_bit = 0
+
+ if length < 0:
+ raise ValueError('length must be non negative integer')
+ elif length <= 125:
+ return chr(mask_bit | length)
+ elif length < (1 << 16):
+ return chr(mask_bit | 126) + struct.pack('!H', length)
+ elif length < (1 << 63):
+ return chr(mask_bit | 127) + struct.pack('!Q', length)
+ else:
+ raise ValueError('Payload is too big for one frame')
+
+
+def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
+ """Creates a frame header.
+
+ Raises:
+ Exception: when bad data is given.
+ """
+
+ if opcode < 0 or 0xf < opcode:
+ raise ValueError('Opcode out of range')
+
+ if payload_length < 0 or (1 << 63) <= payload_length:
+ raise ValueError('payload_length out of range')
+
+ if (fin | rsv1 | rsv2 | rsv3) & ~1:
+ raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')
+
+ header = ''
+
+ first_byte = ((fin << 7)
+ | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
+ | opcode)
+ header += chr(first_byte)
+ header += create_length_header(payload_length, mask)
+
+ return header
+
+
+def _build_frame(header, body, mask):
+ if not mask:
+ return header + body
+
+ masking_nonce = os.urandom(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ return header + masking_nonce + masker.mask(body)
+
+
+def _filter_and_format_frame_object(frame, mask, frame_filters):
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_binary_frame(
+ message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple binary frame with no extension, reserved bit."""
+
+ frame = Frame(fin=fin, opcode=opcode, payload=message)
+ return _filter_and_format_frame_object(frame, mask, frame_filters)
+
+
+def create_text_frame(
+ message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple text frame with no extension, reserved bit."""
+
+ encoded_message = message.encode('utf-8')
+ return create_binary_frame(encoded_message, opcode, fin, mask,
+ frame_filters)
+
+
+def parse_frame(receive_bytes, logger=None,
+ ws_version=common.VERSION_HYBI_LATEST,
+ unmask_receive=True):
+ """Parses a frame. Returns a tuple containing each header field and
+ payload.
+
+ Args:
+ receive_bytes: a function that reads frame data from a stream or
+ something similar. The function takes length of the bytes to be
+ read. The function must raise ConnectionTerminatedException if
+ there is not enough data to be read.
+ logger: a logging object.
+ ws_version: the version of WebSocket protocol.
+ unmask_receive: unmask received frames. When received unmasked
+ frame, raises InvalidFrameException.
+
+ Raises:
+ ConnectionTerminatedException: when receive_bytes raises it.
+ InvalidFrameException: when the frame contains invalid data.
+ """
+
+ if not logger:
+ logger = logging.getLogger()
+
+ logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame')
+
+ received = receive_bytes(2)
+
+ first_byte = ord(received[0])
+ fin = (first_byte >> 7) & 1
+ rsv1 = (first_byte >> 6) & 1
+ rsv2 = (first_byte >> 5) & 1
+ rsv3 = (first_byte >> 4) & 1
+ opcode = first_byte & 0xf
+
+ second_byte = ord(received[1])
+ mask = (second_byte >> 7) & 1
+ payload_length = second_byte & 0x7f
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
+ 'Mask=%s, Payload_length=%s',
+ fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)
+
+ if (mask == 1) != unmask_receive:
+ raise InvalidFrameException(
+            'Mask bit on the received frame didn\'t match masking '
+ 'configuration for received frames')
+
+ # The HyBi and later specs disallow putting a value in 0x0-0xFFFF
+ # into the 8-octet extended payload length field (or 0x0-0xFD in
+ # 2-octet field).
+ valid_length_encoding = True
+ length_encoding_bytes = 1
+ if payload_length == 127:
+ logger.log(common.LOGLEVEL_FINE,
+ 'Receive 8-octet extended payload length')
+
+ extended_payload_length = receive_bytes(8)
+ payload_length = struct.unpack(
+ '!Q', extended_payload_length)[0]
+ if payload_length > 0x7FFFFFFFFFFFFFFF:
+ raise InvalidFrameException(
+ 'Extended payload length >= 2^63')
+ if ws_version >= 13 and payload_length < 0x10000:
+ valid_length_encoding = False
+ length_encoding_bytes = 8
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+ elif payload_length == 126:
+ logger.log(common.LOGLEVEL_FINE,
+ 'Receive 2-octet extended payload length')
+
+ extended_payload_length = receive_bytes(2)
+ payload_length = struct.unpack(
+ '!H', extended_payload_length)[0]
+ if ws_version >= 13 and payload_length < 126:
+ valid_length_encoding = False
+ length_encoding_bytes = 2
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+
+ if not valid_length_encoding:
+ logger.warning(
+ 'Payload length is not encoded using the minimal number of '
+ 'bytes (%d is encoded using %d bytes)',
+ payload_length,
+ length_encoding_bytes)
+
+ if mask == 1:
+ logger.log(common.LOGLEVEL_FINE, 'Receive mask')
+
+ masking_nonce = receive_bytes(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
+ else:
+ masker = _NOOP_MASKER
+
+ logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ receive_start = time.time()
+
+ raw_payload_bytes = receive_bytes(payload_length)
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done receiving payload data at %s MB/s',
+ payload_length / (time.time() - receive_start) / 1000 / 1000)
+ logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ unmask_start = time.time()
+
+ bytes = masker.mask(raw_payload_bytes)
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done unmasking payload data at %s MB/s',
+ payload_length / (time.time() - unmask_start) / 1000 / 1000)
+
+ return opcode, bytes, fin, rsv1, rsv2, rsv3
+
+
+class FragmentedFrameBuilder(object):
+ """A stateful class to send a message as fragments."""
+
+ def __init__(self, mask, frame_filters=[], encode_utf8=True):
+ """Constructs an instance."""
+
+ self._mask = mask
+ self._frame_filters = frame_filters
+ # This is for skipping UTF-8 encoding when building text type frames
+ # from compressed data.
+ self._encode_utf8 = encode_utf8
+
+ self._started = False
+
+ # Hold opcode of the first frame in messages to verify types of other
+ # frames in the message are all the same.
+ self._opcode = common.OPCODE_TEXT
+
+ def build(self, payload_data, end, binary):
+ if binary:
+ frame_type = common.OPCODE_BINARY
+ else:
+ frame_type = common.OPCODE_TEXT
+ if self._started:
+ if self._opcode != frame_type:
+ raise ValueError('Message types are different in frames for '
+ 'the same message')
+ opcode = common.OPCODE_CONTINUATION
+ else:
+ opcode = frame_type
+ self._opcode = frame_type
+
+ if end:
+ self._started = False
+ fin = 1
+ else:
+ self._started = True
+ fin = 0
+
+ if binary or not self._encode_utf8:
+ return create_binary_frame(
+ payload_data, opcode, fin, self._mask, self._frame_filters)
+ else:
+ return create_text_frame(
+ payload_data, opcode, fin, self._mask, self._frame_filters)
+
+
+def _create_control_frame(opcode, body, mask, frame_filters):
+ frame = Frame(opcode=opcode, payload=body)
+
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ if len(frame.payload) > 125:
+ raise BadOperationException(
+ 'Payload data size of control frames must be 125 bytes or less')
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_ping_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters)
+
+
+def create_pong_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters)
+
+
+def create_close_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(
+ common.OPCODE_CLOSE, body, mask, frame_filters)
+
+
def create_closing_handshake_body(code, reason):
    """Builds the body of a close frame: 2-byte status code + UTF-8 reason.

    Args:
        code: close status code, or None for an empty body.
        reason: close reason text; encoded as UTF-8 after the code.

    Raises:
        BadOperationException: when code is outside the valid range or is
            one of the reserved pseudo codes that must never appear on the
            wire.
    """

    if code is None:
        return ''

    if not (common.STATUS_NORMAL_CLOSURE <= code <=
            common.STATUS_USER_PRIVATE_MAX):
        raise BadOperationException('Status code is out of range')

    if code in (common.STATUS_NO_STATUS_RECEIVED,
                common.STATUS_ABNORMAL_CLOSURE,
                common.STATUS_TLS_HANDSHAKE):
        raise BadOperationException('Status code is reserved pseudo '
                                    'code')

    return struct.pack('!H', code) + reason.encode('utf-8')
+
+
class StreamOptions(object):
    """Mutable bag of option values that configure Stream objects."""

    def __init__(self):
        """Initializes every option to its default value."""

        # True when the deflate-stream extension compresses the whole
        # connection.
        self.deflate_stream = False

        # Per-frame filters, applied in order to frames going out and
        # coming in.
        self.outgoing_frame_filters = []
        self.incoming_frame_filters = []

        # Per-message filters; control frames bypass these entirely.
        self.outgoing_message_filters = []
        self.incoming_message_filters = []

        # Encode outgoing text messages as UTF-8 before framing.
        self.encode_text_message_to_utf8 = True

        # Masking of sent frames and unmasking of received frames.
        self.mask_send = False
        self.unmask_receive = True

        # RFC 6455 forbids fragmented control frames; the mux extension
        # relaxes the restriction, so this can be switched on.
        self.allow_fragmented_control_frame = False
+
+
class Stream(StreamBase):
    """A class for parsing/building frames of the WebSocket protocol
    (RFC 6455).
    """

    def __init__(self, request, options):
        """Constructs an instance.

        Args:
            request: mod_python request.
            options: StreamOptions instance controlling masking and the
                frame/message filters applied by this stream.
        """

        StreamBase.__init__(self, request)

        self._logger = util.get_class_logger(self)

        self._options = options

        if self._options.deflate_stream:
            self._logger.debug('Setup filter for deflate-stream')
            # Route all further reads/writes through a deflate wrapper.
            self._request = util.DeflateRequest(self._request)

        self._request.client_terminated = False
        self._request.server_terminated = False

        # Holds body of received fragments.
        self._received_fragments = []
        # Holds the opcode of the first fragment.
        self._original_opcode = None

        self._writer = FragmentedFrameBuilder(
            self._options.mask_send, self._options.outgoing_frame_filters,
            self._options.encode_text_message_to_utf8)

        # Bodies of pings sent via send_ping() and not yet answered by a
        # pong.
        self._ping_queue = deque()

    def _receive_frame(self):
        """Receives a frame and return data in the frame as a tuple containing
        each header field and payload separately.

        Raises:
            ConnectionTerminatedException: when read returns empty
                string.
            InvalidFrameException: when the frame contains invalid data.
        """

        def _receive_bytes(length):
            return self.receive_bytes(length)

        return parse_frame(receive_bytes=_receive_bytes,
                           logger=self._logger,
                           ws_version=self._request.ws_version,
                           unmask_receive=self._options.unmask_receive)

    def _receive_frame_as_frame_object(self):
        """Receives a frame and returns it wrapped in a Frame object."""
        opcode, bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()

        return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
                     opcode=opcode, payload=bytes)

    def receive_filtered_frame(self):
        """Receives a frame and applies frame filters and message filters.
        The frame to be received must satisfy following conditions:
        - The frame is not fragmented.
        - The opcode of the frame is TEXT or BINARY.

        DO NOT USE this method except for testing purpose.
        """

        frame = self._receive_frame_as_frame_object()
        if not frame.fin:
            raise InvalidFrameException(
                'Segmented frames must not be received via '
                'receive_filtered_frame()')
        if (frame.opcode != common.OPCODE_TEXT and
            frame.opcode != common.OPCODE_BINARY):
            raise InvalidFrameException(
                'Control frames must not be received via '
                'receive_filtered_frame()')

        for frame_filter in self._options.incoming_frame_filters:
            frame_filter.filter(frame)
        for message_filter in self._options.incoming_message_filters:
            frame.payload = message_filter.filter(frame.payload)
        return frame

    def send_message(self, message, end=True, binary=False):
        """Send message.

        Args:
            message: text in unicode or binary in str to send.
            end: False to send this message as a non-final fragment.
            binary: send message as binary frame.

        Raises:
            BadOperationException: when called on a server-terminated
                connection or called with inconsistent message type or
                binary parameter.
        """

        if self._request.server_terminated:
            raise BadOperationException(
                'Requested send_message after sending out a closing handshake')

        if binary and isinstance(message, unicode):
            raise BadOperationException(
                'Message for binary frame must be instance of str')

        for message_filter in self._options.outgoing_message_filters:
            message = message_filter.filter(message, end, binary)

        try:
            # Set this to any positive integer to limit maximum size of data in
            # payload data of each frame.
            MAX_PAYLOAD_DATA_SIZE = -1

            if MAX_PAYLOAD_DATA_SIZE <= 0:
                self._write(self._writer.build(message, end, binary))
                return

            bytes_written = 0
            while True:
                end_for_this_frame = end
                bytes_to_write = len(message) - bytes_written
                if (MAX_PAYLOAD_DATA_SIZE > 0 and
                    bytes_to_write > MAX_PAYLOAD_DATA_SIZE):
                    end_for_this_frame = False
                    bytes_to_write = MAX_PAYLOAD_DATA_SIZE

                frame = self._writer.build(
                    message[bytes_written:bytes_written + bytes_to_write],
                    end_for_this_frame,
                    binary)
                self._write(frame)

                bytes_written += bytes_to_write

                # This if must be placed here (the end of while block) so that
                # at least one frame is sent.
                if len(message) <= bytes_written:
                    break
        except ValueError, e:
            # The writer raises ValueError on inconsistent frame types;
            # surface it as the API-level exception.
            raise BadOperationException(e)

    def _get_message_from_frame(self, frame):
        """Gets a message from frame. If the message is composed of fragmented
        frames and the frame is not the last fragmented frame, this method
        returns None. The whole message will be returned when the last
        fragmented frame is passed to this method.

        Raises:
            InvalidFrameException: when the frame doesn't match defragmentation
                context, or the frame contains invalid data.
        """

        if frame.opcode == common.OPCODE_CONTINUATION:
            if not self._received_fragments:
                if frame.fin:
                    raise InvalidFrameException(
                        'Received a termination frame but fragmentation '
                        'not started')
                else:
                    raise InvalidFrameException(
                        'Received an intermediate frame but '
                        'fragmentation not started')

            if frame.fin:
                # End of fragmentation frame
                self._received_fragments.append(frame.payload)
                message = ''.join(self._received_fragments)
                self._received_fragments = []
                return message
            else:
                # Intermediate frame
                self._received_fragments.append(frame.payload)
                return None
        else:
            if self._received_fragments:
                if frame.fin:
                    raise InvalidFrameException(
                        'Received an unfragmented frame without '
                        'terminating existing fragmentation')
                else:
                    raise InvalidFrameException(
                        'New fragmentation started without terminating '
                        'existing fragmentation')

            if frame.fin:
                # Unfragmented frame

                self._original_opcode = frame.opcode
                return frame.payload
            else:
                # Start of fragmentation frame

                if (not self._options.allow_fragmented_control_frame and
                    common.is_control_opcode(frame.opcode)):
                    raise InvalidFrameException(
                        'Control frames must not be fragmented')

                self._original_opcode = frame.opcode
                self._received_fragments.append(frame.payload)
                return None

    def _process_close_message(self, message):
        """Processes close message.

        Args:
            message: close message.

        Raises:
            InvalidFrameException: when the message is invalid.
        """

        self._request.client_terminated = True

        # Status code is optional. We can have status reason only if we
        # have status code. Status reason can be empty string. So,
        # allowed cases are
        # - no application data: no code no reason
        # - 2 octet of application data: has code but no reason
        # - 3 or more octet of application data: both code and reason
        if len(message) == 0:
            self._logger.debug('Received close frame (empty body)')
            self._request.ws_close_code = (
                common.STATUS_NO_STATUS_RECEIVED)
        elif len(message) == 1:
            raise InvalidFrameException(
                'If a close frame has status code, the length of '
                'status code must be 2 octet')
        elif len(message) >= 2:
            self._request.ws_close_code = struct.unpack(
                '!H', message[0:2])[0]
            self._request.ws_close_reason = message[2:].decode(
                'utf-8', 'replace')
            self._logger.debug(
                'Received close frame (code=%d, reason=%r)',
                self._request.ws_close_code,
                self._request.ws_close_reason)

        # Drain junk data after the close frame if necessary.
        self._drain_received_data()

        if self._request.server_terminated:
            self._logger.debug(
                'Received ack for server-initiated closing handshake')
            return

        self._logger.debug(
            'Received client-initiated closing handshake')

        code = common.STATUS_NORMAL_CLOSURE
        reason = ''
        if hasattr(self._request, '_dispatcher'):
            dispatcher = self._request._dispatcher
            code, reason = dispatcher.passive_closing_handshake(
                self._request)
            if code is None and reason is not None and len(reason) > 0:
                self._logger.warning(
                    'Handler specified reason despite code being None')
                reason = ''
            if reason is None:
                reason = ''
        self._send_closing_handshake(code, reason)
        self._logger.debug(
            'Sent ack for client-initiated closing handshake '
            '(code=%r, reason=%r)', code, reason)

    def _process_ping_message(self, message):
        """Processes ping message.

        Args:
            message: ping message.
        """

        try:
            # self._request may not define on_ping_handler at all; in that
            # case fall through to the automatic pong below.
            handler = self._request.on_ping_handler
            if handler:
                handler(self._request, message)
                return
        except AttributeError, e:
            pass
        self._send_pong(message)

    def _process_pong_message(self, message):
        """Processes pong message.

        Args:
            message: pong message.
        """

        # TODO(tyoshino): Add ping timeout handling.

        inflight_pings = deque()

        while True:
            try:
                expected_body = self._ping_queue.popleft()
                if expected_body == message:
                    # inflight_pings contains pings ignored by the
                    # other peer. Just forget them.
                    self._logger.debug(
                        'Ping %r is acked (%d pings were ignored)',
                        expected_body, len(inflight_pings))
                    break
                else:
                    inflight_pings.append(expected_body)
            except IndexError, e:
                # The received pong was unsolicited pong. Keep the
                # ping queue as is.
                self._ping_queue = inflight_pings
                self._logger.debug('Received a unsolicited pong')
                break

        try:
            # Optional user hook; absence of on_pong_handler is not an
            # error.
            handler = self._request.on_pong_handler
            if handler:
                handler(self._request, message)
        except AttributeError, e:
            pass

    def receive_message(self):
        """Receive a WebSocket frame and return its payload as a text in
        unicode or a binary in str.

        Returns:
            payload data of the frame
            - as unicode instance if received text frame
            - as str instance if received binary frame
            or None iff received closing handshake.
        Raises:
            BadOperationException: when called on a client-terminated
                connection.
            ConnectionTerminatedException: when read returns empty
                string.
            InvalidFrameException: when the frame contains invalid
                data.
            UnsupportedFrameException: when the received frame has
                flags, opcode we cannot handle. You can ignore this
                exception and continue receiving the next frame.
        """

        if self._request.client_terminated:
            raise BadOperationException(
                'Requested receive_message after receiving a closing '
                'handshake')

        while True:
            # mp_conn.read will block if no bytes are available.
            # Timeout is controlled by TimeOut directive of Apache.

            frame = self._receive_frame_as_frame_object()

            # Check the constraint on the payload size for control frames
            # before extension processes the frame.
            # See also http://tools.ietf.org/html/rfc6455#section-5.5
            if (common.is_control_opcode(frame.opcode) and
                len(frame.payload) > 125):
                raise InvalidFrameException(
                    'Payload data size of control frames must be 125 bytes or '
                    'less')

            for frame_filter in self._options.incoming_frame_filters:
                frame_filter.filter(frame)

            if frame.rsv1 or frame.rsv2 or frame.rsv3:
                raise UnsupportedFrameException(
                    'Unsupported flag is set (rsv = %d%d%d)' %
                    (frame.rsv1, frame.rsv2, frame.rsv3))

            # None means the frame was an intermediate fragment; keep
            # reading until the message is complete.
            message = self._get_message_from_frame(frame)
            if message is None:
                continue

            for message_filter in self._options.incoming_message_filters:
                message = message_filter.filter(message)

            if self._original_opcode == common.OPCODE_TEXT:
                # The WebSocket protocol section 4.4 specifies that invalid
                # characters must be replaced with U+fffd REPLACEMENT
                # CHARACTER.
                try:
                    return message.decode('utf-8')
                except UnicodeDecodeError, e:
                    raise InvalidUTF8Exception(e)
            elif self._original_opcode == common.OPCODE_BINARY:
                return message
            elif self._original_opcode == common.OPCODE_CLOSE:
                self._process_close_message(message)
                return None
            elif self._original_opcode == common.OPCODE_PING:
                self._process_ping_message(message)
            elif self._original_opcode == common.OPCODE_PONG:
                self._process_pong_message(message)
            else:
                raise UnsupportedFrameException(
                    'Opcode %d is not supported' % self._original_opcode)

    def _send_closing_handshake(self, code, reason):
        """Builds and writes a close frame, marking the server side as
        terminated."""
        body = create_closing_handshake_body(code, reason)
        frame = create_close_frame(
            body, mask=self._options.mask_send,
            frame_filters=self._options.outgoing_frame_filters)

        self._request.server_terminated = True

        self._write(frame)

    def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
        """Closes a WebSocket connection.

        Args:
            code: Status code for close frame. If code is None, a close
                frame with empty body will be sent.
            reason: string representing close reason.
        Raises:
            BadOperationException: when reason is specified with code None
                or reason is not an instance of both str and unicode.
        """

        if self._request.server_terminated:
            self._logger.debug(
                'Requested close_connection but server is already terminated')
            return

        if code is None:
            if reason is not None and len(reason) > 0:
                raise BadOperationException(
                    'close reason must not be specified if code is None')
            reason = ''
        else:
            if not isinstance(reason, str) and not isinstance(reason, unicode):
                raise BadOperationException(
                    'close reason must be an instance of str or unicode')

        self._send_closing_handshake(code, reason)
        self._logger.debug(
            'Sent server-initiated closing handshake (code=%r, reason=%r)',
            code, reason)

        if (code == common.STATUS_GOING_AWAY or
            code == common.STATUS_PROTOCOL_ERROR):
            # It doesn't make sense to wait for a close frame if the reason is
            # protocol error or that the server is going away. For some of
            # other reasons, it might not make sense to wait for a close frame,
            # but it's not clear, yet.
            return

        # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
        # or until a server-defined timeout expires.
        #
        # For now, we expect receiving closing handshake right after sending
        # out closing handshake.
        message = self.receive_message()
        if message is not None:
            raise ConnectionTerminatedException(
                'Didn\'t receive valid ack for closing handshake')
        # TODO: 3. close the WebSocket connection.
        # note: mod_python Connection (mp_conn) doesn't have close method.

    def send_ping(self, body=''):
        """Sends a ping frame and remembers its body so the matching pong
        can be recognized in _process_pong_message."""
        frame = create_ping_frame(
            body,
            self._options.mask_send,
            self._options.outgoing_frame_filters)
        self._write(frame)

        self._ping_queue.append(body)

    def _send_pong(self, body):
        """Sends a pong frame echoing the given ping body."""
        frame = create_pong_frame(
            body,
            self._options.mask_send,
            self._options.outgoing_frame_filters)
        self._write(frame)

    def get_last_received_opcode(self):
        """Returns the opcode of the WebSocket message which the last received
        frame belongs to. The return value is valid iff immediately after
        receive_message call.
        """

        return self._original_opcode

    def _drain_received_data(self):
        """Drains unread data in the receive buffer to avoid sending out TCP
        RST packet. This is because when deflate-stream is enabled, some
        DEFLATE block for flushing data may follow a close frame. If any data
        remains in the receive buffer of a socket when the socket is closed,
        it sends out TCP RST packet to the other peer.

        Since mod_python's mp_conn object doesn't support non-blocking read,
        we perform this only when pywebsocket is running in standalone mode.
        """

        # If self._options.deflate_stream is true, self._request is
        # DeflateRequest, so we can get wrapped request object by
        # self._request._request.
        #
        # Only _StandaloneRequest has _drain_received_data method.
        if (self._options.deflate_stream and
            ('_drain_received_data' in dir(self._request._request))):
            self._request._request._drain_received_data()
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/common.py b/pyload/lib/mod_pywebsocket/common.py
new file mode 100644
index 000000000..2388379c0
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/common.py
@@ -0,0 +1,307 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file must not depend on any module specific to the WebSocket protocol.
+"""
+
+
+from mod_pywebsocket import http_header_util
+
+
# Additional log level definitions.
LOGLEVEL_FINE = 9

# Constants indicating WebSocket protocol version.
# Note that several draft revisions reuse the wire version of an earlier
# draft (e.g. HyBi 03 still speaks wire version 2, HyBi 09-12 speak 8,
# HyBi 14-17 speak 13), hence the repeated values below.
VERSION_HIXIE75 = -1
VERSION_HYBI00 = 0
VERSION_HYBI01 = 1
VERSION_HYBI02 = 2
VERSION_HYBI03 = 2
VERSION_HYBI04 = 4
VERSION_HYBI05 = 5
VERSION_HYBI06 = 6
VERSION_HYBI07 = 7
VERSION_HYBI08 = 8
VERSION_HYBI09 = 8
VERSION_HYBI10 = 8
VERSION_HYBI11 = 8
VERSION_HYBI12 = 8
VERSION_HYBI13 = 13
VERSION_HYBI14 = 13
VERSION_HYBI15 = 13
VERSION_HYBI16 = 13
VERSION_HYBI17 = 13

# Constants indicating WebSocket protocol latest version.
VERSION_HYBI_LATEST = VERSION_HYBI13

# Port numbers
DEFAULT_WEB_SOCKET_PORT = 80
DEFAULT_WEB_SOCKET_SECURE_PORT = 443

# Schemes
WEB_SOCKET_SCHEME = 'ws'
WEB_SOCKET_SECURE_SCHEME = 'wss'

# Frame opcodes defined in the spec.
# Opcodes 0x8 and above are control frames (see is_control_opcode below).
OPCODE_CONTINUATION = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa

# UUIDs used by HyBi 04 and later opening handshake and frame masking.
WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'

# Opening handshake header names and expected values.
UPGRADE_HEADER = 'Upgrade'
WEBSOCKET_UPGRADE_TYPE = 'websocket'
WEBSOCKET_UPGRADE_TYPE_HIXIE75 = 'WebSocket'
CONNECTION_HEADER = 'Connection'
UPGRADE_CONNECTION_TYPE = 'Upgrade'
HOST_HEADER = 'Host'
ORIGIN_HEADER = 'Origin'
SEC_WEBSOCKET_ORIGIN_HEADER = 'Sec-WebSocket-Origin'
SEC_WEBSOCKET_KEY_HEADER = 'Sec-WebSocket-Key'
SEC_WEBSOCKET_ACCEPT_HEADER = 'Sec-WebSocket-Accept'
SEC_WEBSOCKET_VERSION_HEADER = 'Sec-WebSocket-Version'
SEC_WEBSOCKET_PROTOCOL_HEADER = 'Sec-WebSocket-Protocol'
SEC_WEBSOCKET_EXTENSIONS_HEADER = 'Sec-WebSocket-Extensions'
SEC_WEBSOCKET_DRAFT_HEADER = 'Sec-WebSocket-Draft'
SEC_WEBSOCKET_KEY1_HEADER = 'Sec-WebSocket-Key1'
SEC_WEBSOCKET_KEY2_HEADER = 'Sec-WebSocket-Key2'
SEC_WEBSOCKET_LOCATION_HEADER = 'Sec-WebSocket-Location'

# Extensions
DEFLATE_STREAM_EXTENSION = 'deflate-stream'
DEFLATE_FRAME_EXTENSION = 'deflate-frame'
PERFRAME_COMPRESSION_EXTENSION = 'perframe-compress'
PERMESSAGE_COMPRESSION_EXTENSION = 'permessage-compress'
X_WEBKIT_DEFLATE_FRAME_EXTENSION = 'x-webkit-deflate-frame'
X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION = 'x-webkit-permessage-compress'
MUX_EXTENSION = 'mux_DO_NOT_USE'

# Status codes
# Code STATUS_NO_STATUS_RECEIVED, STATUS_ABNORMAL_CLOSURE, and
# STATUS_TLS_HANDSHAKE are pseudo codes to indicate specific error cases.
# Could not be used for codes in actual closing frames.
# Application level errors must use codes in the range
# STATUS_USER_REGISTERED_BASE to STATUS_USER_PRIVATE_MAX. The codes in the
# range STATUS_USER_REGISTERED_BASE to STATUS_USER_REGISTERED_MAX are managed
# by IANA. Usually application must define user protocol level errors in the
# range STATUS_USER_PRIVATE_BASE to STATUS_USER_PRIVATE_MAX.
STATUS_NORMAL_CLOSURE = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA = 1003
STATUS_NO_STATUS_RECEIVED = 1005
STATUS_ABNORMAL_CLOSURE = 1006
STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_MANDATORY_EXTENSION = 1010
STATUS_INTERNAL_ENDPOINT_ERROR = 1011
STATUS_TLS_HANDSHAKE = 1015
STATUS_USER_REGISTERED_BASE = 3000
STATUS_USER_REGISTERED_MAX = 3999
STATUS_USER_PRIVATE_BASE = 4000
STATUS_USER_PRIVATE_MAX = 4999
# Following definitions are aliases to keep compatibility. Applications must
# not use these obsoleted definitions anymore.
STATUS_NORMAL = STATUS_NORMAL_CLOSURE
STATUS_UNSUPPORTED = STATUS_UNSUPPORTED_DATA
STATUS_CODE_NOT_AVAILABLE = STATUS_NO_STATUS_RECEIVED
STATUS_ABNORMAL_CLOSE = STATUS_ABNORMAL_CLOSURE
STATUS_INVALID_FRAME_PAYLOAD = STATUS_INVALID_FRAME_PAYLOAD_DATA
STATUS_MANDATORY_EXT = STATUS_MANDATORY_EXTENSION

# HTTP status codes
HTTP_STATUS_BAD_REQUEST = 400
HTTP_STATUS_FORBIDDEN = 403
HTTP_STATUS_NOT_FOUND = 404
+
def is_control_opcode(opcode):
    """Returns True iff opcode denotes a control frame (0x8 through 0xf,
    i.e. CLOSE, PING, PONG and the reserved control opcodes)."""

    return 0x8 <= opcode <= 0xf
+
+
class ExtensionParameter(object):
    """One extension entry exchanged during extension negotiation in the
    opening handshake: an extension name plus an ordered list of
    (parameter name, parameter value) pairs, where a value may be None.
    """

    def __init__(self, name):
        self._name = name
        # TODO(tyoshino): Change the data structure to more efficient one such
        # as dict when the spec changes to say like
        # - Parameter names must be unique
        # - The order of parameters is not significant
        self._parameters = []

    def name(self):
        """Returns the extension name."""
        return self._name

    def add_parameter(self, name, value):
        """Appends a (name, value) pair; value may be None for a bare name."""
        self._parameters.append((name, value))

    def get_parameters(self):
        """Returns the parameter list as (name, value) pairs, in order."""
        return self._parameters

    def get_parameter_names(self):
        """Returns just the parameter names, in order."""
        return [name for name, _ in self._parameters]

    def has_parameter(self, name):
        """Returns True iff a parameter with the given name exists."""
        for candidate, _ in self._parameters:
            if candidate == name:
                return True
        return False

    def get_parameter_value(self, name):
        """Returns the value of the first parameter with the given name, or
        None when absent (indistinguishable from a valueless parameter)."""
        for candidate, value in self._parameters:
            if candidate == name:
                return value
        return None
+
+
class ExtensionParsingException(Exception):
    """Raised when a Sec-WebSocket-Extensions header value cannot be
    parsed."""

    def __init__(self, name):
        Exception.__init__(self, name)
+
+
def _parse_extension_param(state, definition, allow_quoted_string):
    """Parses one "name[=value]" extension parameter from state and records
    it on definition via add_parameter.

    Raises:
        ExtensionParsingException: when no valid name (or, where required,
            value) can be consumed.
    """

    param_name = http_header_util.consume_token(state)
    if param_name is None:
        raise ExtensionParsingException('No valid parameter name found')

    http_header_util.consume_lwses(state)

    # A bare name without '=' is a parameter with no value.
    if not http_header_util.consume_string(state, '='):
        definition.add_parameter(param_name, None)
        return

    http_header_util.consume_lwses(state)

    if allow_quoted_string:
        # TODO(toyoshim): Add code to validate that parsed param_value is token
        param_value = http_header_util.consume_token_or_quoted_string(state)
    else:
        param_value = http_header_util.consume_token(state)
        if param_value is None:
            raise ExtensionParsingException(
                'No valid parameter value found on the right-hand side of '
                'parameter %r' % param_name)

    definition.add_parameter(param_name, param_value)
+
+
+def _parse_extension(state, allow_quoted_string):
+ extension_token = http_header_util.consume_token(state)
+ if extension_token is None:
+ return None
+
+ extension = ExtensionParameter(extension_token)
+
+ while True:
+ http_header_util.consume_lwses(state)
+
+ if not http_header_util.consume_string(state, ';'):
+ break
+
+ http_header_util.consume_lwses(state)
+
+ try:
+ _parse_extension_param(state, extension, allow_quoted_string)
+ except ExtensionParsingException, e:
+ raise ExtensionParsingException(
+ 'Failed to parse parameter for %r (%r)' %
+ (extension_token, e))
+
+ return extension
+
+
def parse_extensions(data, allow_quoted_string=False):
    """Parses Sec-WebSocket-Extensions header value returns a list of
    ExtensionParameter objects.

    Leading LWSes must be trimmed.

    Raises:
        ExtensionParsingException: when entries are not comma separated or
            no valid entry is found at all.
    """

    state = http_header_util.ParsingState(data)

    parsed = []
    while True:
        entry = _parse_extension(state, allow_quoted_string)
        if entry is not None:
            parsed.append(entry)

        http_header_util.consume_lwses(state)

        # End of input terminates the list; otherwise entries must be
        # separated by a comma.
        if http_header_util.peek(state) is None:
            break

        if not http_header_util.consume_string(state, ','):
            raise ExtensionParsingException(
                'Failed to parse Sec-WebSocket-Extensions header: '
                'Expected a comma but found %r' %
                http_header_util.peek(state))

        http_header_util.consume_lwses(state)

    if not parsed:
        raise ExtensionParsingException(
            'No valid extension entry found')

    return parsed
+
+
def format_extension(extension):
    """Serializes an ExtensionParameter object back into its header form:
    name followed by '; '-joined parameters, quoting values as needed."""

    parts = [extension.name()]
    for param_name, param_value in extension.get_parameters():
        if param_value is None:
            parts.append(param_name)
        else:
            parts.append('%s=%s' % (
                param_name,
                http_header_util.quote_if_necessary(param_value)))
    return '; '.join(parts)
+
+
def format_extensions(extension_list):
    """Serializes a list of ExtensionParameter objects into a single
    comma-separated header value."""

    return ', '.join(
        [format_extension(extension) for extension in extension_list])
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/dispatch.py b/pyload/lib/mod_pywebsocket/dispatch.py
new file mode 100644
index 000000000..25905f180
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/dispatch.py
@@ -0,0 +1,387 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Dispatch WebSocket request.
+"""
+
+
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import msgutil
+from mod_pywebsocket import mux
+from mod_pywebsocket import stream
+from mod_pywebsocket import util
+
+
+# Handler source files are recognized by this suffix (matched
+# case-insensitively by the pattern below).
+_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
+_SOURCE_SUFFIX = '_wsh.py'
+# Names of the functions a handler file must (or, for the passive closing
+# handshake handler, may) define at module level.
+_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
+_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
+_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
+    'web_socket_passive_closing_handshake')
+
+
+class DispatchException(Exception):
+    """Exception in dispatching WebSocket request.
+
+    The optional status defaults to 404 and is the HTTP status code that
+    should be sent back to the client.
+    """
+
+    def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
+        super(DispatchException, self).__init__(name)
+        # HTTP status code associated with this dispatch failure.
+        self.status = status
+
+
+def _default_passive_closing_handshake_handler(request):
+    """Default web_socket_passive_closing_handshake handler.
+
+    Used when a handler file does not define its own; replies with a
+    normal-closure status code and an empty reason.
+    """
+
+    return common.STATUS_NORMAL_CLOSURE, ''
+
+
+def _normalize_path(path):
+    """Normalize path.
+
+    Args:
+        path: the path to normalize.
+
+    Path is converted to the absolute path.
+    The input path can use either '\\' or '/' as the separator.
+    The normalized path always uses '/' regardless of the platform.
+    """
+
+    # Convert any backslashes to the platform separator first so that
+    # realpath resolves the path correctly on Windows.
+    path = path.replace('\\', os.path.sep)
+    path = os.path.realpath(path)
+    # realpath may reintroduce platform separators; force '/' in the result.
+    path = path.replace('\\', '/')
+    return path
+
+
+def _create_path_to_resource_converter(base_dir):
+    """Returns a function that converts the path of a WebSocket handler source
+    file to a resource string by removing the path to the base directory from
+    its head, removing _SOURCE_SUFFIX from its tail, and replacing path
+    separators in it with '/'.
+
+    The returned converter yields None for paths that do not end with
+    _SOURCE_SUFFIX or do not start with base_dir.
+
+    Args:
+        base_dir: the path to the base directory.
+    """
+
+    base_dir = _normalize_path(base_dir)
+
+    base_len = len(base_dir)
+    suffix_len = len(_SOURCE_SUFFIX)
+
+    def converter(path):
+        if not path.endswith(_SOURCE_SUFFIX):
+            return None
+        # _normalize_path must not be used because resolving symlink breaks
+        # following path check.
+        path = path.replace('\\', '/')
+        # NOTE(review): plain startswith has no separator boundary, so a
+        # sibling directory such as '<base_dir>2/...' would also pass this
+        # check — confirm whether callers can ever hand in such paths.
+        if not path.startswith(base_dir):
+            return None
+        return path[base_len:-suffix_len]
+
+    return converter
+
+
+def _enumerate_handler_file_paths(directory):
+    """Returns a generator that enumerates WebSocket Handler source file names
+    in the given directory.
+
+    The directory tree is walked recursively; only files matching
+    _SOURCE_PATH_PATTERN (i.e. '*_wsh.py', case-insensitive) are yielded.
+    """
+
+    for root, unused_dirs, files in os.walk(directory):
+        for base in files:
+            path = os.path.join(root, base)
+            if _SOURCE_PATH_PATTERN.search(path):
+                yield path
+
+
+class _HandlerSuite(object):
+    """A handler suite holder class.
+
+    Bundles the three handler callables sourced from one handler file.
+    """
+
+    def __init__(self, do_extra_handshake, transfer_data,
+                 passive_closing_handshake):
+        # Callable run during the opening handshake.
+        self.do_extra_handshake = do_extra_handshake
+        # Callable run to exchange data after the handshake.
+        self.transfer_data = transfer_data
+        # Callable run to answer a client-initiated closing handshake.
+        self.passive_closing_handshake = passive_closing_handshake
+
+
+def _source_handler_file(handler_definition):
+    """Source a handler definition string.
+
+    Args:
+        handler_definition: a string containing Python statements that define
+            handler functions.
+
+    Returns a _HandlerSuite. Raises DispatchException when the code fails
+    to execute or when a mandatory handler function is missing.
+
+    SECURITY NOTE: the definition string is executed with exec, so handler
+    files must come only from trusted locations.
+    """
+
+    global_dic = {}
+    try:
+        exec handler_definition in global_dic
+    except Exception:
+        raise DispatchException('Error in sourcing handler:' +
+                                util.get_stack_trace())
+    passive_closing_handshake_handler = None
+    try:
+        # This handler is optional; fall back to the default when absent
+        # or not callable.
+        passive_closing_handshake_handler = _extract_handler(
+            global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
+    except Exception:
+        passive_closing_handshake_handler = (
+            _default_passive_closing_handshake_handler)
+    return _HandlerSuite(
+        _extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
+        _extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
+        passive_closing_handshake_handler)
+
+
+def _extract_handler(dic, name):
+    """Extracts a callable with the specified name from the given dictionary
+    dic.
+
+    Raises DispatchException when the name is missing or not callable.
+    """
+
+    if name not in dic:
+        raise DispatchException('%s is not defined.' % name)
+    handler = dic[name]
+    if not callable(handler):
+        raise DispatchException('%s is not callable.' % name)
+    return handler
+
+
+class Dispatcher(object):
+    """Dispatches WebSocket requests.
+
+    This class maintains a map from resource name to handlers.
+    """
+
+    def __init__(
+        self, root_dir, scan_dir=None,
+        allow_handlers_outside_root_dir=True):
+        """Construct an instance.
+
+        Args:
+            root_dir: The directory where handler definition files are
+                      placed.
+            scan_dir: The directory where handler definition files are
+                      searched. scan_dir must be a directory under root_dir,
+                      including root_dir itself.  If scan_dir is None,
+                      root_dir is used as scan_dir. scan_dir can be useful
+                      in saving scan time when root_dir contains many
+                      subdirectories.
+            allow_handlers_outside_root_dir: Scans handler files even if their
+                      canonical path is not under root_dir.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        # Maps resource string -> _HandlerSuite.
+        self._handler_suite_map = {}
+        # Warning strings accumulated while sourcing handler files.
+        self._source_warnings = []
+        if scan_dir is None:
+            scan_dir = root_dir
+        if not os.path.realpath(scan_dir).startswith(
+                os.path.realpath(root_dir)):
+            raise DispatchException('scan_dir:%s must be a directory under '
+                                    'root_dir:%s.' % (scan_dir, root_dir))
+        self._source_handler_files_in_dir(
+            root_dir, scan_dir, allow_handlers_outside_root_dir)
+
+    def add_resource_path_alias(self,
+                                alias_resource_path, existing_resource_path):
+        """Add resource path alias.
+
+        Once added, request to alias_resource_path would be handled by
+        handler registered for existing_resource_path.
+
+        Args:
+            alias_resource_path: alias resource path
+            existing_resource_path: existing resource path
+
+        Raises:
+            DispatchException: when no handler is registered for
+                existing_resource_path.
+        """
+        try:
+            handler_suite = self._handler_suite_map[existing_resource_path]
+            self._handler_suite_map[alias_resource_path] = handler_suite
+        except KeyError:
+            raise DispatchException('No handler for: %r' %
+                                    existing_resource_path)
+
+    def source_warnings(self):
+        """Return warnings in sourcing handlers."""
+
+        return self._source_warnings
+
+    def do_extra_handshake(self, request):
+        """Do extra checking in WebSocket handshake.
+
+        Select a handler based on request.uri and call its
+        web_socket_do_extra_handshake function.
+
+        Args:
+            request: mod_python request.
+
+        Raises:
+            DispatchException: when handler was not found
+            AbortedByUserException: when user handler abort connection
+            HandshakeException: when opening handshake failed
+        """
+
+        handler_suite = self.get_handler_suite(request.ws_resource)
+        if handler_suite is None:
+            raise DispatchException('No handler for: %r' % request.ws_resource)
+        do_extra_handshake_ = handler_suite.do_extra_handshake
+        try:
+            do_extra_handshake_(request)
+        except handshake.AbortedByUserException, e:
+            # Deliberate user abort: propagate unchanged.
+            raise
+        except Exception, e:
+            # Any other handler failure is reported as a 403 handshake
+            # failure, with the handler name prepended for diagnosis.
+            util.prepend_message_to_exception(
+                '%s raised exception for %s: ' % (
+                    _DO_EXTRA_HANDSHAKE_HANDLER_NAME,
+                    request.ws_resource),
+                e)
+            raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
+
+    def transfer_data(self, request):
+        """Let a handler transfer_data with a WebSocket client.
+
+        Select a handler based on request.ws_resource and call its
+        web_socket_transfer_data function.
+
+        Args:
+            request: mod_python request.
+
+        Raises:
+            DispatchException: when handler was not found
+            AbortedByUserException: when user handler abort connection
+        """
+
+        # TODO(tyoshino): Terminate underlying TCP connection if possible.
+        try:
+            if mux.use_mux(request):
+                mux.start(request, self)
+            else:
+                handler_suite = self.get_handler_suite(request.ws_resource)
+                if handler_suite is None:
+                    raise DispatchException('No handler for: %r' %
+                                            request.ws_resource)
+                transfer_data_ = handler_suite.transfer_data
+                transfer_data_(request)
+
+            if not request.server_terminated:
+                request.ws_stream.close_connection()
+        # Catch non-critical exceptions the handler didn't handle.
+        # Each branch below maps a protocol-level failure to the matching
+        # WebSocket close status code.
+        except handshake.AbortedByUserException, e:
+            self._logger.debug('%s', e)
+            raise
+        except msgutil.BadOperationException, e:
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(common.STATUS_ABNORMAL_CLOSURE)
+        except msgutil.InvalidFrameException, e:
+            # InvalidFrameException must be caught before
+            # ConnectionTerminatedException that catches InvalidFrameException.
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
+        except msgutil.UnsupportedFrameException, e:
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
+        except stream.InvalidUTF8Exception, e:
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(
+                common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
+        except msgutil.ConnectionTerminatedException, e:
+            self._logger.debug('%s', e)
+        except Exception, e:
+            util.prepend_message_to_exception(
+                '%s raised exception for %s: ' % (
+                    _TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
+                e)
+            raise
+
+    def passive_closing_handshake(self, request):
+        """Prepare code and reason for responding client initiated closing
+        handshake.
+        """
+
+        handler_suite = self.get_handler_suite(request.ws_resource)
+        if handler_suite is None:
+            return _default_passive_closing_handshake_handler(request)
+        return handler_suite.passive_closing_handshake(request)
+
+    def get_handler_suite(self, resource):
+        """Retrieves two handlers (one for extra handshake processing, and one
+        for data transfer) for the given request as a HandlerSuite object.
+
+        The query part of the resource is ignored for lookup. A fragment
+        identifier causes a DispatchException (400) only when a handler
+        matched the fragment-less resource.
+        """
+
+        fragment = None
+        if '#' in resource:
+            resource, fragment = resource.split('#', 1)
+        if '?' in resource:
+            resource = resource.split('?', 1)[0]
+        handler_suite = self._handler_suite_map.get(resource)
+        if handler_suite and fragment:
+            raise DispatchException('Fragment identifiers MUST NOT be used on '
+                                    'WebSocket URIs',
+                                    common.HTTP_STATUS_BAD_REQUEST)
+        return handler_suite
+
+    def _source_handler_files_in_dir(
+        self, root_dir, scan_dir, allow_handlers_outside_root_dir):
+        """Source all the handler source files in the scan_dir directory.
+
+        The resource path is determined relative to root_dir.
+        """
+
+        # We build a map from resource to handler code assuming that there's
+        # only one path from root_dir to scan_dir and it can be obtained by
+        # comparing realpath of them.
+
+        # Here we cannot use abspath. See
+        # https://bugs.webkit.org/show_bug.cgi?id=31603
+
+        convert = _create_path_to_resource_converter(root_dir)
+        scan_realpath = os.path.realpath(scan_dir)
+        root_realpath = os.path.realpath(root_dir)
+        for path in _enumerate_handler_file_paths(scan_realpath):
+            if (not allow_handlers_outside_root_dir and
+                (not os.path.realpath(path).startswith(root_realpath))):
+                self._logger.debug(
+                    'Canonical path of %s is not under root directory' %
+                    path)
+                continue
+            try:
+                # NOTE(review): the file object returned by open() is never
+                # explicitly closed; this relies on CPython refcounting.
+                handler_suite = _source_handler_file(open(path).read())
+            except DispatchException, e:
+                self._source_warnings.append('%s: %s' % (path, e))
+                continue
+            resource = convert(path)
+            if resource is None:
+                self._logger.debug(
+                    'Path to resource conversion on %s failed' % path)
+            else:
+                # NOTE(review): convert(path) is recomputed here; 'resource'
+                # already holds the same value and could be reused.
+                self._handler_suite_map[convert(path)] = handler_suite
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/extensions.py b/pyload/lib/mod_pywebsocket/extensions.py
new file mode 100644
index 000000000..03dbf9ee1
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/extensions.py
@@ -0,0 +1,727 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket.http_header_util import quote_if_necessary
+
+
+# Maps extension name -> processor class. Populated at module import time
+# by the registration statements below each processor definition.
+_available_processors = {}
+
+
+class ExtensionProcessorInterface(object):
+    """Interface implemented by all extension processors.
+
+    A processor negotiates one extension entry of the
+    Sec-WebSocket-Extensions header and configures the stream accordingly.
+    """
+
+    def name(self):
+        # Extension token this processor handles; None in the base class.
+        return None
+
+    def get_extension_response(self):
+        # Returns an ExtensionParameter to accept the extension, or None
+        # to reject it; the base class rejects everything.
+        return None
+
+    def setup_stream_options(self, stream_options):
+        # Hook to mutate StreamOptions once the extension is accepted.
+        pass
+
+
+class DeflateStreamExtensionProcessor(ExtensionProcessorInterface):
+    """WebSocket DEFLATE stream extension processor.
+
+    Specification:
+    Section 9.2.1 in
+    http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10
+    """
+
+    def __init__(self, request):
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+
+    def name(self):
+        return common.DEFLATE_STREAM_EXTENSION
+
+    def get_extension_response(self):
+        # This extension takes no parameters; reject any request that
+        # carries some.
+        if len(self._request.get_parameter_names()) != 0:
+            return None
+
+        self._logger.debug(
+            'Enable %s extension', common.DEFLATE_STREAM_EXTENSION)
+
+        return common.ExtensionParameter(common.DEFLATE_STREAM_EXTENSION)
+
+    def setup_stream_options(self, stream_options):
+        stream_options.deflate_stream = True
+
+
+_available_processors[common.DEFLATE_STREAM_EXTENSION] = (
+    DeflateStreamExtensionProcessor)
+
+
+def _log_compression_ratio(logger, original_bytes, total_original_bytes,
+                           filtered_bytes, total_filtered_bytes):
+    """Logs the per-frame and cumulative outgoing compression ratio
+    (compressed size / original size) at debug level.
+    """
+
+    # Print inf when ratio is not available.
+    ratio = float('inf')
+    average_ratio = float('inf')
+    if original_bytes != 0:
+        ratio = float(filtered_bytes) / original_bytes
+    if total_original_bytes != 0:
+        average_ratio = (
+            float(total_filtered_bytes) / total_original_bytes)
+    logger.debug('Outgoing compress ratio: %f (average: %f)' %
+                 (ratio, average_ratio))
+
+
+def _log_decompression_ratio(logger, received_bytes, total_received_bytes,
+                             filtered_bytes, total_filtered_bytes):
+    """Logs the per-frame and cumulative incoming compression ratio
+    (received/compressed size / decompressed size) at debug level.
+    """
+
+    # Print inf when ratio is not available.
+    ratio = float('inf')
+    average_ratio = float('inf')
+    # NOTE(review): the guard tests received_bytes but the division below
+    # is by filtered_bytes; a nonzero input that inflates to zero bytes
+    # would raise ZeroDivisionError. The guard should likely test
+    # filtered_bytes (as the average_ratio guard below does) — confirm.
+    if received_bytes != 0:
+        ratio = float(received_bytes) / filtered_bytes
+    if total_filtered_bytes != 0:
+        average_ratio = (
+            float(total_received_bytes) / total_filtered_bytes)
+    logger.debug('Incoming compress ratio: %f (average: %f)' %
+                 (ratio, average_ratio))
+
+
+class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
+    """WebSocket Per-frame DEFLATE extension processor.
+
+    Specification:
+    http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
+    """
+
+    _WINDOW_BITS_PARAM = 'max_window_bits'
+    _NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
+
+    def __init__(self, request):
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+
+        # Parameters to advertise back to the client; settable via the
+        # set_response_* methods before get_extension_response is called.
+        self._response_window_bits = None
+        self._response_no_context_takeover = False
+        self._bfinal = False
+
+        # Counters for statistics.
+
+        # Total number of outgoing bytes supplied to this filter.
+        self._total_outgoing_payload_bytes = 0
+        # Total number of bytes sent to the network after applying this filter.
+        self._total_filtered_outgoing_payload_bytes = 0
+
+        # Total number of bytes received from the network.
+        self._total_incoming_payload_bytes = 0
+        # Total number of incoming bytes obtained after applying this filter.
+        self._total_filtered_incoming_payload_bytes = 0
+
+    def name(self):
+        return common.DEFLATE_FRAME_EXTENSION
+
+    def get_extension_response(self):
+        # Any unknown parameter will be just ignored.
+
+        window_bits = self._request.get_parameter_value(
+            self._WINDOW_BITS_PARAM)
+        no_context_takeover = self._request.has_parameter(
+            self._NO_CONTEXT_TAKEOVER_PARAM)
+        # no_context_takeover must appear without a value; reject otherwise.
+        if (no_context_takeover and
+            self._request.get_parameter_value(
+                self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
+            return None
+
+        # max_window_bits, when present, must be an integer in [8, 15].
+        if window_bits is not None:
+            try:
+                window_bits = int(window_bits)
+            except ValueError, e:
+                return None
+            if window_bits < 8 or window_bits > 15:
+                return None
+
+        self._deflater = util._RFC1979Deflater(
+            window_bits, no_context_takeover)
+
+        self._inflater = util._RFC1979Inflater()
+
+        self._compress_outgoing = True
+
+        response = common.ExtensionParameter(self._request.name())
+
+        if self._response_window_bits is not None:
+            response.add_parameter(
+                self._WINDOW_BITS_PARAM, str(self._response_window_bits))
+        if self._response_no_context_takeover:
+            response.add_parameter(
+                self._NO_CONTEXT_TAKEOVER_PARAM, None)
+
+        self._logger.debug(
+            'Enable %s extension ('
+            'request: window_bits=%s; no_context_takeover=%r, '
+            'response: window_wbits=%s; no_context_takeover=%r)' %
+            (self._request.name(),
+             window_bits,
+             no_context_takeover,
+             self._response_window_bits,
+             self._response_no_context_takeover))
+
+        return response
+
+    def setup_stream_options(self, stream_options):
+        # Thin adapters binding the stream's frame-filter protocol to this
+        # processor's private filter methods.
+
+        class _OutgoingFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, frame):
+                self._parent._outgoing_filter(frame)
+
+        class _IncomingFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, frame):
+                self._parent._incoming_filter(frame)
+
+        stream_options.outgoing_frame_filters.append(
+            _OutgoingFilter(self))
+        # Incoming filter is inserted at the front so decompression runs
+        # before any other incoming frame filter.
+        stream_options.incoming_frame_filters.insert(
+            0, _IncomingFilter(self))
+
+    def set_response_window_bits(self, value):
+        self._response_window_bits = value
+
+    def set_response_no_context_takeover(self, value):
+        self._response_no_context_takeover = value
+
+    def set_bfinal(self, value):
+        self._bfinal = value
+
+    def enable_outgoing_compression(self):
+        self._compress_outgoing = True
+
+    def disable_outgoing_compression(self):
+        self._compress_outgoing = False
+
+    def _outgoing_filter(self, frame):
+        """Transform outgoing frames. This method is called only by
+        an _OutgoingFilter instance.
+        """
+
+        original_payload_size = len(frame.payload)
+        self._total_outgoing_payload_bytes += original_payload_size
+
+        # Control frames and frames sent while compression is disabled
+        # pass through unmodified.
+        if (not self._compress_outgoing or
+            common.is_control_opcode(frame.opcode)):
+            self._total_filtered_outgoing_payload_bytes += (
+                original_payload_size)
+            return
+
+        frame.payload = self._deflater.filter(
+            frame.payload, bfinal=self._bfinal)
+        # RSV1 marks the frame payload as compressed.
+        frame.rsv1 = 1
+
+        filtered_payload_size = len(frame.payload)
+        self._total_filtered_outgoing_payload_bytes += filtered_payload_size
+
+        _log_compression_ratio(self._logger, original_payload_size,
+                               self._total_outgoing_payload_bytes,
+                               filtered_payload_size,
+                               self._total_filtered_outgoing_payload_bytes)
+
+    def _incoming_filter(self, frame):
+        """Transform incoming frames. This method is called only by
+        an _IncomingFilter instance.
+        """
+
+        received_payload_size = len(frame.payload)
+        self._total_incoming_payload_bytes += received_payload_size
+
+        # Only frames flagged with RSV1 carry compressed payloads.
+        if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
+            self._total_filtered_incoming_payload_bytes += (
+                received_payload_size)
+            return
+
+        frame.payload = self._inflater.filter(frame.payload)
+        frame.rsv1 = 0
+
+        filtered_payload_size = len(frame.payload)
+        self._total_filtered_incoming_payload_bytes += filtered_payload_size
+
+        _log_decompression_ratio(self._logger, received_payload_size,
+                                 self._total_incoming_payload_bytes,
+                                 filtered_payload_size,
+                                 self._total_filtered_incoming_payload_bytes)
+
+
+_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
+    DeflateFrameExtensionProcessor)
+
+
+# Adding vendor-prefixed deflate-frame extension.
+# TODO(bashi): Remove this after WebKit stops using vendor prefix.
+_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
+    DeflateFrameExtensionProcessor)
+
+
+def _parse_compression_method(data):
+    """Parses the value of "method" extension parameter.
+
+    The value reuses the extension-list grammar, so quoted strings are
+    allowed. May raise the parser's exception on malformed input.
+    """
+
+    return common.parse_extensions(data, allow_quoted_string=True)
+
+
+def _create_accepted_method_desc(method_name, method_params):
+    """Creates accepted-method-desc from given method name and parameters.
+
+    Builds an ExtensionParameter carrying the method's parameters and
+    returns its formatted string form.
+    """
+
+    extension = common.ExtensionParameter(method_name)
+    for name, value in method_params:
+        extension.add_parameter(name, value)
+    return common.format_extension(extension)
+
+
+class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
+    """Base class for Per-frame and Per-message compression extension.
+
+    Subclasses override name() and _lookup_compression_processor() to
+    select a concrete compression method processor.
+    """
+
+    _METHOD_PARAM = 'method'
+
+    def __init__(self, request):
+        self._logger = util.get_class_logger(self)
+        self._request = request
+        # Name of the accepted compression method (set on success).
+        self._compression_method_name = None
+        # Delegate processor for the accepted method (set on success).
+        self._compression_processor = None
+        # Optional callable invoked with the processor before negotiation.
+        self._compression_processor_hook = None
+
+    def name(self):
+        return ''
+
+    def _lookup_compression_processor(self, method_desc):
+        # Overridden by subclasses; the base class accepts no method.
+        return None
+
+    def _get_compression_processor_response(self):
+        """Looks up the compression processor based on the self._request and
+           returns the compression processor's response.
+        """
+
+        method_list = self._request.get_parameter_value(self._METHOD_PARAM)
+        if method_list is None:
+            return None
+        methods = _parse_compression_method(method_list)
+        if methods is None:
+            return None
+        # NOTE(review): 'comression_processor' below is misspelled — the
+        # name actually read later is 'compression_processor', so this
+        # initializer is dead, and a NameError is possible if the loop body
+        # never executes. Confirm and fix the spelling upstream.
+        comression_processor = None
+        # The current implementation tries only the first method that matches
+        # supported algorithm. Following methods aren't tried even if the
+        # first one is rejected.
+        # TODO(bashi): Need to clarify this behavior.
+        for method_desc in methods:
+            compression_processor = self._lookup_compression_processor(
+                method_desc)
+            if compression_processor is not None:
+                self._compression_method_name = method_desc.name()
+                break
+        if compression_processor is None:
+            return None
+
+        if self._compression_processor_hook:
+            self._compression_processor_hook(compression_processor)
+
+        processor_response = compression_processor.get_extension_response()
+        if processor_response is None:
+            return None
+        self._compression_processor = compression_processor
+        return processor_response
+
+    def get_extension_response(self):
+        processor_response = self._get_compression_processor_response()
+        if processor_response is None:
+            return None
+
+        # Wrap the delegate's parameters into a single 'method' parameter
+        # of this extension's response.
+        response = common.ExtensionParameter(self._request.name())
+        accepted_method_desc = _create_accepted_method_desc(
+                                   self._compression_method_name,
+                                   processor_response.get_parameters())
+        response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
+        self._logger.debug(
+            'Enable %s extension (method: %s)' %
+            (self._request.name(), self._compression_method_name))
+        return response
+
+    def setup_stream_options(self, stream_options):
+        if self._compression_processor is None:
+            return
+        self._compression_processor.setup_stream_options(stream_options)
+
+    def set_compression_processor_hook(self, hook):
+        self._compression_processor_hook = hook
+
+    def get_compression_processor(self):
+        return self._compression_processor
+
+
+class PerFrameCompressionExtensionProcessor(CompressionExtensionProcessorBase):
+    """WebSocket Per-frame compression extension processor.
+
+    Specification:
+    http://tools.ietf.org/html/draft-ietf-hybi-websocket-perframe-compression
+    """
+
+    # Only the 'deflate' method is supported.
+    _DEFLATE_METHOD = 'deflate'
+
+    def __init__(self, request):
+        CompressionExtensionProcessorBase.__init__(self, request)
+
+    def name(self):
+        return common.PERFRAME_COMPRESSION_EXTENSION
+
+    def _lookup_compression_processor(self, method_desc):
+        # Delegate per-frame deflate handling to the frame processor.
+        if method_desc.name() == self._DEFLATE_METHOD:
+            return DeflateFrameExtensionProcessor(method_desc)
+        return None
+
+
+_available_processors[common.PERFRAME_COMPRESSION_EXTENSION] = (
+    PerFrameCompressionExtensionProcessor)
+
+
+class DeflateMessageProcessor(ExtensionProcessorInterface):
+    """Per-message deflate processor.
+
+    Compresses whole messages (s2c direction) and decompresses incoming
+    (c2s) messages whose first frame carries RSV1.
+    """
+
+    _S2C_MAX_WINDOW_BITS_PARAM = 's2c_max_window_bits'
+    _S2C_NO_CONTEXT_TAKEOVER_PARAM = 's2c_no_context_takeover'
+    _C2S_MAX_WINDOW_BITS_PARAM = 'c2s_max_window_bits'
+    _C2S_NO_CONTEXT_TAKEOVER_PARAM = 'c2s_no_context_takeover'
+
+    def __init__(self, request):
+        self._request = request
+        self._logger = util.get_class_logger(self)
+
+        # c2s parameters to advertise in the response; settable via the
+        # set_c2s_* methods before get_extension_response is called.
+        self._c2s_max_window_bits = None
+        self._c2s_no_context_takeover = False
+        self._bfinal = False
+
+        self._compress_outgoing_enabled = False
+
+        # True if a message is fragmented and compression is ongoing.
+        self._compress_ongoing = False
+
+        # Counters for statistics.
+
+        # Total number of outgoing bytes supplied to this filter.
+        self._total_outgoing_payload_bytes = 0
+        # Total number of bytes sent to the network after applying this filter.
+        self._total_filtered_outgoing_payload_bytes = 0
+
+        # Total number of bytes received from the network.
+        self._total_incoming_payload_bytes = 0
+        # Total number of incoming bytes obtained after applying this filter.
+        self._total_filtered_incoming_payload_bytes = 0
+
+    def name(self):
+        return 'deflate'
+
+    def get_extension_response(self):
+        # Any unknown parameter will be just ignored.
+
+        # s2c_max_window_bits, when present, must be an integer in [8, 15].
+        s2c_max_window_bits = self._request.get_parameter_value(
+            self._S2C_MAX_WINDOW_BITS_PARAM)
+        if s2c_max_window_bits is not None:
+            try:
+                s2c_max_window_bits = int(s2c_max_window_bits)
+            except ValueError, e:
+                return None
+            if s2c_max_window_bits < 8 or s2c_max_window_bits > 15:
+                return None
+
+        # s2c_no_context_takeover must appear without a value.
+        s2c_no_context_takeover = self._request.has_parameter(
+            self._S2C_NO_CONTEXT_TAKEOVER_PARAM)
+        if (s2c_no_context_takeover and
+            self._request.get_parameter_value(
+                self._S2C_NO_CONTEXT_TAKEOVER_PARAM) is not None):
+            return None
+
+        self._deflater = util._RFC1979Deflater(
+            s2c_max_window_bits, s2c_no_context_takeover)
+
+        self._inflater = util._RFC1979Inflater()
+
+        self._compress_outgoing_enabled = True
+
+        response = common.ExtensionParameter(self._request.name())
+
+        # Echo the accepted s2c parameters back to the client.
+        if s2c_max_window_bits is not None:
+            response.add_parameter(
+                self._S2C_MAX_WINDOW_BITS_PARAM, str(s2c_max_window_bits))
+
+        if s2c_no_context_takeover:
+            response.add_parameter(
+                self._S2C_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+        if self._c2s_max_window_bits is not None:
+            response.add_parameter(
+                self._C2S_MAX_WINDOW_BITS_PARAM,
+                str(self._c2s_max_window_bits))
+        if self._c2s_no_context_takeover:
+            response.add_parameter(
+                self._C2S_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+        self._logger.debug(
+            'Enable %s extension ('
+            'request: s2c_max_window_bits=%s; s2c_no_context_takeover=%r, '
+            'response: c2s_max_window_bits=%s; c2s_no_context_takeover=%r)' %
+            (self._request.name(),
+             s2c_max_window_bits,
+             s2c_no_context_takeover,
+             self._c2s_max_window_bits,
+             self._c2s_no_context_takeover))
+
+        return response
+
+    def setup_stream_options(self, stream_options):
+        # Adapters binding the stream's message/frame filter protocols to
+        # this processor's private methods.
+        class _OutgoingMessageFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, message, end=True, binary=False):
+                return self._parent._process_outgoing_message(
+                    message, end, binary)
+
+        class _IncomingMessageFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+                self._decompress_next_message = False
+
+            def decompress_next_message(self):
+                self._decompress_next_message = True
+
+            def filter(self, message):
+                message = self._parent._process_incoming_message(
+                    message, self._decompress_next_message)
+                self._decompress_next_message = False
+                return message
+
+        self._outgoing_message_filter = _OutgoingMessageFilter(self)
+        self._incoming_message_filter = _IncomingMessageFilter(self)
+        stream_options.outgoing_message_filters.append(
+            self._outgoing_message_filter)
+        stream_options.incoming_message_filters.append(
+            self._incoming_message_filter)
+
+        class _OutgoingFrameFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+                self._set_compression_bit = False
+
+            def set_compression_bit(self):
+                self._set_compression_bit = True
+
+            def filter(self, frame):
+                self._parent._process_outgoing_frame(
+                    frame, self._set_compression_bit)
+                self._set_compression_bit = False
+
+        class _IncomingFrameFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, frame):
+                self._parent._process_incoming_frame(frame)
+
+        self._outgoing_frame_filter = _OutgoingFrameFilter(self)
+        self._incoming_frame_filter = _IncomingFrameFilter(self)
+        stream_options.outgoing_frame_filters.append(
+            self._outgoing_frame_filter)
+        stream_options.incoming_frame_filters.append(
+            self._incoming_frame_filter)
+
+        # This processor encodes text itself (in _process_outgoing_message),
+        # so the stream must not encode again.
+        stream_options.encode_text_message_to_utf8 = False
+
+    def set_c2s_max_window_bits(self, value):
+        self._c2s_max_window_bits = value
+
+    def set_c2s_no_context_takeover(self, value):
+        self._c2s_no_context_takeover = value
+
+    def set_bfinal(self, value):
+        self._bfinal = value
+
+    def enable_outgoing_compression(self):
+        self._compress_outgoing_enabled = True
+
+    def disable_outgoing_compression(self):
+        self._compress_outgoing_enabled = False
+
+    def _process_incoming_message(self, message, decompress):
+        if not decompress:
+            return message
+
+        received_payload_size = len(message)
+        self._total_incoming_payload_bytes += received_payload_size
+
+        message = self._inflater.filter(message)
+
+        filtered_payload_size = len(message)
+        self._total_filtered_incoming_payload_bytes += filtered_payload_size
+
+        _log_decompression_ratio(self._logger, received_payload_size,
+                                 self._total_incoming_payload_bytes,
+                                 filtered_payload_size,
+                                 self._total_filtered_incoming_payload_bytes)
+
+        return message
+
+    def _process_outgoing_message(self, message, end, binary):
+        if not binary:
+            message = message.encode('utf-8')
+
+        if not self._compress_outgoing_enabled:
+            return message
+
+        original_payload_size = len(message)
+        self._total_outgoing_payload_bytes += original_payload_size
+
+        # Flush only at message end so fragments share one deflate stream.
+        message = self._deflater.filter(
+            message, flush=end, bfinal=self._bfinal)
+
+        filtered_payload_size = len(message)
+        self._total_filtered_outgoing_payload_bytes += filtered_payload_size
+
+        _log_compression_ratio(self._logger, original_payload_size,
+                               self._total_outgoing_payload_bytes,
+                               filtered_payload_size,
+                               self._total_filtered_outgoing_payload_bytes)
+
+        # RSV1 is set only on the first frame of a compressed message.
+        if not self._compress_ongoing:
+            self._outgoing_frame_filter.set_compression_bit()
+        self._compress_ongoing = not end
+        return message
+
+    def _process_incoming_frame(self, frame):
+        # RSV1 on a non-control frame marks the start of a compressed
+        # message; schedule decompression and clear the bit.
+        if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
+            self._incoming_message_filter.decompress_next_message()
+            frame.rsv1 = 0
+
+    def _process_outgoing_frame(self, frame, compression_bit):
+        if (not compression_bit or
+            common.is_control_opcode(frame.opcode)):
+            return
+
+        frame.rsv1 = 1
+
+
+class PerMessageCompressionExtensionProcessor(
+    CompressionExtensionProcessorBase):
+    """WebSocket Per-message compression extension processor.
+
+    Specification:
+    http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
+    """
+
+    # Only the 'deflate' method is supported.
+    _DEFLATE_METHOD = 'deflate'
+
+    def __init__(self, request):
+        CompressionExtensionProcessorBase.__init__(self, request)
+
+    def name(self):
+        return common.PERMESSAGE_COMPRESSION_EXTENSION
+
+    def _lookup_compression_processor(self, method_desc):
+        # Delegate per-message deflate handling to the message processor.
+        if method_desc.name() == self._DEFLATE_METHOD:
+            return DeflateMessageProcessor(method_desc)
+        return None
+
+
+_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
+    PerMessageCompressionExtensionProcessor)
+
+
+# Adding vendor-prefixed permessage-compress extension.
+# TODO(bashi): Remove this after WebKit stops using vendor prefix.
+_available_processors[common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION] = (
+    PerMessageCompressionExtensionProcessor)
+
+
+class MuxExtensionProcessor(ExtensionProcessorInterface):
+    """WebSocket multiplexing extension processor.
+
+    NOTE: unlike ExtensionProcessorInterface, get_extension_response here
+    takes two extra arguments, so callers must invoke this processor
+    specially (it cannot be driven through the generic interface).
+    """
+
+    _QUOTA_PARAM = 'quota'
+
+    def __init__(self, request):
+        self._request = request
+
+    def name(self):
+        return common.MUX_EXTENSION
+
+    def get_extension_response(self, ws_request,
+                               logical_channel_extensions):
+        # Mux extension cannot be used after extensions that depend on
+        # frame boundary, extension data field, or any reserved bits
+        # which are attributed to each frame.
+        for extension in logical_channel_extensions:
+            name = extension.name()
+            if (name == common.PERFRAME_COMPRESSION_EXTENSION or
+                name == common.DEFLATE_FRAME_EXTENSION or
+                name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
+                return None
+
+        # quota, when present, must be an integer in [0, 2^32).
+        quota = self._request.get_parameter_value(self._QUOTA_PARAM)
+        if quota is None:
+            ws_request.mux_quota = 0
+        else:
+            try:
+                quota = int(quota)
+            except ValueError, e:
+                return None
+            if quota < 0 or quota >= 2 ** 32:
+                return None
+            ws_request.mux_quota = quota
+
+        ws_request.mux = True
+        ws_request.mux_extensions = logical_channel_extensions
+        return common.ExtensionParameter(common.MUX_EXTENSION)
+
+    def setup_stream_options(self, stream_options):
+        pass
+
+
+_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
+
+
+def get_extension_processor(extension_request):
+    """Returns a new processor instance for the requested extension, or
+    None when the extension name is not registered.
+    """
+
+    # 'global' is unnecessary for a read-only access, but kept as-is.
+    global _available_processors
+    processor_class = _available_processors.get(extension_request.name())
+    if processor_class is None:
+        return None
+    return processor_class(extension_request)
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/handshake/__init__.py b/pyload/lib/mod_pywebsocket/handshake/__init__.py
new file mode 100644
index 000000000..194f6b395
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/handshake/__init__.py
@@ -0,0 +1,110 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket opening handshake processor. This module tries to apply the
+available opening handshake processors for each protocol version until a
+connection is successfully established.
+"""
+
+
+import logging
+
+from mod_pywebsocket import common
+from mod_pywebsocket.handshake import hybi00
+from mod_pywebsocket.handshake import hybi
+# Export AbortedByUserException, HandshakeException, and VersionException
+# symbol from this module.
+from mod_pywebsocket.handshake._base import AbortedByUserException
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import VersionException
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
+    """Performs WebSocket handshake.
+
+    Args:
+        request: mod_python request.
+        dispatcher: Dispatcher (dispatch.Dispatcher).
+        allowDraft75: obsolete argument. ignored.
+        strict: obsolete argument. ignored.
+
+    Handshaker will add attributes such as ws_resource in performing
+    handshake.
+
+    Raises:
+        HandshakeException: every handshaker failed, or one failed with an
+            explicit HTTP status attached that must be reported.
+        AbortedByUserException: a handler aborted the connection on purpose.
+        VersionException: the client requested an unsupported protocol
+            version.
+    """
+
+    _LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
+    # To print mimetools.Message as an escaped one-line string, we convert
+    # headers_in to a dict object. Without conversion, if we use %r, it just
+    # prints the type and address, and if we use %s, it prints the original
+    # header string as multiple lines.
+    #
+    # Both mimetools.Message and MpTable_Type of mod_python can be
+    # converted to dict.
+    #
+    # mimetools.Message.__str__ returns the original header string.
+    # dict(mimetools.Message object) returns the map from header names to
+    # header values. While MpTable_Type doesn't have such __str__ but just
+    # __repr__ which formats itself as well as dictionary object.
+    _LOGGER.debug(
+        'Client\'s opening handshake headers: %r', dict(request.headers_in))
+
+    # Try the newest protocol (RFC 6455) first, then fall back to HyBi 00.
+    handshakers = []
+    handshakers.append(
+        ('RFC 6455', hybi.Handshaker(request, dispatcher)))
+    handshakers.append(
+        ('HyBi 00', hybi00.Handshaker(request, dispatcher)))
+
+    for name, handshaker in handshakers:
+        _LOGGER.debug('Trying protocol version %s', name)
+        try:
+            handshaker.do_handshake()
+            _LOGGER.info('Established (%s protocol)', name)
+            return
+        except HandshakeException, e:
+            _LOGGER.debug(
+                'Failed to complete opening handshake as %s protocol: %r',
+                name, e)
+            # A handshaker that attached an HTTP status wants this failure
+            # reported to the client instead of trying other protocols.
+            if e.status:
+                raise e
+        except AbortedByUserException, e:
+            raise
+        except VersionException, e:
+            raise
+
+    # TODO(toyoshim): Add a test to cover the case all handshakers fail.
+    raise HandshakeException(
+        'Failed to complete opening handshake for all available protocols',
+        status=common.HTTP_STATUS_BAD_REQUEST)
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/handshake/_base.py b/pyload/lib/mod_pywebsocket/handshake/_base.py
new file mode 100644
index 000000000..e5c94ca90
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/handshake/_base.py
@@ -0,0 +1,226 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Common functions and exceptions used by WebSocket opening handshake
+processors.
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import http_header_util
+
+
+class AbortedByUserException(Exception):
+    """Exception for aborting a connection intentionally.
+
+    If this exception is raised in do_extra_handshake handler, the connection
+    will be abandoned. No other WebSocket or HTTP(S) handler will be invoked.
+
+    If this exception is raised in transfer_data_handler, the connection will
+    be closed without closing handshake. No other WebSocket or HTTP(S) handler
+    will be invoked.
+    """
+
+    # Carries no extra state; the exception type alone conveys the intent.
+    pass
+
+
+class HandshakeException(Exception):
+    """This exception will be raised when an error occurred while processing
+    WebSocket initial handshake.
+    """
+
+    def __init__(self, name, status=None):
+        """Construct an instance.
+
+        Args:
+            name: error message.
+            status: optional HTTP status code to report to the client; when
+                set, do_handshake stops trying other protocol versions.
+        """
+        super(HandshakeException, self).__init__(name)
+        self.status = status
+
+
+class VersionException(Exception):
+    """This exception will be raised when a version of client request does not
+    match with version the server supports.
+    """
+
+    def __init__(self, name, supported_versions=''):
+        """Construct an instance.
+
+        Args:
+            name: error message.
+            supported_versions: a str object to show supported hybi versions.
+                (e.g. '8, 13')
+        """
+        super(VersionException, self).__init__(name)
+        self.supported_versions = supported_versions
+
+
+def get_default_port(is_secure):
+    """Return the default WebSocket port for secure or plain connections."""
+    if is_secure:
+        return common.DEFAULT_WEB_SOCKET_SECURE_PORT
+    else:
+        return common.DEFAULT_WEB_SOCKET_PORT
+
+
+def validate_subprotocol(subprotocol, hixie):
+    """Validate a value in the Sec-WebSocket-Protocol field.
+
+    See
+    - RFC 6455: Section 4.1., 4.2.2., and 4.3.
+    - HyBi 00: Section 4.1. Opening handshake
+
+    Args:
+        subprotocol: the subprotocol name to validate; must be non-empty.
+        hixie: if True, checks if characters in subprotocol are in range
+            between U+0020 and U+007E. It's required by HyBi 00 but not by
+            RFC 6455.
+
+    Raises:
+        HandshakeException: the value is empty or contains an illegal
+            character for the selected validation mode.
+    """
+
+    if not subprotocol:
+        raise HandshakeException('Invalid subprotocol name: empty')
+    if hixie:
+        # Parameter should be in the range U+0020 to U+007E.
+        for c in subprotocol:
+            if not 0x20 <= ord(c) <= 0x7e:
+                raise HandshakeException(
+                    'Illegal character in subprotocol name: %r' % c)
+    else:
+        # Parameter should be encoded HTTP token.
+        state = http_header_util.ParsingState(subprotocol)
+        token = http_header_util.consume_token(state)
+        rest = http_header_util.peek(state)
+        # If |rest| is not None, |subprotocol| is not one token or invalid. If
+        # |rest| is None, |token| must not be None because |subprotocol| is
+        # concatenation of |token| and |rest| and is not None.
+        if rest is not None:
+            raise HandshakeException('Invalid non-token string in subprotocol '
+                                     'name: %r' % rest)
+
+
+def parse_host_header(request):
+    """Parse the Host header into a (host, port) tuple.
+
+    Falls back to the default port for the scheme when no port is given.
+
+    Raises:
+        HandshakeException: the port part is not a valid integer.
+    """
+    # NOTE(review): raises KeyError if the Host header is absent; callers
+    # appear to be expected to have validated its presence -- confirm.
+    fields = request.headers_in['Host'].split(':', 1)
+    if len(fields) == 1:
+        return fields[0], get_default_port(request.is_https())
+    try:
+        return fields[0], int(fields[1])
+    except ValueError, e:
+        raise HandshakeException('Invalid port number format: %r' % e)
+
+
+def format_header(name, value):
+    """Format a single HTTP header line terminated by CRLF."""
+    return '%s: %s\r\n' % (name, value)
+
+
+def build_location(request):
+    """Build WebSocket location (ws:// or wss:// URL) for request.
+
+    Raises:
+        HandshakeException: the Host header port does not match the port
+            the connection was actually accepted on, or the Host header is
+            malformed.
+    """
+    location_parts = []
+    if request.is_https():
+        location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
+    else:
+        location_parts.append(common.WEB_SOCKET_SCHEME)
+    location_parts.append('://')
+    host, port = parse_host_header(request)
+    connection_port = request.connection.local_addr[1]
+    if port != connection_port:
+        raise HandshakeException('Header/connection port mismatch: %d/%d' %
+                                 (port, connection_port))
+    location_parts.append(host)
+    # Omit the port from the URL when it is the default for the scheme.
+    if (port != get_default_port(request.is_https())):
+        location_parts.append(':')
+        location_parts.append(str(port))
+    location_parts.append(request.uri)
+    return ''.join(location_parts)
+
+
+def get_mandatory_header(request, key):
+    """Return the value of a required request header.
+
+    Raises:
+        HandshakeException: the header is missing.
+    """
+    value = request.headers_in.get(key)
+    if value is None:
+        raise HandshakeException('Header %s is not defined' % key)
+    return value
+
+
+def validate_mandatory_header(request, key, expected_value, fail_status=None):
+    """Check that a required header has the expected value.
+
+    The comparison is case-insensitive. fail_status, when given, is attached
+    to the raised HandshakeException as the HTTP status to report.
+
+    Raises:
+        HandshakeException: the header is missing or has a different value.
+    """
+    value = get_mandatory_header(request, key)
+
+    if value.lower() != expected_value.lower():
+        raise HandshakeException(
+            'Expected %r for header %s but found %r (case-insensitive)' %
+            (expected_value, key, value), status=fail_status)
+
+
+def check_request_line(request):
+    """Validate the request line: method must be GET over HTTP/1.1.
+
+    Raises:
+        HandshakeException: the method or protocol version is wrong.
+    """
+    # 5.1 1. The three character UTF-8 string "GET".
+    # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
+    if request.method != 'GET':
+        raise HandshakeException('Method is not GET: %r' % request.method)
+
+    if request.protocol != 'HTTP/1.1':
+        raise HandshakeException('Version is not HTTP/1.1: %r' %
+                                 request.protocol)
+
+
+def check_header_lines(request, mandatory_headers):
+    """Validate the request line and a list of (key, expected value) pairs.
+
+    Raises:
+        HandshakeException: the request line or a mandatory header is
+            invalid.
+    """
+    check_request_line(request)
+
+    # The expected field names, and the meaning of their corresponding
+    # values, are as follows.
+    # |Upgrade| and |Connection|
+    for key, expected_value in mandatory_headers:
+        validate_mandatory_header(request, key, expected_value)
+
+
+def parse_token_list(data):
+    """Parses a header value which follows 1#token and returns parsed elements
+    as a list of strings.
+
+    Leading LWSes must be trimmed.
+
+    Raises:
+        HandshakeException: the value is malformed or contains no token.
+    """
+
+    state = http_header_util.ParsingState(data)
+
+    token_list = []
+
+    while True:
+        token = http_header_util.consume_token(state)
+        if token is not None:
+            token_list.append(token)
+
+        http_header_util.consume_lwses(state)
+
+        if http_header_util.peek(state) is None:
+            break
+
+        # Successive tokens must be separated by a comma.
+        if not http_header_util.consume_string(state, ','):
+            raise HandshakeException(
+                'Expected a comma but found %r' % http_header_util.peek(state))
+
+        http_header_util.consume_lwses(state)
+
+    if len(token_list) == 0:
+        raise HandshakeException('No valid token found')
+
+    return token_list
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/handshake/hybi.py b/pyload/lib/mod_pywebsocket/handshake/hybi.py
new file mode 100644
index 000000000..fc0e2a096
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/handshake/hybi.py
@@ -0,0 +1,404 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+# Note: request.connection.write is used in this module, even though mod_python
+# document says that it should be used only in connection handlers.
+# Unfortunately, we have no other options. For example, request.write is not
+# suitable because it doesn't allow direct raw bytes writing.
+
+
+import base64
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket.extensions import get_extension_processor
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import parse_token_list
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+from mod_pywebsocket.handshake._base import VersionException
+from mod_pywebsocket.stream import Stream
+from mod_pywebsocket.stream import StreamOptions
+from mod_pywebsocket import util
+
+
+# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648
+# disallows non-zero padding, so the character right before == must be any of
+# A, Q, g and w.
+_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')
+
+# Defining aliases for values used frequently.
+_VERSION_HYBI08 = common.VERSION_HYBI08
+_VERSION_HYBI08_STRING = str(_VERSION_HYBI08)
+_VERSION_LATEST = common.VERSION_HYBI_LATEST
+_VERSION_LATEST_STRING = str(_VERSION_LATEST)
+# Versions accepted in the Sec-WebSocket-Version header by this handshaker.
+_SUPPORTED_VERSIONS = [
+    _VERSION_LATEST,
+    _VERSION_HYBI08,
+]
+
+
+def compute_accept(key):
+    """Computes value for the Sec-WebSocket-Accept header from value of the
+    Sec-WebSocket-Key header.
+
+    Returns:
+        A (base64-encoded accept string, raw SHA-1 digest) tuple.
+    """
+
+    accept_binary = util.sha1_hash(
+        key + common.WEBSOCKET_ACCEPT_UUID).digest()
+    accept = base64.b64encode(accept_binary)
+
+    return (accept, accept_binary)
+
+
+class Handshaker(object):
+    """Opening handshake processor for the WebSocket protocol (RFC 6455)."""
+
+    def __init__(self, request, dispatcher):
+        """Construct an instance.
+
+        Args:
+            request: mod_python request.
+            dispatcher: Dispatcher (dispatch.Dispatcher).
+
+        Handshaker will add attributes such as ws_resource during handshake.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+        self._dispatcher = dispatcher
+
+    def _validate_connection_header(self):
+        """Check that the Connection header contains the Upgrade token.
+
+        Raises:
+            HandshakeException: the header is missing, unparseable, or does
+                not contain the Upgrade token.
+        """
+        connection = get_mandatory_header(
+            self._request, common.CONNECTION_HEADER)
+
+        try:
+            connection_tokens = parse_token_list(connection)
+        except HandshakeException, e:
+            raise HandshakeException(
+                'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))
+
+        # Token comparison is case-insensitive per HTTP.
+        connection_is_valid = False
+        for token in connection_tokens:
+            if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
+                connection_is_valid = True
+                break
+        if not connection_is_valid:
+            raise HandshakeException(
+                '%s header doesn\'t contain "%s"' %
+                (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+
+    def do_handshake(self):
+        """Perform the RFC 6455 opening handshake.
+
+        Validates the request, negotiates extensions and subprotocol via the
+        dispatcher, installs ws_stream on the request, and sends the 101
+        response.
+
+        Raises:
+            HandshakeException: parsing or validation failed; e.status is
+                always set (defaults to 400) before the exception leaves the
+                hybi-specific section.
+            VersionException: the requested protocol version is unsupported.
+        """
+        self._request.ws_close_code = None
+        self._request.ws_close_reason = None
+
+        # Parsing.
+
+        check_request_line(self._request)
+
+        validate_mandatory_header(
+            self._request,
+            common.UPGRADE_HEADER,
+            common.WEBSOCKET_UPGRADE_TYPE)
+
+        self._validate_connection_header()
+
+        self._request.ws_resource = self._request.uri
+
+        unused_host = get_mandatory_header(self._request, common.HOST_HEADER)
+
+        self._request.ws_version = self._check_version()
+
+        # This handshake must be based on latest hybi. We are responsible to
+        # fallback to HTTP on handshake failure as latest hybi handshake
+        # specifies.
+        try:
+            self._get_origin()
+            self._set_protocol()
+            self._parse_extensions()
+
+            # Key validation, response generation.
+
+            key = self._get_key()
+            (accept, accept_binary) = compute_accept(key)
+            self._logger.debug(
+                '%s: %r (%s)',
+                common.SEC_WEBSOCKET_ACCEPT_HEADER,
+                accept,
+                util.hexify(accept_binary))
+
+            self._logger.debug('Protocol version is RFC 6455')
+
+            # Setup extension processors.
+
+            processors = []
+            if self._request.ws_requested_extensions is not None:
+                for extension_request in self._request.ws_requested_extensions:
+                    processor = get_extension_processor(extension_request)
+                    # Unknown extension requests are just ignored.
+                    if processor is not None:
+                        processors.append(processor)
+            self._request.ws_extension_processors = processors
+
+            # Extra handshake handler may modify/remove processors.
+            self._dispatcher.do_extra_handshake(self._request)
+            processors = filter(lambda processor: processor is not None,
+                                self._request.ws_extension_processors)
+
+            accepted_extensions = []
+
+            # We need to take care of mux extension here. Extensions that
+            # are placed before mux should be applied to logical channels.
+            mux_index = -1
+            for i, processor in enumerate(processors):
+                if processor.name() == common.MUX_EXTENSION:
+                    mux_index = i
+                    break
+            if mux_index >= 0:
+                mux_processor = processors[mux_index]
+                logical_channel_processors = processors[:mux_index]
+                processors = processors[mux_index+1:]
+
+                for processor in logical_channel_processors:
+                    extension_response = processor.get_extension_response()
+                    if extension_response is None:
+                        # Rejected.
+                        continue
+                    accepted_extensions.append(extension_response)
+                # Pass a shallow copy of accepted_extensions as extensions for
+                # logical channels.
+                mux_response = mux_processor.get_extension_response(
+                    self._request, accepted_extensions[:])
+                if mux_response is not None:
+                    accepted_extensions.append(mux_response)
+
+            stream_options = StreamOptions()
+
+            # When there is mux extension, here, |processors| contain only
+            # processors for extensions placed after mux.
+            for processor in processors:
+
+                extension_response = processor.get_extension_response()
+                if extension_response is None:
+                    # Rejected.
+                    continue
+
+                accepted_extensions.append(extension_response)
+
+                processor.setup_stream_options(stream_options)
+
+            if len(accepted_extensions) > 0:
+                self._request.ws_extensions = accepted_extensions
+                self._logger.debug(
+                    'Extensions accepted: %r',
+                    map(common.ExtensionParameter.name, accepted_extensions))
+            else:
+                self._request.ws_extensions = None
+
+            self._request.ws_stream = self._create_stream(stream_options)
+
+            if self._request.ws_requested_protocols is not None:
+                if self._request.ws_protocol is None:
+                    raise HandshakeException(
+                        'do_extra_handshake must choose one subprotocol from '
+                        'ws_requested_protocols and set it to ws_protocol')
+                validate_subprotocol(self._request.ws_protocol, hixie=False)
+
+                self._logger.debug(
+                    'Subprotocol accepted: %r',
+                    self._request.ws_protocol)
+            else:
+                if self._request.ws_protocol is not None:
+                    raise HandshakeException(
+                        'ws_protocol must be None when the client didn\'t '
+                        'request any subprotocol')
+
+            self._send_handshake(accept)
+        except HandshakeException, e:
+            if not e.status:
+                # Fallback to 400 bad request by default.
+                e.status = common.HTTP_STATUS_BAD_REQUEST
+            raise e
+
+    def _get_origin(self):
+        """Read the origin header (version-dependent name) into ws_origin."""
+        if self._request.ws_version is _VERSION_HYBI08:
+            origin_header = common.SEC_WEBSOCKET_ORIGIN_HEADER
+        else:
+            origin_header = common.ORIGIN_HEADER
+        origin = self._request.headers_in.get(origin_header)
+        if origin is None:
+            self._logger.debug('Client request does not have origin header')
+        self._request.ws_origin = origin
+
+    def _check_version(self):
+        """Return the requested protocol version when supported.
+
+        Raises:
+            HandshakeException: multiple versions were supplied in one header.
+            VersionException: the version is not one this server supports.
+        """
+        version = get_mandatory_header(self._request,
+                                       common.SEC_WEBSOCKET_VERSION_HEADER)
+        if version == _VERSION_HYBI08_STRING:
+            return _VERSION_HYBI08
+        if version == _VERSION_LATEST_STRING:
+            return _VERSION_LATEST
+
+        if version.find(',') >= 0:
+            raise HandshakeException(
+                'Multiple versions (%r) are not allowed for header %s' %
+                (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+                status=common.HTTP_STATUS_BAD_REQUEST)
+        raise VersionException(
+            'Unsupported version %r for header %s' %
+            (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+            supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))
+
+    def _set_protocol(self):
+        """Parse Sec-WebSocket-Protocol into ws_requested_protocols."""
+        self._request.ws_protocol = None
+
+        protocol_header = self._request.headers_in.get(
+            common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+
+        if protocol_header is None:
+            self._request.ws_requested_protocols = None
+            return
+
+        self._request.ws_requested_protocols = parse_token_list(
+            protocol_header)
+        self._logger.debug('Subprotocols requested: %r',
+                           self._request.ws_requested_protocols)
+
+    def _parse_extensions(self):
+        """Parse Sec-WebSocket-Extensions into ws_requested_extensions."""
+        extensions_header = self._request.headers_in.get(
+            common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
+        if not extensions_header:
+            self._request.ws_requested_extensions = None
+            return
+
+        # HyBi 08 does not allow quoted-string extension parameter values.
+        if self._request.ws_version is common.VERSION_HYBI08:
+            allow_quoted_string=False
+        else:
+            allow_quoted_string=True
+        try:
+            self._request.ws_requested_extensions = common.parse_extensions(
+                extensions_header, allow_quoted_string=allow_quoted_string)
+        except common.ExtensionParsingException, e:
+            raise HandshakeException(
+                'Failed to parse Sec-WebSocket-Extensions header: %r' % e)
+
+        self._logger.debug(
+            'Extensions requested: %r',
+            map(common.ExtensionParameter.name,
+                self._request.ws_requested_extensions))
+
+    def _validate_key(self, key):
+        """Validate Sec-WebSocket-Key and return its decoded 16-byte value.
+
+        Raises:
+            HandshakeException: the key is malformed.
+        """
+        if key.find(',') >= 0:
+            raise HandshakeException('Request has multiple %s header lines or '
+                                     'contains illegal character \',\': %r' %
+                                     (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+        # Validate
+        key_is_valid = False
+        try:
+            # Validate key by quick regex match before parsing by base64
+            # module. Because base64 module skips invalid characters, we have
+            # to do this in advance to make this server strictly reject illegal
+            # keys.
+            if _SEC_WEBSOCKET_KEY_REGEX.match(key):
+                decoded_key = base64.b64decode(key)
+                if len(decoded_key) == 16:
+                    key_is_valid = True
+        except TypeError, e:
+            pass
+
+        if not key_is_valid:
+            raise HandshakeException(
+                'Illegal value for header %s: %r' %
+                (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+        return decoded_key
+
+    def _get_key(self):
+        """Return the validated Sec-WebSocket-Key header value (base64)."""
+        key = get_mandatory_header(
+            self._request, common.SEC_WEBSOCKET_KEY_HEADER)
+
+        decoded_key = self._validate_key(key)
+
+        self._logger.debug(
+            '%s: %r (%s)',
+            common.SEC_WEBSOCKET_KEY_HEADER,
+            key,
+            util.hexify(decoded_key))
+
+        return key
+
+    def _create_stream(self, stream_options):
+        """Create the frame parser/generator for this connection."""
+        return Stream(self._request, stream_options)
+
+    def _create_handshake_response(self, accept):
+        """Build the 101 Switching Protocols response as a single string."""
+        response = []
+
+        response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+        response.append(format_header(
+            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
+        response.append(format_header(
+            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+        response.append(format_header(
+            common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
+        if self._request.ws_protocol is not None:
+            response.append(format_header(
+                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+                self._request.ws_protocol))
+        if (self._request.ws_extensions is not None and
+            len(self._request.ws_extensions) != 0):
+            response.append(format_header(
+                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+                common.format_extensions(self._request.ws_extensions)))
+        response.append('\r\n')
+
+        return ''.join(response)
+
+    def _send_handshake(self, accept):
+        """Write the server's opening handshake response to the client."""
+        raw_response = self._create_handshake_response(accept)
+        self._request.connection.write(raw_response)
+        self._logger.debug('Sent server\'s opening handshake: %r',
+                           raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/handshake/hybi00.py b/pyload/lib/mod_pywebsocket/handshake/hybi00.py
new file mode 100644
index 000000000..cc6f8dc43
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/handshake/hybi00.py
@@ -0,0 +1,242 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol version HyBi 00.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# mod_python document says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw bytes
+# writing/reading.
+
+
+import logging
+import re
+import struct
+
+from mod_pywebsocket import common
+from mod_pywebsocket.stream import StreamHixie75
+from mod_pywebsocket import util
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import build_location
+from mod_pywebsocket.handshake._base import check_header_lines
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+
+
+# Headers every HyBi 00 opening handshake request must carry, with their
+# expected values (checked case-insensitively by check_header_lines).
+_MANDATORY_HEADERS = [
+    # key, expected value or None
+    [common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75],
+    [common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE],
+]
+
+
+class Handshaker(object):
+    """Opening handshake processor for the WebSocket protocol version HyBi 00.
+    """
+
+    def __init__(self, request, dispatcher):
+        """Construct an instance.
+
+        Args:
+            request: mod_python request.
+            dispatcher: Dispatcher (dispatch.Dispatcher).
+
+        Handshaker will add attributes such as ws_resource in performing
+        handshake.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+        self._dispatcher = dispatcher
+
+    def do_handshake(self):
+        """Perform WebSocket Handshake.
+
+        On _request, we set
+            ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge,
+            ws_challenge_md5: WebSocket handshake information.
+            ws_stream: Frame generation/parsing class.
+            ws_version: Protocol version.
+
+        Raises:
+            HandshakeException: when any error happened in parsing the opening
+                handshake request.
+        """
+
+        # 5.1 Reading the client's opening handshake.
+        # dispatcher sets it in self._request.
+        check_header_lines(self._request, _MANDATORY_HEADERS)
+        self._set_resource()
+        self._set_subprotocol()
+        self._set_location()
+        self._set_origin()
+        self._set_challenge_response()
+        self._set_protocol_version()
+
+        self._dispatcher.do_extra_handshake(self._request)
+
+        self._send_handshake()
+
+    def _set_resource(self):
+        """Record the requested resource path on the request."""
+        self._request.ws_resource = self._request.uri
+
+    def _set_subprotocol(self):
+        """Validate and record the requested subprotocol, if any."""
+        # |Sec-WebSocket-Protocol|
+        subprotocol = self._request.headers_in.get(
+            common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+        if subprotocol is not None:
+            validate_subprotocol(subprotocol, hixie=True)
+        self._request.ws_protocol = subprotocol
+
+    def _set_location(self):
+        """Compute ws_location from the Host header, when present."""
+        # |Host|
+        host = self._request.headers_in.get(common.HOST_HEADER)
+        if host is not None:
+            self._request.ws_location = build_location(self._request)
+        # TODO(ukai): check host is this host.
+
+    def _set_origin(self):
+        """Record the Origin header, when present."""
+        # |Origin|
+        origin = self._request.headers_in.get(common.ORIGIN_HEADER)
+        if origin is not None:
+            self._request.ws_origin = origin
+
+    def _set_protocol_version(self):
+        """Validate Sec-WebSocket-Draft and install the HyBi 00 stream."""
+        # |Sec-WebSocket-Draft|
+        draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
+        if draft is not None and draft != '0':
+            raise HandshakeException('Illegal value for %s: %s' %
+                                     (common.SEC_WEBSOCKET_DRAFT_HEADER,
+                                      draft))
+
+        self._logger.debug('Protocol version is HyBi 00')
+        self._request.ws_version = common.VERSION_HYBI00
+        self._request.ws_stream = StreamHixie75(self._request, True)
+
+    def _set_challenge_response(self):
+        """Compute the challenge and its MD5 response per section 5.2."""
+        # 5.2 4-8.
+        self._request.ws_challenge = self._get_challenge()
+        # 5.2 9. let /response/ be the MD5 fingerprint of /challenge/
+        self._request.ws_challenge_md5 = util.md5_hash(
+            self._request.ws_challenge).digest()
+        self._logger.debug(
+            'Challenge: %r (%s)',
+            self._request.ws_challenge,
+            util.hexify(self._request.ws_challenge))
+        self._logger.debug(
+            'Challenge response: %r (%s)',
+            self._request.ws_challenge_md5,
+            util.hexify(self._request.ws_challenge_md5))
+
+    def _get_key_value(self, key_field):
+        """Extract the numeric part of a Sec-WebSocket-Key{1,2} header.
+
+        Raises:
+            HandshakeException: the field has no digit, no space, or the
+                key-number is not an integral multiple of the space count.
+        """
+        key_value = get_mandatory_header(self._request, key_field)
+
+        self._logger.debug('%s: %r', key_field, key_value)
+
+        # 5.2 4. let /key-number_n/ be the digits (characters in the range
+        # U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
+        # interpreted as a base ten integer, ignoring all other characters
+        # in /key_n/.
+        try:
+            key_number = int(re.sub("\\D", "", key_value))
+        # NOTE(review): bare except should be 'except ValueError' so that
+        # unrelated errors are not masked; kept as-is.
+        except:
+            raise HandshakeException('%s field contains no digit' % key_field)
+        # 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
+        # in /key_n/.
+        spaces = re.subn(" ", "", key_value)[1]
+        if spaces == 0:
+            raise HandshakeException('%s field contains no space' % key_field)
+
+        self._logger.debug(
+            '%s: Key-number is %d and number of spaces is %d',
+            key_field, key_number, spaces)
+
+        # 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
+        # then abort the WebSocket connection.
+        if key_number % spaces != 0:
+            raise HandshakeException(
+                '%s: Key-number (%d) is not an integral multiple of spaces '
+                '(%d)' % (key_field, key_number, spaces))
+        # 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
+        # (Python 2 integer division; divisibility was checked above.)
+        part = key_number / spaces
+        self._logger.debug('%s: Part is %d', key_field, part)
+        return part
+
+    def _get_challenge(self):
+        """Build the 16-byte challenge: key1, key2, then 8 raw body bytes."""
+        # 5.2 4-7.
+        key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
+        key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
+        # 5.2 8. let /challenge/ be the concatenation of /part_1/,
+        challenge = ''
+        challenge += struct.pack('!I', key1)  # network byteorder int
+        challenge += struct.pack('!I', key2)  # network byteorder int
+        challenge += self._request.connection.read(8)
+        return challenge
+
+    def _send_handshake(self):
+        """Write the server's opening handshake response to the client."""
+        response = []
+
+        # 5.2 10. send the following line.
+        response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n')
+
+        # 5.2 11. send the following fields to the client.
+        response.append(format_header(
+            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75))
+        response.append(format_header(
+            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+        response.append(format_header(
+            common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location))
+        response.append(format_header(
+            common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin))
+        if self._request.ws_protocol:
+            response.append(format_header(
+                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+                self._request.ws_protocol))
+        # 5.2 12. send two bytes 0x0D 0x0A.
+        response.append('\r\n')
+        # 5.2 13. send /response/
+        response.append(self._request.ws_challenge_md5)
+
+        raw_response = ''.join(response)
+        self._request.connection.write(raw_response)
+        self._logger.debug('Sent server\'s opening handshake: %r',
+                           raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/headerparserhandler.py b/pyload/lib/mod_pywebsocket/headerparserhandler.py
new file mode 100644
index 000000000..2cc62de04
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/headerparserhandler.py
@@ -0,0 +1,244 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""PythonHeaderParserHandler for mod_pywebsocket.
+
+Apache HTTP Server and mod_python must be configured such that this
+function is called to handle WebSocket request.
+"""
+
+
+import logging
+
+from mod_python import apache
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+
+
+# PythonOption to specify the handler root directory.
+_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
+
+# PythonOption to specify the handler scan directory.
+# This must be a directory under the root directory.
+# The default is the root directory.
+_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
+
+# PythonOption to allow handlers whose canonical path is
+# not under the root directory. It's disallowed by default.
+# Set this option with value of 'yes' to allow.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
+ 'mod_pywebsocket.allow_handlers_outside_root_dir')
+# Map from values to their meanings. 'Yes' and 'No' are allowed just for
+# compatibility.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
+ 'off': False, 'no': False, 'on': True, 'yes': True}
+
+# (Obsolete option. Ignored.)
+# PythonOption to specify to allow handshake defined in Hixie 75 version
+# protocol. The default is None (Off)
+_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
+# Map from values to their meanings.
+_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
+
+
+class ApacheLogHandler(logging.Handler):
+ """Wrapper logging.Handler to emit log message to apache's error.log."""
+
+ _LEVELS = {
+ logging.DEBUG: apache.APLOG_DEBUG,
+ logging.INFO: apache.APLOG_INFO,
+ logging.WARNING: apache.APLOG_WARNING,
+ logging.ERROR: apache.APLOG_ERR,
+ logging.CRITICAL: apache.APLOG_CRIT,
+ }
+
+ def __init__(self, request=None):
+ logging.Handler.__init__(self)
+ self._log_error = apache.log_error
+ if request is not None:
+ self._log_error = request.log_error
+
+ # Time and level will be printed by Apache.
+ self._formatter = logging.Formatter('%(name)s: %(message)s')
+
+ def emit(self, record):
+ apache_level = apache.APLOG_DEBUG
+ if record.levelno in ApacheLogHandler._LEVELS:
+ apache_level = ApacheLogHandler._LEVELS[record.levelno]
+
+ msg = self._formatter.format(record)
+
+ # "server" parameter must be passed to have "level" parameter work.
+ # If only "level" parameter is passed, nothing shows up on Apache's
+ # log. However, at this point, we cannot get the server object of the
+ # virtual host which will process WebSocket requests. The only server
+    # object we can get here is apache.main_server. But wherever (server
+ # configuration context or virtual host context) we put
+ # PythonHeaderParserHandler directive, apache.main_server just points
+ # the main server instance (not any of virtual server instance). Then,
+ # Apache follows LogLevel directive in the server configuration context
+ # to filter logs. So, we need to specify LogLevel in the server
+ # configuration context. Even if we specify "LogLevel debug" in the
+ # virtual host context which actually handles WebSocket connections,
+ # DEBUG level logs never show up unless "LogLevel debug" is specified
+ # in the server configuration context.
+ #
+ # TODO(tyoshino): Provide logging methods on request object. When
+ # request is mp_request object (when used together with Apache), the
+ # methods call request.log_error indirectly. When request is
+ # _StandaloneRequest, the methods call Python's logging facility which
+ # we create in standalone.py.
+ self._log_error(msg, apache_level, apache.main_server)
+
+
+def _configure_logging():
+ logger = logging.getLogger()
+ # Logs are filtered by Apache based on LogLevel directive in Apache
+ # configuration file. We must just pass logs for all levels to
+ # ApacheLogHandler.
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(ApacheLogHandler())
+
+
+_configure_logging()
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def _parse_option(name, value, definition):
+ if value is None:
+ return False
+
+ meaning = definition.get(value.lower())
+ if meaning is None:
+ raise Exception('Invalid value for PythonOption %s: %r' %
+ (name, value))
+ return meaning
+
+
+def _create_dispatcher():
+ _LOGGER.info('Initializing Dispatcher')
+
+ options = apache.main_server.get_options()
+
+ handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
+ if not handler_root:
+ raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
+ apache.APLOG_ERR)
+
+ handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
+
+ allow_handlers_outside_root = _parse_option(
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
+ options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
+
+ dispatcher = dispatch.Dispatcher(
+ handler_root, handler_scan, allow_handlers_outside_root)
+
+ for warning in dispatcher.source_warnings():
+ apache.log_error('mod_pywebsocket: %s' % warning, apache.APLOG_WARNING)
+
+ return dispatcher
+
+
+# Initialize
+_dispatcher = _create_dispatcher()
+
+
+def headerparserhandler(request):
+ """Handle request.
+
+ Args:
+ request: mod_python request.
+
+ This function is named headerparserhandler because it is the default
+ name for a PythonHeaderParserHandler.
+ """
+
+ handshake_is_done = False
+ try:
+ # Fallback to default http handler for request paths for which
+ # we don't have request handlers.
+ if not _dispatcher.get_handler_suite(request.uri):
+ request.log_error('No handler for resource: %r' % request.uri,
+ apache.APLOG_INFO)
+ request.log_error('Fallback to Apache', apache.APLOG_INFO)
+ return apache.DECLINED
+ except dispatch.DispatchException, e:
+ request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ if not handshake_is_done:
+ return e.status
+
+ try:
+ allow_draft75 = _parse_option(
+ _PYOPT_ALLOW_DRAFT75,
+ apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
+ _PYOPT_ALLOW_DRAFT75_DEFINITION)
+
+ try:
+ handshake.do_handshake(
+ request, _dispatcher, allowDraft75=allow_draft75)
+ except handshake.VersionException, e:
+ request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
+ e.supported_versions)
+ return apache.HTTP_BAD_REQUEST
+ except handshake.HandshakeException, e:
+ # Handshake for ws/wss failed.
+ # Send http response with error status.
+ request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ return e.status
+
+ handshake_is_done = True
+ request._dispatcher = _dispatcher
+ _dispatcher.transfer_data(request)
+ except handshake.AbortedByUserException, e:
+ request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ except Exception, e:
+ # DispatchException can also be thrown if something is wrong in
+ # pywebsocket code. It's caught here, then.
+
+ request.log_error('mod_pywebsocket: %s\n%s' %
+ (e, util.get_stack_trace()),
+ apache.APLOG_ERR)
+ # Unknown exceptions before handshake mean Apache must handle its
+ # request with another handler.
+ if not handshake_is_done:
+ return apache.DECLINED
+ # Set assbackwards to suppress response header generation by Apache.
+ request.assbackwards = 1
+ return apache.DONE # Return DONE such that no other handlers are invoked.
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/http_header_util.py b/pyload/lib/mod_pywebsocket/http_header_util.py
new file mode 100644
index 000000000..b77465393
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/http_header_util.py
@@ -0,0 +1,263 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Utilities for parsing and formatting headers that follow the grammar defined
+in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt.
+"""
+
+
+import urlparse
+
+
+_SEPARATORS = '()<>@,;:\\"/[]?={} \t'
+
+
+def _is_char(c):
+ """Returns true iff c is in CHAR as specified in HTTP RFC."""
+
+ return ord(c) <= 127
+
+
+def _is_ctl(c):
+ """Returns true iff c is in CTL as specified in HTTP RFC."""
+
+ return ord(c) <= 31 or ord(c) == 127
+
+
+class ParsingState(object):
+
+ def __init__(self, data):
+ self.data = data
+ self.head = 0
+
+
+def peek(state, pos=0):
+ """Peeks the character at pos from the head of data."""
+
+ if state.head + pos >= len(state.data):
+ return None
+
+ return state.data[state.head + pos]
+
+
+def consume(state, amount=1):
+    """Consumes specified amount of bytes from the head and returns the
+    consumed bytes. If there are not enough bytes to consume, returns None.
+ """
+
+ if state.head + amount > len(state.data):
+ return None
+
+ result = state.data[state.head:state.head + amount]
+ state.head = state.head + amount
+ return result
+
+
+def consume_string(state, expected):
+    """Given a parsing state and an expected string, consumes the string from
+ the head. Returns True if consumed successfully. Otherwise, returns
+ False.
+ """
+
+ pos = 0
+
+ for c in expected:
+ if c != peek(state, pos):
+ return False
+ pos += 1
+
+ consume(state, pos)
+ return True
+
+
+def consume_lws(state):
+ """Consumes a LWS from the head. Returns True if any LWS is consumed.
+ Otherwise, returns False.
+
+ LWS = [CRLF] 1*( SP | HT )
+ """
+
+ original_head = state.head
+
+ consume_string(state, '\r\n')
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c == ' ' or c == '\t':
+ pos += 1
+ else:
+ if pos == 0:
+ state.head = original_head
+ return False
+ else:
+ consume(state, pos)
+ return True
+
+
+def consume_lwses(state):
+ """Consumes *LWS from the head."""
+
+ while consume_lws(state):
+ pass
+
+
+def consume_token(state):
+ """Consumes a token from the head. Returns the token or None if no token
+ was found.
+ """
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ if pos == 0:
+ return None
+
+ return consume(state, pos)
+ else:
+ pos += 1
+
+
+def consume_token_or_quoted_string(state):
+ """Consumes a token or a quoted-string, and returns the token or unquoted
+ string. If no token or quoted-string was found, returns None.
+ """
+
+ original_head = state.head
+
+ if not consume_string(state, '"'):
+ return consume_token(state)
+
+ result = []
+
+ expect_quoted_pair = False
+
+ while True:
+ if not expect_quoted_pair and consume_lws(state):
+ result.append(' ')
+ continue
+
+ c = consume(state)
+ if c is None:
+ # quoted-string is not enclosed with double quotation
+ state.head = original_head
+ return None
+ elif expect_quoted_pair:
+ expect_quoted_pair = False
+ if _is_char(c):
+ result.append(c)
+ else:
+ # Non CHAR character found in quoted-pair
+ state.head = original_head
+ return None
+ elif c == '\\':
+ expect_quoted_pair = True
+ elif c == '"':
+ return ''.join(result)
+ elif _is_ctl(c):
+ # Invalid character %r found in qdtext
+ state.head = original_head
+ return None
+ else:
+ result.append(c)
+
+
+def quote_if_necessary(s):
+ """Quotes arbitrary string into quoted-string."""
+
+ quote = False
+ if s == '':
+ return '""'
+
+ result = []
+ for c in s:
+ if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ quote = True
+
+ if c == '"' or _is_ctl(c):
+ result.append('\\' + c)
+ else:
+ result.append(c)
+
+ if quote:
+ return '"' + ''.join(result) + '"'
+ else:
+ return ''.join(result)
+
+
+def parse_uri(uri):
+ """Parse absolute URI then return host, port and resource."""
+
+ parsed = urlparse.urlsplit(uri)
+ if parsed.scheme != 'wss' and parsed.scheme != 'ws':
+ # |uri| must be a relative URI.
+ # TODO(toyoshim): Should validate |uri|.
+ return None, None, uri
+
+ if parsed.hostname is None:
+ return None, None, None
+
+ port = None
+ try:
+ port = parsed.port
+ except ValueError, e:
+        # The port property raises ValueError on an invalid null port
+        # description like 'ws://host:/path'.
+ return None, None, None
+
+ if port is None:
+ if parsed.scheme == 'ws':
+ port = 80
+ else:
+ port = 443
+
+ path = parsed.path
+ if not path:
+ path += '/'
+ if parsed.query:
+ path += '?' + parsed.query
+ if parsed.fragment:
+ path += '#' + parsed.fragment
+
+ return parsed.hostname, port, path
+
+
+try:
+ urlparse.uses_netloc.index('ws')
+except ValueError, e:
+ # urlparse in Python2.5.1 doesn't have 'ws' and 'wss' entries.
+ urlparse.uses_netloc.append('ws')
+ urlparse.uses_netloc.append('wss')
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/memorizingfile.py b/pyload/lib/mod_pywebsocket/memorizingfile.py
new file mode 100644
index 000000000..4d4cd9585
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/memorizingfile.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Memorizing file.
+
+A memorizing file wraps a file and memorizes lines read by readline.
+"""
+
+
+import sys
+
+
+class MemorizingFile(object):
+ """MemorizingFile wraps a file and memorizes lines read by readline.
+
+ Note that data read by other methods are not memorized. This behavior
+ is good enough for memorizing lines SimpleHTTPServer reads before
+ the control reaches WebSocketRequestHandler.
+ """
+
+ def __init__(self, file_, max_memorized_lines=sys.maxint):
+ """Construct an instance.
+
+ Args:
+ file_: the file object to wrap.
+ max_memorized_lines: the maximum number of lines to memorize.
+ Only the first max_memorized_lines are memorized.
+ Default: sys.maxint.
+ """
+
+ self._file = file_
+ self._memorized_lines = []
+ self._max_memorized_lines = max_memorized_lines
+ self._buffered = False
+ self._buffered_line = None
+
+ def __getattribute__(self, name):
+ if name in ('_file', '_memorized_lines', '_max_memorized_lines',
+ '_buffered', '_buffered_line', 'readline',
+ 'get_memorized_lines'):
+ return object.__getattribute__(self, name)
+ return self._file.__getattribute__(name)
+
+ def readline(self, size=-1):
+ """Override file.readline and memorize the line read.
+
+ Note that even if size is specified and smaller than actual size,
+ the whole line will be read out from underlying file object by
+ subsequent readline calls.
+ """
+
+ if self._buffered:
+ line = self._buffered_line
+ self._buffered = False
+ else:
+ line = self._file.readline()
+ if line and len(self._memorized_lines) < self._max_memorized_lines:
+ self._memorized_lines.append(line)
+ if size >= 0 and size < len(line):
+ self._buffered = True
+ self._buffered_line = line[size:]
+ return line[:size]
+ return line
+
+ def get_memorized_lines(self):
+ """Get lines memorized so far."""
+ return self._memorized_lines
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/msgutil.py b/pyload/lib/mod_pywebsocket/msgutil.py
new file mode 100644
index 000000000..4c1a0114b
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/msgutil.py
@@ -0,0 +1,219 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Message related utilities.
+
+Note: request.connection.write/read are used in this module, even though
+mod_python document says that they should be used only in connection
+handlers. Unfortunately, we have no other options. For example,
+request.write/read are not suitable because they don't allow direct raw
+bytes writing/reading.
+"""
+
+
+import Queue
+import threading
+
+
+# Export Exception symbols from msgutil for backward compatibility
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+# An API for handler to send/receive WebSocket messages.
+def close_connection(request):
+ """Close connection.
+
+ Args:
+ request: mod_python request.
+ """
+ request.ws_stream.close_connection()
+
+
+def send_message(request, payload_data, end=True, binary=False):
+ """Send a message (or part of a message).
+
+ Args:
+ request: mod_python request.
+ payload_data: unicode text or str binary to send.
+ end: True to terminate a message.
+ False to send payload_data as part of a message that is to be
+ terminated by next or later send_message call with end=True.
+ binary: send payload_data as binary frame(s).
+ Raises:
+        BadOperationException: when the server has already terminated.
+ """
+ request.ws_stream.send_message(payload_data, end, binary)
+
+
+def receive_message(request):
+ """Receive a WebSocket frame and return its payload as a text in
+ unicode or a binary in str.
+
+ Args:
+ request: mod_python request.
+ Raises:
+        InvalidFrameException: when the client sends an invalid frame.
+        UnsupportedFrameException: when the client sends an unsupported
+                                   frame, e.g. some reserved bit is set but
+                                   no extension can recognize it.
+        InvalidUTF8Exception: when the client sends a text frame containing
+                              an invalid UTF-8 string.
+ ConnectionTerminatedException: when the connection is closed
+ unexpectedly.
+        BadOperationException: when the client has already terminated.
+ """
+ return request.ws_stream.receive_message()
+
+
+def send_ping(request, body=''):
+ request.ws_stream.send_ping(body)
+
+
+class MessageReceiver(threading.Thread):
+ """This class receives messages from the client.
+
+ This class provides three ways to receive messages: blocking,
+ non-blocking, and via callback. Callback has the highest precedence.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request, onmessage=None):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ onmessage: a function to be called when a message is received.
+ May be None. If not None, the function is called on
+ another thread. In that case, MessageReceiver.receive
+ and MessageReceiver.receive_nowait are useless
+ because they will never return any messages.
+ """
+
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self._onmessage = onmessage
+ self._stop_requested = False
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ try:
+ while not self._stop_requested:
+ message = receive_message(self._request)
+ if self._onmessage:
+ self._onmessage(message)
+ else:
+ self._queue.put(message)
+ finally:
+ close_connection(self._request)
+
+ def receive(self):
+ """ Receive a message from the channel, blocking.
+
+ Returns:
+ message as a unicode string.
+ """
+ return self._queue.get()
+
+ def receive_nowait(self):
+ """ Receive a message from the channel, non-blocking.
+
+ Returns:
+ message as a unicode string if available. None otherwise.
+ """
+ try:
+ message = self._queue.get_nowait()
+ except Queue.Empty:
+ message = None
+ return message
+
+ def stop(self):
+ """Request to stop this instance.
+
+ The instance will be stopped after receiving the next message.
+ This method may not be very useful, but there is no clean way
+ in Python to forcefully stop a running thread.
+ """
+ self._stop_requested = True
+
+
+class MessageSender(threading.Thread):
+ """This class sends messages to the client.
+
+ This class provides both synchronous and asynchronous ways to send
+ messages.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ while True:
+ message, condition = self._queue.get()
+ condition.acquire()
+ send_message(self._request, message)
+ condition.notify()
+ condition.release()
+
+ def send(self, message):
+ """Send a message, blocking."""
+
+ condition = threading.Condition()
+ condition.acquire()
+ self._queue.put((message, condition))
+ condition.wait()
+
+ def send_nowait(self, message):
+ """Send a message, non-blocking."""
+
+ self._queue.put((message, threading.Condition()))
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/mux.py b/pyload/lib/mod_pywebsocket/mux.py
new file mode 100644
index 000000000..f0bdd2461
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/mux.py
@@ -0,0 +1,1636 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for multiplexing extension.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
+"""
+
+
+import collections
+import copy
+import email
+import email.parser
+import logging
+import math
+import struct
+import threading
+import traceback
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import parse_frame
+from mod_pywebsocket.handshake import hybi
+
+
+_CONTROL_CHANNEL_ID = 0
+_DEFAULT_CHANNEL_ID = 1
+
+_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
+_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
+_MUX_OPCODE_FLOW_CONTROL = 2
+_MUX_OPCODE_DROP_CHANNEL = 3
+_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
+
+_MAX_CHANNEL_ID = 2 ** 29 - 1
+
+_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
+_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
+
+_HANDSHAKE_ENCODING_IDENTITY = 0
+_HANDSHAKE_ENCODING_DELTA = 1
+
+# We need only these status code for now.
+_HTTP_BAD_RESPONSE_MESSAGES = {
+ common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
+}
+
+# DropChannel reason code
+# TODO(bashi): Define all reason code defined in -05 draft.
+_DROP_CODE_NORMAL_CLOSURE = 1000
+
+_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
+_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
+_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
+_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
+_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
+_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
+_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
+
+_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 3002
+_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
+_DROP_CODE_ACKNOWLEDGED = 3008
+
+
+class MuxUnexpectedException(Exception):
+ """Exception in handling multiplexing extension."""
+ pass
+
+
+# Temporary
+class MuxNotImplementedException(Exception):
+ """Raised when a flow enters unimplemented code path."""
+ pass
+
+
+class LogicalConnectionClosedException(Exception):
+ """Raised when logical connection is gracefully closed."""
+ pass
+
+
+class PhysicalConnectionError(Exception):
+ """Raised when there is a physical connection error."""
+ def __init__(self, drop_code, message=''):
+ super(PhysicalConnectionError, self).__init__(
+ 'code=%d, message=%r' % (drop_code, message))
+ self.drop_code = drop_code
+ self.message = message
+
+
+class LogicalChannelError(Exception):
+ """Raised when there is a logical channel error."""
+ def __init__(self, channel_id, drop_code, message=''):
+ super(LogicalChannelError, self).__init__(
+ 'channel_id=%d, code=%d, message=%r' % (
+ channel_id, drop_code, message))
+ self.channel_id = channel_id
+ self.drop_code = drop_code
+ self.message = message
+
+
+def _encode_channel_id(channel_id):
+ if channel_id < 0:
+ raise ValueError('Channel id %d must not be negative' % channel_id)
+
+ if channel_id < 2 ** 7:
+ return chr(channel_id)
+ if channel_id < 2 ** 14:
+ return struct.pack('!H', 0x8000 + channel_id)
+ if channel_id < 2 ** 21:
+ first = chr(0xc0 + (channel_id >> 16))
+ return first + struct.pack('!H', channel_id & 0xffff)
+ if channel_id < 2 ** 29:
+ return struct.pack('!L', 0xe0000000 + channel_id)
+
+ raise ValueError('Channel id %d is too large' % channel_id)
+
+
+def _encode_number(number):
+ return create_length_header(number, False)
+
+
+def _create_add_channel_response(channel_id, encoded_handshake,
+ encoding=0, rejected=False,
+ outer_frame_mask=False):
+ if encoding != 0 and encoding != 1:
+ raise ValueError('Invalid encoding %d' % encoding)
+
+ first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
+ (rejected << 4) | encoding)
+ block = (chr(first_byte) +
+ _encode_channel_id(channel_id) +
+ _encode_number(len(encoded_handshake)) +
+ encoded_handshake)
+ payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+ return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _create_drop_channel(channel_id, code=None, message='',
+ outer_frame_mask=False):
+ if len(message) > 0 and code is None:
+ raise ValueError('Code must be specified if message is specified')
+
+ first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
+ block = chr(first_byte) + _encode_channel_id(channel_id)
+ if code is None:
+ block += _encode_number(0) # Reason size
+ else:
+ reason = struct.pack('!H', code) + message
+ reason_size = _encode_number(len(reason))
+ block += reason_size + reason
+
+ payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+ return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _create_flow_control(channel_id, replenished_quota,
+ outer_frame_mask=False):
+ first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
+ block = (chr(first_byte) +
+ _encode_channel_id(channel_id) +
+ _encode_number(replenished_quota))
+ payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+ return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _create_new_channel_slot(slots, send_quota, outer_frame_mask=False):
+ if slots < 0 or send_quota < 0:
+ raise ValueError('slots and send_quota must be non-negative.')
+ first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
+ block = (chr(first_byte) +
+ _encode_number(slots) +
+ _encode_number(send_quota))
+ payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+ return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _create_fallback_new_channel_slot(outer_frame_mask=False):
+ first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
+ block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
+ payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+ return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _parse_request_text(request_text):
+ request_line, header_lines = request_text.split('\r\n', 1)
+
+ words = request_line.split(' ')
+ if len(words) != 3:
+ raise ValueError('Bad Request-Line syntax %r' % request_line)
+ [command, path, version] = words
+ if version != 'HTTP/1.1':
+ raise ValueError('Bad request version %r' % version)
+
+ # email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
+    # RFC 6455 refers to RFC 2616 for handshake parsing, and RFC 2616
+    # refers to RFC 822.
+ headers = email.parser.Parser().parsestr(header_lines)
+ return command, path, version, headers
+
+
+class _ControlBlock(object):
+ """A structure that holds parsing result of multiplexing control block.
+ Control block specific attributes will be added by _MuxFramePayloadParser.
+ (e.g. encoded_handshake will be added for AddChannelRequest and
+ AddChannelResponse)
+ """
+
+ def __init__(self, opcode):
+ self.opcode = opcode
+
+
+class _MuxFramePayloadParser(object):
+ """A class that parses multiplexed frame payload."""
+
+ def __init__(self, payload):
+ self._data = payload
+ self._read_position = 0
+ self._logger = util.get_class_logger(self)
+
+ def read_channel_id(self):
+ """Reads channel id.
+
+ Raises:
+ ValueError: when the payload doesn't contain
+ valid channel id.
+ """
+
+ remaining_length = len(self._data) - self._read_position
+ pos = self._read_position
+ if remaining_length == 0:
+ raise ValueError('Invalid channel id format')
+
+ channel_id = ord(self._data[pos])
+ channel_id_length = 1
+ if channel_id & 0xe0 == 0xe0:
+ if remaining_length < 4:
+ raise ValueError('Invalid channel id format')
+ channel_id = struct.unpack('!L',
+ self._data[pos:pos+4])[0] & 0x1fffffff
+ channel_id_length = 4
+ elif channel_id & 0xc0 == 0xc0:
+ if remaining_length < 3:
+ raise ValueError('Invalid channel id format')
+ channel_id = (((channel_id & 0x1f) << 16) +
+ struct.unpack('!H', self._data[pos+1:pos+3])[0])
+ channel_id_length = 3
+ elif channel_id & 0x80 == 0x80:
+ if remaining_length < 2:
+ raise ValueError('Invalid channel id format')
+ channel_id = struct.unpack('!H',
+ self._data[pos:pos+2])[0] & 0x3fff
+ channel_id_length = 2
+ self._read_position += channel_id_length
+
+ return channel_id
+
+ def read_inner_frame(self):
+ """Reads an inner frame.
+
+ Raises:
+ PhysicalConnectionError: when the inner frame is invalid.
+ """
+
+ if len(self._data) == self._read_position:
+ raise PhysicalConnectionError(
+ _DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
+
+ bits = ord(self._data[self._read_position])
+ self._read_position += 1
+ fin = (bits & 0x80) == 0x80
+ rsv1 = (bits & 0x40) == 0x40
+ rsv2 = (bits & 0x20) == 0x20
+ rsv3 = (bits & 0x10) == 0x10
+ opcode = bits & 0xf
+ payload = self.remaining_data()
+ # Consume rest of the message which is payload data of the original
+ # frame.
+ self._read_position = len(self._data)
+ return fin, rsv1, rsv2, rsv3, opcode, payload
+
+ def _read_number(self):
+ if self._read_position + 1 > len(self._data):
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Cannot read the first byte of number field')
+
+ number = ord(self._data[self._read_position])
+ if number & 0x80 == 0x80:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'The most significant bit of the first byte of number should '
+ 'be unset')
+ self._read_position += 1
+ pos = self._read_position
+ if number == 127:
+ if pos + 8 > len(self._data):
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Invalid number field')
+ self._read_position += 8
+ number = struct.unpack('!Q', self._data[pos:pos+8])[0]
+ if number > 0x7FFFFFFFFFFFFFFF:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Encoded number >= 2^63')
+ if number <= 0xFFFF:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ '%d should not be encoded by 9 bytes encoding' % number)
+ return number
+ if number == 126:
+ if pos + 2 > len(self._data):
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Invalid number field')
+ self._read_position += 2
+ number = struct.unpack('!H', self._data[pos:pos+2])[0]
+ if number <= 125:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ '%d should not be encoded by 3 bytes encoding' % number)
+ return number
+
+ def _read_size_and_contents(self):
+ """Reads data that consists of followings:
+ - the size of the contents encoded the same way as payload length
+ of the WebSocket Protocol with 1 bit padding at the head.
+ - the contents.
+ """
+
+ size = self._read_number()
+ pos = self._read_position
+ if pos + size > len(self._data):
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Cannot read %d bytes data' % size)
+
+ self._read_position += size
+ return self._data[pos:pos+size]
+
+ def _read_add_channel_request(self, first_byte, control_block):
+ reserved = (first_byte >> 2) & 0x7
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ # Invalid encoding will be handled by MuxHandler.
+ encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoding = encoding
+ encoded_handshake = self._read_size_and_contents()
+ control_block.encoded_handshake = encoded_handshake
+ return control_block
+
+ def _read_add_channel_response(self, first_byte, control_block):
+ reserved = (first_byte >> 2) & 0x3
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ control_block.accepted = (first_byte >> 4) & 1
+ control_block.encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoded_handshake = self._read_size_and_contents()
+ return control_block
+
+ def _read_flow_control(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.send_quota = self._read_number()
+ return control_block
+
+ def _read_drop_channel(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ reason = self._read_size_and_contents()
+ if len(reason) == 0:
+ control_block.drop_code = None
+ control_block.drop_message = ''
+ elif len(reason) >= 2:
+ control_block.drop_code = struct.unpack('!H', reason[:2])[0]
+ control_block.drop_message = reason[2:]
+ else:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Received DropChannel that contains only 1-byte reason')
+ return control_block
+
+ def _read_new_channel_slot(self, first_byte, control_block):
+ reserved = first_byte & 0x1e
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+ control_block.fallback = first_byte & 1
+ control_block.slots = self._read_number()
+ control_block.send_quota = self._read_number()
+ return control_block
+
+ def read_control_blocks(self):
+ """Reads control block(s).
+
+ Raises:
+ PhysicalConnectionError: when the payload contains invalid control
+ block(s).
+ StopIteration: when no control blocks left.
+ """
+
+ while self._read_position < len(self._data):
+ first_byte = ord(self._data[self._read_position])
+ self._read_position += 1
+ opcode = (first_byte >> 5) & 0x7
+ control_block = _ControlBlock(opcode=opcode)
+ if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
+ yield self._read_add_channel_request(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ yield self._read_add_channel_response(
+ first_byte, control_block)
+ elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+ yield self._read_flow_control(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+ yield self._read_drop_channel(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+ yield self._read_new_channel_slot(first_byte, control_block)
+ else:
+ raise PhysicalConnectionError(
+ _DROP_CODE_UNKNOWN_MUX_OPCODE,
+ 'Invalid opcode %d' % opcode)
+
+ assert self._read_position == len(self._data)
+ raise StopIteration
+
+ def remaining_data(self):
+ """Returns remaining data."""
+
+ return self._data[self._read_position:]
+
+
+class _LogicalRequest(object):
+ """Mimics mod_python request."""
+
+ def __init__(self, channel_id, command, path, protocol, headers,
+ connection):
+ """Constructs an instance.
+
+ Args:
+ channel_id: the channel id of the logical channel.
+ command: HTTP request command.
+ path: HTTP request path.
+ headers: HTTP headers.
+ connection: _LogicalConnection instance.
+ """
+
+ self.channel_id = channel_id
+ self.method = command
+ self.uri = path
+ self.protocol = protocol
+ self.headers_in = headers
+ self.connection = connection
+ self.server_terminated = False
+ self.client_terminated = False
+
+ def is_https(self):
+ """Mimics request.is_https(). Returns False because this method is
+ used only by old protocols (hixie and hybi00).
+ """
+
+ return False
+
+
+class _LogicalConnection(object):
+ """Mimics mod_python mp_conn."""
+
+ # For details, see the comment of set_read_state().
+ STATE_ACTIVE = 1
+ STATE_GRACEFULLY_CLOSED = 2
+ STATE_TERMINATED = 3
+
+ def __init__(self, mux_handler, channel_id):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ channel_id: channel id of this connection.
+ """
+
+ self._mux_handler = mux_handler
+ self._channel_id = channel_id
+ self._incoming_data = ''
+ self._write_condition = threading.Condition()
+ self._waiting_write_completion = False
+ self._read_condition = threading.Condition()
+ self._read_state = self.STATE_ACTIVE
+
+ def get_local_addr(self):
+ """Getter to mimic mp_conn.local_addr."""
+
+ return self._mux_handler.physical_connection.get_local_addr()
+ local_addr = property(get_local_addr)
+
+ def get_remote_addr(self):
+ """Getter to mimic mp_conn.remote_addr."""
+
+ return self._mux_handler.physical_connection.get_remote_addr()
+ remote_addr = property(get_remote_addr)
+
+ def get_memorized_lines(self):
+ """Gets memorized lines. Not supported."""
+
+ raise MuxUnexpectedException('_LogicalConnection does not support '
+ 'get_memorized_lines')
+
+ def write(self, data):
+        """Writes data. mux_handler sends data asynchronously. The caller
+        will be suspended until the write is done.
+
+ Args:
+ data: data to be written.
+
+ Raises:
+ MuxUnexpectedException: when called before finishing the previous
+ write.
+ """
+
+ try:
+ self._write_condition.acquire()
+ if self._waiting_write_completion:
+ raise MuxUnexpectedException(
+ 'Logical connection %d is already waiting the completion '
+ 'of write' % self._channel_id)
+
+ self._waiting_write_completion = True
+ self._mux_handler.send_data(self._channel_id, data)
+ self._write_condition.wait()
+ finally:
+ self._write_condition.release()
+
+ def write_control_data(self, data):
+        """Writes data via the control channel. Doesn't wait for the write
+        to finish because this method can be called by the mux dispatcher.
+
+ Args:
+ data: data to be written.
+ """
+
+ self._mux_handler.send_control_data(data)
+
+ def notify_write_done(self):
+ """Called when sending data is completed."""
+
+ try:
+ self._write_condition.acquire()
+ if not self._waiting_write_completion:
+ raise MuxUnexpectedException(
+ 'Invalid call of notify_write_done for logical connection'
+ ' %d' % self._channel_id)
+ self._waiting_write_completion = False
+ self._write_condition.notify()
+ finally:
+ self._write_condition.release()
+
+ def append_frame_data(self, frame_data):
+ """Appends incoming frame data. Called when mux_handler dispatches
+ frame data to the corresponding application.
+
+ Args:
+ frame_data: incoming frame data.
+ """
+
+ self._read_condition.acquire()
+ self._incoming_data += frame_data
+ self._read_condition.notify()
+ self._read_condition.release()
+
+ def read(self, length):
+ """Reads data. Blocks until enough data has arrived via physical
+ connection.
+
+ Args:
+ length: length of data to be read.
+ Raises:
+ LogicalConnectionClosedException: when closing handshake for this
+ logical channel has been received.
+ ConnectionTerminatedException: when the physical connection has
+ closed, or an error is caused on the reader thread.
+ """
+
+ self._read_condition.acquire()
+ while (self._read_state == self.STATE_ACTIVE and
+ len(self._incoming_data) < length):
+ self._read_condition.wait()
+
+ try:
+ if self._read_state == self.STATE_GRACEFULLY_CLOSED:
+ raise LogicalConnectionClosedException(
+ 'Logical channel %d has closed.' % self._channel_id)
+ elif self._read_state == self.STATE_TERMINATED:
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. Logical channel (%d) closed' %
+ (length, self._channel_id))
+
+ value = self._incoming_data[:length]
+ self._incoming_data = self._incoming_data[length:]
+ finally:
+ self._read_condition.release()
+
+ return value
+
+ def set_read_state(self, new_state):
+ """Sets the state of this connection. Called when an event for this
+ connection has occurred.
+
+ Args:
+ new_state: state to be set. new_state must be one of followings:
+ - STATE_GRACEFULLY_CLOSED: when closing handshake for this
+ connection has been received.
+ - STATE_TERMINATED: when the physical connection has closed or
+ DropChannel of this connection has received.
+ """
+
+ self._read_condition.acquire()
+ self._read_state = new_state
+ self._read_condition.notify()
+ self._read_condition.release()
+
+
+class _LogicalStream(Stream):
+ """Mimics the Stream class. This class interprets multiplexed WebSocket
+ frames.
+ """
+
+ def __init__(self, request, send_quota, receive_quota):
+ """Constructs an instance.
+
+ Args:
+ request: _LogicalRequest instance.
+ send_quota: Initial send quota.
+ receive_quota: Initial receive quota.
+ """
+
+ # TODO(bashi): Support frame filters.
+ stream_options = StreamOptions()
+ # Physical stream is responsible for masking.
+ stream_options.unmask_receive = False
+ # Control frames can be fragmented on logical channel.
+ stream_options.allow_fragmented_control_frame = True
+ Stream.__init__(self, request, stream_options)
+ self._send_quota = send_quota
+ self._send_quota_condition = threading.Condition()
+ self._receive_quota = receive_quota
+ self._write_inner_frame_semaphore = threading.Semaphore()
+
+ def _create_inner_frame(self, opcode, payload, end=True):
+ # TODO(bashi): Support extensions that use reserved bits.
+ first_byte = (end << 7) | opcode
+ return (_encode_channel_id(self._request.channel_id) +
+ chr(first_byte) + payload)
+
+ def _write_inner_frame(self, opcode, payload, end=True):
+ payload_length = len(payload)
+ write_position = 0
+
+ try:
+            # An inner frame will be fragmented if there is not enough send
+ # quota. This semaphore ensures that fragmented inner frames are
+ # sent in order on the logical channel.
+ # Note that frames that come from other logical channels or
+ # multiplexing control blocks can be inserted between fragmented
+ # inner frames on the physical channel.
+ self._write_inner_frame_semaphore.acquire()
+ while write_position < payload_length:
+ try:
+ self._send_quota_condition.acquire()
+ while self._send_quota == 0:
+ self._logger.debug(
+ 'No quota. Waiting FlowControl message for %d.' %
+ self._request.channel_id)
+ self._send_quota_condition.wait()
+
+ remaining = payload_length - write_position
+ write_length = min(self._send_quota, remaining)
+ inner_frame_end = (
+ end and
+ (write_position + write_length == payload_length))
+
+ inner_frame = self._create_inner_frame(
+ opcode,
+ payload[write_position:write_position+write_length],
+ inner_frame_end)
+ frame_data = self._writer.build(
+ inner_frame, end=True, binary=True)
+ self._send_quota -= write_length
+ self._logger.debug('Consumed quota=%d, remaining=%d' %
+ (write_length, self._send_quota))
+ finally:
+ self._send_quota_condition.release()
+
+ # Writing data will block the worker so we need to release
+ # _send_quota_condition before writing.
+ self._logger.debug('Sending inner frame: %r' % frame_data)
+ self._request.connection.write(frame_data)
+ write_position += write_length
+
+ opcode = common.OPCODE_CONTINUATION
+
+ except ValueError, e:
+ raise BadOperationException(e)
+ finally:
+ self._write_inner_frame_semaphore.release()
+
+ def replenish_send_quota(self, send_quota):
+ """Replenish send quota."""
+
+ self._send_quota_condition.acquire()
+ self._send_quota += send_quota
+ self._logger.debug('Replenished send quota for channel id %d: %d' %
+ (self._request.channel_id, self._send_quota))
+ self._send_quota_condition.notify()
+ self._send_quota_condition.release()
+
+ def consume_receive_quota(self, amount):
+ """Consumes receive quota. Returns False on failure."""
+
+ if self._receive_quota < amount:
+ self._logger.debug('Violate quota on channel id %d: %d < %d' %
+ (self._request.channel_id,
+ self._receive_quota, amount))
+ return False
+ self._receive_quota -= amount
+ return True
+
+ def send_message(self, message, end=True, binary=False):
+ """Override Stream.send_message."""
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ if binary and isinstance(message, unicode):
+ raise BadOperationException(
+ 'Message for binary frame must be instance of str')
+
+ if binary:
+ opcode = common.OPCODE_BINARY
+ else:
+ opcode = common.OPCODE_TEXT
+ message = message.encode('utf-8')
+
+ self._write_inner_frame(opcode, message, end)
+
+ def _receive_frame(self):
+ """Overrides Stream._receive_frame.
+
+        In addition to calling Stream._receive_frame, this method adds the
+        payload size to the receive quota and sends FlowControl to the client.
+ We need to do it here because Stream.receive_message() handles
+ control frames internally.
+ """
+
+ opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
+ amount = len(payload)
+ self._receive_quota += amount
+ frame_data = _create_flow_control(self._request.channel_id,
+ amount)
+ self._logger.debug('Sending flow control for %d, replenished=%d' %
+ (self._request.channel_id, amount))
+ self._request.connection.write_control_data(frame_data)
+ return opcode, payload, fin, rsv1, rsv2, rsv3
+
+ def receive_message(self):
+ """Overrides Stream.receive_message."""
+
+ # Just call Stream.receive_message(), but catch
+ # LogicalConnectionClosedException, which is raised when the logical
+ # connection has closed gracefully.
+ try:
+ return Stream.receive_message(self)
+ except LogicalConnectionClosedException, e:
+ self._logger.debug('%s', e)
+ return None
+
+ def _send_closing_handshake(self, code, reason):
+ """Overrides Stream._send_closing_handshake."""
+
+ body = create_closing_handshake_body(code, reason)
+ self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
+ (self._request.channel_id, code, reason))
+ self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
+
+ self._request.server_terminated = True
+
+ def send_ping(self, body=''):
+ """Overrides Stream.send_ping"""
+
+ self._logger.debug('Sending ping on logical channel %d: %r' %
+ (self._request.channel_id, body))
+ self._write_inner_frame(common.OPCODE_PING, body, end=True)
+
+ self._ping_queue.append(body)
+
+ def _send_pong(self, body):
+ """Overrides Stream._send_pong"""
+
+ self._logger.debug('Sending pong on logical channel %d: %r' %
+ (self._request.channel_id, body))
+ self._write_inner_frame(common.OPCODE_PONG, body, end=True)
+
+ def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
+ """Overrides Stream.close_connection."""
+
+ # TODO(bashi): Implement
+ self._logger.debug('Closing logical connection %d' %
+ self._request.channel_id)
+ self._request.server_terminated = True
+
+ def _drain_received_data(self):
+        """Overrides Stream._drain_received_data. Nothing needs to be done
+        for a logical channel.
+ """
+
+ pass
+
+
+class _OutgoingData(object):
+ """A structure that holds data to be sent via physical connection and
+ origin of the data.
+ """
+
+ def __init__(self, channel_id, data):
+ self.channel_id = channel_id
+ self.data = data
+
+
+class _PhysicalConnectionWriter(threading.Thread):
+ """A thread that is responsible for writing data to physical connection.
+
+ TODO(bashi): Make sure there is no thread-safety problem when the reader
+ thread reads data from the same socket at a time.
+ """
+
+ def __init__(self, mux_handler):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self.setDaemon(True)
+ self._stop_requested = False
+ self._deque = collections.deque()
+ self._deque_condition = threading.Condition()
+
+ def put_outgoing_data(self, data):
+ """Puts outgoing data.
+
+ Args:
+ data: _OutgoingData instance.
+
+ Raises:
+ BadOperationException: when the thread has been requested to
+ terminate.
+ """
+
+ try:
+ self._deque_condition.acquire()
+ if self._stop_requested:
+ raise BadOperationException('Cannot write data anymore')
+
+ self._deque.append(data)
+ self._deque_condition.notify()
+ finally:
+ self._deque_condition.release()
+
+ def _write_data(self, outgoing_data):
+ try:
+ self._mux_handler.physical_connection.write(outgoing_data.data)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to send message to %r: ' %
+ (self._mux_handler.physical_connection.remote_addr,), e)
+ raise
+
+ # TODO(bashi): It would be better to block the thread that sends
+ # control data as well.
+ if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
+ self._mux_handler.notify_write_done(outgoing_data.channel_id)
+
+ def run(self):
+ self._deque_condition.acquire()
+ while not self._stop_requested:
+ if len(self._deque) == 0:
+ self._deque_condition.wait()
+ continue
+
+ outgoing_data = self._deque.popleft()
+ self._deque_condition.release()
+ self._write_data(outgoing_data)
+ self._deque_condition.acquire()
+
+ # Flush deque
+ try:
+ while len(self._deque) > 0:
+ outgoing_data = self._deque.popleft()
+ self._write_data(outgoing_data)
+ finally:
+ self._deque_condition.release()
+
+ def stop(self):
+ """Stops the writer thread."""
+
+ self._deque_condition.acquire()
+ self._stop_requested = True
+ self._deque_condition.notify()
+ self._deque_condition.release()
+
+
+class _PhysicalConnectionReader(threading.Thread):
+ """A thread that is responsible for reading data from physical connection.
+ """
+
+ def __init__(self, mux_handler):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self.setDaemon(True)
+
+ def run(self):
+ while True:
+ try:
+ physical_stream = self._mux_handler.physical_stream
+ message = physical_stream.receive_message()
+ if message is None:
+ break
+ # Below happens only when a data message is received.
+ opcode = physical_stream.get_last_received_opcode()
+ if opcode != common.OPCODE_BINARY:
+ self._mux_handler.fail_physical_connection(
+ _DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
+ 'Received a text message on physical connection')
+ break
+
+ except ConnectionTerminatedException, e:
+ self._logger.debug('%s', e)
+ break
+
+ try:
+ self._mux_handler.dispatch_message(message)
+ except PhysicalConnectionError, e:
+ self._mux_handler.fail_physical_connection(
+ e.drop_code, e.message)
+ break
+ except LogicalChannelError, e:
+ self._mux_handler.fail_logical_channel(
+ e.channel_id, e.drop_code, e.message)
+ except Exception, e:
+ self._logger.debug(traceback.format_exc())
+ break
+
+ self._mux_handler.notify_reader_done()
+
+
class _Worker(threading.Thread):
    """Daemon thread that runs the application handler for one logical
    channel and reports completion back to the mux handler.
    """

    def __init__(self, mux_handler, request):
        """Constructs an instance.

        Args:
            mux_handler: _MuxHandler instance.
            request: _LogicalRequest instance.
        """

        threading.Thread.__init__(self)
        self._mux_handler = mux_handler
        self._request = request
        self._logger = util.get_class_logger(self)
        self.setDaemon(True)

    def run(self):
        self._logger.debug('Logical channel worker started. (id=%d)',
                           self._request.channel_id)
        try:
            # Non-critical exceptions will be handled by dispatcher.
            self._mux_handler.dispatcher.transfer_data(self._request)
        finally:
            # Always report completion so the mux handler can remove the
            # channel entry, even when transfer_data raises.
            self._mux_handler.notify_worker_done(self._request.channel_id)
+
+
class _MuxHandshaker(hybi.Handshaker):
    """Opening handshake processor for multiplexing."""

    # AddChannelRequest handshakes carry no real Sec-WebSocket-Key; this
    # placeholder (the sample nonce from RFC 6455) satisfies the hybi
    # handshaker's validation.
    _DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='

    def __init__(self, request, dispatcher, send_quota, receive_quota):
        """Constructs an instance.
        Args:
            request: _LogicalRequest instance.
            dispatcher: Dispatcher instance (dispatch.Dispatcher).
            send_quota: Initial send quota.
            receive_quota: Initial receive quota.
        """

        hybi.Handshaker.__init__(self, request, dispatcher)
        self._send_quota = send_quota
        self._receive_quota = receive_quota

        # Append headers which should not be included in handshake field of
        # AddChannelRequest.
        # TODO(bashi): Make sure whether we should raise exception when
        #     these headers are included already.
        request.headers_in[common.UPGRADE_HEADER] = (
            common.WEBSOCKET_UPGRADE_TYPE)
        request.headers_in[common.CONNECTION_HEADER] = (
            common.UPGRADE_CONNECTION_TYPE)
        request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
            str(common.VERSION_HYBI_LATEST))
        request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
            self._DUMMY_WEBSOCKET_KEY)

    def _create_stream(self, stream_options):
        """Override hybi.Handshaker._create_stream."""

        self._logger.debug('Creating logical stream for %d' %
                           self._request.channel_id)
        # Logical channels use a quota-aware stream instead of the plain
        # hybi stream.
        return _LogicalStream(self._request, self._send_quota,
                              self._receive_quota)

    def _create_handshake_response(self, accept):
        """Override hybi._create_handshake_response."""

        response = []

        response.append('HTTP/1.1 101 Switching Protocols\r\n')

        # Upgrade, Connection and Sec-WebSocket-Accept should be excluded.
        if self._request.ws_protocol is not None:
            response.append('%s: %s\r\n' % (
                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
                self._request.ws_protocol))
        if (self._request.ws_extensions is not None and
            len(self._request.ws_extensions) != 0):
            response.append('%s: %s\r\n' % (
                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
                common.format_extensions(self._request.ws_extensions)))
        response.append('\r\n')

        return ''.join(response)

    def _send_handshake(self, accept):
        """Override hybi.Handshaker._send_handshake."""

        # Don't send handshake response for the default channel
        if self._request.channel_id == _DEFAULT_CHANNEL_ID:
            return

        # The response travels inside an AddChannelResponse control block
        # on the physical connection, not as raw HTTP.
        handshake_response = self._create_handshake_response(accept)
        frame_data = _create_add_channel_response(
                         self._request.channel_id,
                         handshake_response)
        self._logger.debug('Sending handshake response for %d: %r' %
                           (self._request.channel_id, frame_data))
        self._request.connection.write_control_data(frame_data)
+
+
class _LogicalChannelData(object):
    """Bookkeeping record for one logical channel: the logical request, the
    worker thread serving it, and the DropChannel code/message to report
    when the channel is torn down.
    """

    def __init__(self, request, worker):
        # Until told otherwise, a closing channel reports normal closure
        # with an empty message.
        self.drop_code = _DROP_CODE_NORMAL_CLOSURE
        self.drop_message = ''
        self.request = request
        self.worker = worker
+
+
+class _HandshakeDeltaBase(object):
+ """A class that holds information for delta-encoded handshake."""
+
+ def __init__(self, headers):
+ self._headers = headers
+
+ def create_headers(self, delta=None):
+ """Creates request headers for an AddChannelRequest that has
+ delta-encoded handshake.
+
+ Args:
+ delta: headers should be overridden.
+ """
+
+ headers = copy.copy(self._headers)
+ if delta:
+ for key, value in delta.items():
+ # The spec requires that a header with an empty value is
+ # removed from the delta base.
+ if len(value) == 0 and headers.has_key(key):
+ del headers[key]
+ else:
+ headers[key] = value
+ # TODO(bashi): Support extensions
+ headers['Sec-WebSocket-Extensions'] = ''
+ return headers
+
+
class _MuxHandler(object):
    """Multiplexing handler. When a handler starts, it launches three
    threads; the reader thread, the writer thread, and a worker thread.

    The reader thread reads data from the physical stream, i.e., the
    ws_stream object of the underlying websocket connection. The reader
    thread interprets multiplexed frames and dispatches them to logical
    channels. Methods of this class are mostly called by the reader thread.

    The writer thread sends multiplexed frames which are created by
    logical channels via the physical connection.

    The worker thread launched at the starting point handles the
    "Implicitly Opened Connection". If multiplexing handler receives
    an AddChannelRequest and accepts it, the handler will launch a new worker
    thread and dispatch the request to it.
    """

    def __init__(self, request, dispatcher):
        """Constructs an instance.

        Args:
            request: mod_python request of the physical connection.
            dispatcher: Dispatcher instance (dispatch.Dispatcher).
        """

        self.original_request = request
        self.dispatcher = dispatcher
        self.physical_connection = request.connection
        self.physical_stream = request.ws_stream
        self._logger = util.get_class_logger(self)
        # Maps channel id -> _LogicalChannelData. Guarded by the condition
        # below, which also signals worker completion to wait_until_done().
        self._logical_channels = {}
        self._logical_channels_condition = threading.Condition()
        # Holds client's initial quota
        self._channel_slots = collections.deque()
        self._handshake_base = None
        self._worker_done_notify_received = False
        self._reader = None
        self._writer = None

    def start(self):
        """Starts the handler.

        Raises:
            MuxUnexpectedException: when the handler already started, or when
                opening handshake of the default channel fails.
        """

        if self._reader or self._writer:
            raise MuxUnexpectedException('MuxHandler already started')

        # Reader/writer must be running before the default channel's
        # handshake, which sends data through them.
        self._reader = _PhysicalConnectionReader(self)
        self._writer = _PhysicalConnectionWriter(self)
        self._reader.start()
        self._writer.start()

        # Create "Implicitly Opened Connection".
        logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
        self._handshake_base = _HandshakeDeltaBase(
            self.original_request.headers_in)
        logical_request = _LogicalRequest(
            _DEFAULT_CHANNEL_ID,
            self.original_request.method,
            self.original_request.uri,
            self.original_request.protocol,
            self._handshake_base.create_headers(),
            logical_connection)
        # Client's send quota for the implicitly opened connection is zero,
        # but we will send FlowControl later so set the initial quota to
        # _INITIAL_QUOTA_FOR_CLIENT.
        self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
        if not self._do_handshake_for_logical_request(
            logical_request, send_quota=self.original_request.mux_quota):
            raise MuxUnexpectedException(
                'Failed handshake on the default channel id')
        self._add_logical_channel(logical_request)

        # Send FlowControl for the implicitly opened connection.
        frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
                                          _INITIAL_QUOTA_FOR_CLIENT)
        logical_request.connection.write_control_data(frame_data)

    def add_channel_slots(self, slots, send_quota):
        """Adds channel slots.

        Args:
            slots: number of slots to be added.
            send_quota: initial send quota for slots.
        """

        self._channel_slots.extend([send_quota] * slots)
        # Send NewChannelSlot to client.
        frame_data = _create_new_channel_slot(slots, send_quota)
        self.send_control_data(frame_data)

    def wait_until_done(self, timeout=None):
        """Waits until all workers are done. Returns False when timeout has
        occurred. Returns True on success.

        Args:
            timeout: timeout in sec.
        """

        self._logical_channels_condition.acquire()
        try:
            while len(self._logical_channels) > 0:
                self._logger.debug('Waiting workers(%d)...' %
                                   len(self._logical_channels))
                # The flag distinguishes a genuine worker-done notify from
                # a timeout (Condition.wait gives no return value here).
                self._worker_done_notify_received = False
                self._logical_channels_condition.wait(timeout)
                if not self._worker_done_notify_received:
                    self._logger.debug('Waiting worker(s) timed out')
                    return False

        finally:
            self._logical_channels_condition.release()

        # Flush pending outgoing data
        self._writer.stop()
        self._writer.join()

        return True

    def notify_write_done(self, channel_id):
        """Called by the writer thread when a write operation has done.

        Args:
            channel_id: objective channel id.
        """

        # NOTE(review): acquire() sits inside the try throughout this class;
        # if it ever raised, release() in finally would fail too. Pattern
        # kept as-is to preserve behavior.
        try:
            self._logical_channels_condition.acquire()
            if channel_id in self._logical_channels:
                channel_data = self._logical_channels[channel_id]
                channel_data.request.connection.notify_write_done()
            else:
                # The channel may have been dropped while the write was
                # in flight; that is not an error.
                self._logger.debug('Seems that logical channel for %d has gone'
                                   % channel_id)
        finally:
            self._logical_channels_condition.release()

    def send_control_data(self, data):
        """Sends data via the control channel.

        Args:
            data: data to be sent.
        """

        self._writer.put_outgoing_data(_OutgoingData(
            channel_id=_CONTROL_CHANNEL_ID, data=data))

    def send_data(self, channel_id, data):
        """Sends data via given logical channel. This method is called by
        worker threads.

        Args:
            channel_id: objective channel id.
            data: data to be sent.
        """

        self._writer.put_outgoing_data(_OutgoingData(
            channel_id=channel_id, data=data))

    def _send_drop_channel(self, channel_id, code=None, message=''):
        # DropChannel is a control block, so it goes over channel 0.
        frame_data = _create_drop_channel(channel_id, code, message)
        self._logger.debug(
            'Sending drop channel for channel id %d' % channel_id)
        self.send_control_data(frame_data)

    def _send_error_add_channel_response(self, channel_id, status=None):
        # Rejects an AddChannelRequest with an encoded HTTP error response.
        if status is None:
            status = common.HTTP_STATUS_BAD_REQUEST

        if status in _HTTP_BAD_RESPONSE_MESSAGES:
            message = _HTTP_BAD_RESPONSE_MESSAGES[status]
        else:
            self._logger.debug('Response message for %d is not found' % status)
            message = '???'

        response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
        frame_data = _create_add_channel_response(channel_id,
                                                  encoded_handshake=response,
                                                  encoding=0, rejected=True)
        self.send_control_data(frame_data)

    def _create_logical_request(self, block):
        # Builds a _LogicalRequest out of an AddChannelRequest block,
        # decoding the (possibly delta-encoded) handshake text.
        if block.channel_id == _CONTROL_CHANNEL_ID:
            # TODO(bashi): Raise PhysicalConnectionError with code 2006
            # instead of MuxUnexpectedException.
            raise MuxUnexpectedException(
                'Received the control channel id (0) as objective channel '
                'id for AddChannel')

        if block.encoding > _HANDSHAKE_ENCODING_DELTA:
            raise PhysicalConnectionError(
                _DROP_CODE_UNKNOWN_REQUEST_ENCODING)

        method, path, version, headers = _parse_request_text(
            block.encoded_handshake)
        if block.encoding == _HANDSHAKE_ENCODING_DELTA:
            headers = self._handshake_base.create_headers(headers)

        connection = _LogicalConnection(self, block.channel_id)
        request = _LogicalRequest(block.channel_id, method, path, version,
                                  headers, connection)
        return request

    def _do_handshake_for_logical_request(self, request, send_quota=0):
        # Consuming a channel slot; raises if the client exceeded its
        # allotted slots.
        try:
            receive_quota = self._channel_slots.popleft()
        except IndexError:
            raise LogicalChannelError(
                request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)

        handshaker = _MuxHandshaker(request, self.dispatcher,
                                    send_quota, receive_quota)
        try:
            handshaker.do_handshake()
        except handshake.VersionException, e:
            self._logger.info('%s', e)
            self._send_error_add_channel_response(
                request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
            return False
        except handshake.HandshakeException, e:
            # TODO(bashi): Should we _Fail the Logical Channel_ with 3001
            # instead?
            self._logger.info('%s', e)
            self._send_error_add_channel_response(request.channel_id,
                                                  status=e.status)
            return False
        except handshake.AbortedByUserException, e:
            self._logger.info('%s', e)
            self._send_error_add_channel_response(request.channel_id)
            return False

        return True

    def _add_logical_channel(self, logical_request):
        # Registers the channel and starts its worker thread atomically
        # with respect to other channel-table users.
        try:
            self._logical_channels_condition.acquire()
            if logical_request.channel_id in self._logical_channels:
                self._logger.debug('Channel id %d already exists' %
                                   logical_request.channel_id)
                raise PhysicalConnectionError(
                    _DROP_CODE_CHANNEL_ALREADY_EXISTS,
                    'Channel id %d already exists' %
                    logical_request.channel_id)
            worker = _Worker(self, logical_request)
            channel_data = _LogicalChannelData(logical_request, worker)
            self._logical_channels[logical_request.channel_id] = channel_data
            worker.start()
        finally:
            self._logical_channels_condition.release()

    def _process_add_channel_request(self, block):
        try:
            logical_request = self._create_logical_request(block)
        except ValueError, e:
            self._logger.debug('Failed to create logical request: %r' % e)
            self._send_error_add_channel_response(
                block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
            return
        if self._do_handshake_for_logical_request(logical_request):
            if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
                # Update handshake base.
                # TODO(bashi): Make sure this is the right place to update
                # handshake base.
                self._handshake_base = _HandshakeDeltaBase(
                    logical_request.headers_in)
            self._add_logical_channel(logical_request)
        else:
            self._send_error_add_channel_response(
                block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)

    def _process_flow_control(self, block):
        # Replenishes the per-channel send quota; silently ignores
        # FlowControl for channels that no longer exist.
        try:
            self._logical_channels_condition.acquire()
            if not block.channel_id in self._logical_channels:
                return
            channel_data = self._logical_channels[block.channel_id]
            channel_data.request.ws_stream.replenish_send_quota(
                block.send_quota)
        finally:
            self._logical_channels_condition.release()

    def _process_drop_channel(self, block):
        self._logger.debug(
            'DropChannel received for %d: code=%r, reason=%r' %
            (block.channel_id, block.drop_code, block.drop_message))
        try:
            self._logical_channels_condition.acquire()
            if not block.channel_id in self._logical_channels:
                return
            channel_data = self._logical_channels[block.channel_id]
            # Client initiated the drop, so our eventual DropChannel is an
            # acknowledgement.
            channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
            # Close the logical channel
            channel_data.request.connection.set_read_state(
                _LogicalConnection.STATE_TERMINATED)
        finally:
            self._logical_channels_condition.release()

    def _process_control_blocks(self, parser):
        # Handles each control block of a channel-0 message. Opcodes that
        # only a server may send are physical-connection errors here.
        for control_block in parser.read_control_blocks():
            opcode = control_block.opcode
            self._logger.debug('control block received, opcode: %d' % opcode)
            if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
                self._process_add_channel_request(control_block)
            elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
                raise PhysicalConnectionError(
                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                    'Received AddChannelResponse')
            elif opcode == _MUX_OPCODE_FLOW_CONTROL:
                self._process_flow_control(control_block)
            elif opcode == _MUX_OPCODE_DROP_CHANNEL:
                self._process_drop_channel(control_block)
            elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
                raise PhysicalConnectionError(
                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                    'Received NewChannelSlot')
            else:
                raise MuxUnexpectedException(
                    'Unexpected opcode %r' % opcode)

    def _process_logical_frame(self, channel_id, parser):
        self._logger.debug('Received a frame. channel id=%d' % channel_id)
        try:
            self._logical_channels_condition.acquire()
            if not channel_id in self._logical_channels:
                # We must ignore the message for an inactive channel.
                return
            channel_data = self._logical_channels[channel_id]
            fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
            if not channel_data.request.ws_stream.consume_receive_quota(
                len(payload)):
                # The client violates quota. Close logical channel.
                raise LogicalChannelError(
                    channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
            # Re-frame the payload as an ordinary (unmasked) WebSocket frame
            # and hand it to the logical connection's read buffer.
            header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
                                   mask=False)
            frame_data = header + payload
            channel_data.request.connection.append_frame_data(frame_data)
        finally:
            self._logical_channels_condition.release()

    def dispatch_message(self, message):
        """Dispatches message. The reader thread calls this method.

        Args:
            message: a message that contains encapsulated frame.
        Raises:
            PhysicalConnectionError: if the message contains physical
                connection level errors.
            LogicalChannelError: if the message contains logical channel
                level errors.
        """

        parser = _MuxFramePayloadParser(message)
        try:
            channel_id = parser.read_channel_id()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
        if channel_id == _CONTROL_CHANNEL_ID:
            self._process_control_blocks(parser)
        else:
            self._process_logical_frame(channel_id, parser)

    def notify_worker_done(self, channel_id):
        """Called when a worker has finished.

        Args:
            channel_id: channel id corresponded with the worker.
        """

        self._logger.debug('Worker for channel id %d terminated' % channel_id)
        try:
            self._logical_channels_condition.acquire()
            if not channel_id in self._logical_channels:
                raise MuxUnexpectedException(
                    'Channel id %d not found' % channel_id)
            channel_data = self._logical_channels.pop(channel_id)
        finally:
            # Wake wait_until_done() even on the error path so it can
            # re-check the channel table.
            self._worker_done_notify_received = True
            self._logical_channels_condition.notify()
            self._logical_channels_condition.release()

        if not channel_data.request.server_terminated:
            self._send_drop_channel(
                channel_id, code=channel_data.drop_code,
                message=channel_data.drop_message)

    def notify_reader_done(self):
        """This method is called by the reader thread when the reader has
        finished.
        """

        # Terminate all logical connections
        # NOTE(review): 'termiating' is a typo in this log message; left
        # unchanged here (comment-only edit).
        self._logger.debug('termiating all logical connections...')
        self._logical_channels_condition.acquire()
        for channel_data in self._logical_channels.values():
            try:
                channel_data.request.connection.set_read_state(
                    _LogicalConnection.STATE_TERMINATED)
            except Exception:
                # Best effort: keep terminating the remaining channels.
                pass
        self._logical_channels_condition.release()

    def fail_physical_connection(self, code, message):
        """Fail the physical connection.

        Args:
            code: drop reason code.
            message: drop message.
        """

        self._logger.debug('Failing the physical connection...')
        self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
        self.physical_stream.close_connection(
            common.STATUS_INTERNAL_ENDPOINT_ERROR)

    def fail_logical_channel(self, channel_id, code, message):
        """Fail a logical channel.

        Args:
            channel_id: channel id.
            code: drop reason code.
            message: drop message.
        """

        self._logger.debug('Failing logical channel %d...' % channel_id)
        try:
            self._logical_channels_condition.acquire()
            if channel_id in self._logical_channels:
                channel_data = self._logical_channels[channel_id]
                # Close the logical channel. notify_worker_done() will be
                # called later and it will send DropChannel.
                channel_data.drop_code = code
                channel_data.drop_message = message
                channel_data.request.connection.set_read_state(
                    _LogicalConnection.STATE_TERMINATED)
            else:
                # No worker exists to report the drop; send it directly.
                self._send_drop_channel(channel_id, code, message)
        finally:
            self._logical_channels_condition.release()
+
+
def use_mux(request):
    """Return the request's 'mux' attribute value, or False when the
    attribute is absent."""

    if not hasattr(request, 'mux'):
        return False
    return request.mux
+
+
def start(request, dispatcher):
    """Start multiplexing on the given physical connection.

    Creates and starts a _MuxHandler, grants the client its initial channel
    slots, then blocks until every logical-channel worker has finished.
    """

    handler = _MuxHandler(request, dispatcher)
    handler.start()
    handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
                              _INITIAL_QUOTA_FOR_CLIENT)
    handler.wait_until_done()
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/standalone.py b/pyload/lib/mod_pywebsocket/standalone.py
new file mode 100755
index 000000000..07a33d9c9
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/standalone.py
@@ -0,0 +1,998 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Standalone WebSocket server.
+
+Use this file to launch pywebsocket without Apache HTTP Server.
+
+
+BASIC USAGE
+
+Go to the src directory and run
+
+ $ python mod_pywebsocket/standalone.py [-p <ws_port>]
+ [-w <websock_handlers>]
+ [-d <document_root>]
+
+<ws_port> is the port number to use for ws:// connection.
+
+<document_root> is the path to the root directory of HTML files.
+
+<websock_handlers> is the path to the root directory of WebSocket handlers.
+If not specified, <document_root> will be used. See __init__.py (or
+run $ pydoc mod_pywebsocket) for how to write WebSocket handlers.
+
+For more detail and other options, run
+
+ $ python mod_pywebsocket/standalone.py --help
+
+or see _build_option_parser method below.
+
+For troubleshooting, adding "--log_level debug" might help you.
+
+
+TRY DEMO
+
+Go to the src directory and run
+
+ $ python standalone.py -d example
+
+to launch pywebsocket with the sample handler and html on port 80. Open
+http://localhost/console.html, click the connect button, type something into
+the text box next to the send button and click the send button. If everything
+is working, you'll see the message you typed echoed by the server.
+
+
+SUPPORTING TLS
+
+To support TLS, run standalone.py with -t, -k, and -c options.
+
+
+SUPPORTING CLIENT AUTHENTICATION
+
+To support client authentication with TLS, run standalone.py with -t, -k, -c,
+and --tls-client-auth, and --tls-client-ca options.
+
+E.g., $./standalone.py -d ../example -p 10443 -t -c ../test/cert/cert.pem -k
+../test/cert/key.pem --tls-client-auth --tls-client-ca=../test/cert/cacert.pem
+
+
+CONFIGURATION FILE
+
+You can also write a configuration file and use it by specifying the path to
+the configuration file by --config option. Please write a configuration file
+following the documentation of the Python ConfigParser library. Name of each
+entry must be the long version argument name. E.g. to set log level to debug,
+add the following line:
+
+log_level=debug
+
+For options which don't take a value, please add some fake value. E.g. for
+the --tls option, add the following line:
+
+tls=True
+
+Note that tls will be enabled even if you write tls=False as the value part is
+fake.
+
+When both a command line argument and a configuration file entry are set for
+the same configuration item, the command line value will override one in the
+configuration file.
+
+
+THREADING
+
+This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
+used for each request.
+
+
+SECURITY WARNING
+
+This uses CGIHTTPServer and CGIHTTPServer is not secure.
+It may execute arbitrary Python code or external programs. It should not be
+used outside a firewall.
+"""
+
+import BaseHTTPServer
+import CGIHTTPServer
+import SimpleHTTPServer
+import SocketServer
+import ConfigParser
+import base64
+import httplib
+import logging
+import logging.handlers
+import optparse
+import os
+import re
+import select
+import socket
+import sys
+import threading
+import time
+
+_HAS_SSL = False
+_HAS_OPEN_SSL = False
+try:
+ import ssl
+ _HAS_SSL = True
+except ImportError:
+ try:
+ import OpenSSL.SSL
+ _HAS_OPEN_SSL = True
+ except ImportError:
+ pass
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import http_header_util
+from mod_pywebsocket import memorizingfile
+from mod_pywebsocket import util
+
+
+_DEFAULT_LOG_MAX_BYTES = 1024 * 256
+_DEFAULT_LOG_BACKUP_COUNT = 5
+
+_DEFAULT_REQUEST_QUEUE_SIZE = 128
+
+# 1024 is practically large enough to contain WebSocket handshake lines.
+_MAX_MEMORIZED_LINES = 1024
+
+
class _StandaloneConnection(object):
    """Mimic mod_python mp_conn."""

    def __init__(self, request_handler):
        """Construct an instance.

        Args:
            request_handler: A WebSocketRequestHandler instance.
        """

        self._request_handler = request_handler

    def get_local_addr(self):
        """Getter to mimic mp_conn.local_addr."""

        return (self._request_handler.server.server_name,
                self._request_handler.server.server_port)
    # Exposed as a property so callers can use mp_conn-style attribute
    # access; kept as an explicit property() call (pre-decorator style).
    local_addr = property(get_local_addr)

    def get_remote_addr(self):
        """Getter to mimic mp_conn.remote_addr.

        Setting the property in __init__ won't work because the request
        handler is not initialized yet there."""

        return self._request_handler.client_address
    remote_addr = property(get_remote_addr)

    def write(self, data):
        """Mimic mp_conn.write()."""

        return self._request_handler.wfile.write(data)

    def read(self, length):
        """Mimic mp_conn.read()."""

        return self._request_handler.rfile.read(length)

    def get_memorized_lines(self):
        """Get memorized lines."""

        # rfile is a memorizingfile wrapper; this returns the handshake
        # lines it recorded.
        return self._request_handler.rfile.get_memorized_lines()
+
+
class _StandaloneRequest(object):
    """Mimic mod_python request."""

    def __init__(self, request_handler, use_tls):
        """Construct an instance.

        Args:
            request_handler: A WebSocketRequestHandler instance.
            use_tls: True when the connection is served over TLS.
        """

        self._logger = util.get_class_logger(self)

        self._request_handler = request_handler
        self.connection = _StandaloneConnection(request_handler)
        self._use_tls = use_tls
        self.headers_in = request_handler.headers

    def get_uri(self):
        """Getter to mimic request.uri."""

        return self._request_handler.path
    uri = property(get_uri)

    def get_method(self):
        """Getter to mimic request.method."""

        return self._request_handler.command
    method = property(get_method)

    def get_protocol(self):
        """Getter to mimic request.protocol."""

        return self._request_handler.request_version
    protocol = property(get_protocol)

    def is_https(self):
        """Mimic request.is_https()."""

        return self._use_tls

    def _drain_received_data(self):
        """Don't use this method from WebSocket handler. Drains unread data
        in the receive buffer.
        """

        # Operates on the raw socket underneath the buffered rfile.
        raw_socket = self._request_handler.connection
        drained_data = util.drain_received_data(raw_socket)

        if drained_data:
            self._logger.debug(
                'Drained data following close frame: %r', drained_data)
+
+
class _StandaloneSSLConnection(object):
    """A wrapper class for OpenSSL.SSL.Connection to provide makefile method
    which is not supported by the class.
    """

    def __init__(self, connection):
        # connection: an OpenSSL.SSL.Connection instance being wrapped.
        self._connection = connection

    def __getattribute__(self, name):
        # Only '_connection' and 'makefile' resolve on the wrapper itself;
        # everything else is transparently delegated to the wrapped
        # connection.
        if name in ('_connection', 'makefile'):
            return object.__getattribute__(self, name)
        return self._connection.__getattribute__(name)

    def __setattr__(self, name, value):
        # Mirror of __getattribute__: writes to anything but the two wrapper
        # names go to the wrapped connection.
        if name in ('_connection', 'makefile'):
            return object.__setattr__(self, name, value)
        return self._connection.__setattr__(name, value)

    def makefile(self, mode='r', bufsize=-1):
        # socket._fileobject is a CPython-internal helper that builds a
        # file-like object over any recv/send object.
        return socket._fileobject(self._connection, mode, bufsize)
+
+
+def _alias_handlers(dispatcher, websock_handlers_map_file):
+ """Set aliases specified in websock_handler_map_file in dispatcher.
+
+ Args:
+ dispatcher: dispatch.Dispatcher instance
+ websock_handler_map_file: alias map file
+ """
+
+ fp = open(websock_handlers_map_file)
+ try:
+ for line in fp:
+ if line[0] == '#' or line.isspace():
+ continue
+ m = re.match('(\S+)\s+(\S+)', line)
+ if not m:
+ logging.warning('Wrong format in map file:' + line)
+ continue
+ try:
+ dispatcher.add_resource_path_alias(
+ m.group(1), m.group(2))
+ except dispatch.DispatchException, e:
+ logging.error(str(e))
+ finally:
+ fp.close()
+
+
+class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+ """HTTPServer specialized for WebSocket."""
+
+ # Overrides SocketServer.ThreadingMixIn.daemon_threads
+ daemon_threads = True
+ # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
+ allow_reuse_address = True
+
    def __init__(self, options):
        """Override SocketServer.TCPServer.__init__ to set SSL enabled
        socket object to self.socket before server_bind and server_activate,
        if necessary.

        Args:
            options: option object carrying server configuration (port,
                handler roots, TLS settings, etc.). It is mutated: a shared
                Dispatcher is attached as options.dispatcher.
        """

        # Share a Dispatcher among request handlers to save time for
        # instantiation. Dispatcher can be shared because it is thread-safe.
        options.dispatcher = dispatch.Dispatcher(
            options.websock_handlers,
            options.scan_dir,
            options.allow_handlers_outside_root_dir)
        if options.websock_handlers_map_file:
            _alias_handlers(options.dispatcher,
                            options.websock_handlers_map_file)
        warnings = options.dispatcher.source_warnings()
        if warnings:
            for warning in warnings:
                logging.warning('mod_pywebsocket: %s' % warning)

        self._logger = util.get_class_logger(self)

        self.request_queue_size = options.request_queue_size
        self.__ws_is_shut_down = threading.Event()
        self.__ws_serving = False

        # Deliberately BaseServer (not TCPServer) __init__: socket creation,
        # bind and activate are done manually below to support multiple
        # sockets (IPv4 + IPv6) and TLS wrapping.
        SocketServer.BaseServer.__init__(
            self, (options.server_host, options.port), WebSocketRequestHandler)

        # Expose the options object to allow handler objects access it. We name
        # it with websocket_ prefix to avoid conflict.
        self.websocket_server_options = options

        self._create_sockets()
        self.server_bind()
        self.server_activate()
+
    def _create_sockets(self):
        # Creates one socket per address family (possibly wrapped in TLS)
        # and stores (socket, addrinfo) pairs in self._sockets.
        self.server_name, self.server_port = self.server_address
        self._sockets = []
        if not self.server_name:
            # On platforms that don't support IPv6, the first bind fails.
            # On platforms that support IPv6
            # - If it binds both IPv4 and IPv6 on call with AF_INET6, the
            #   first bind succeeds and the second fails (we'll see 'Address
            #   already in use' error).
            # - If it binds only IPv6 on call with AF_INET6, both calls are
            #   expected to succeed to listen both protocol.
            addrinfo_array = [
                (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
                (socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
        else:
            addrinfo_array = socket.getaddrinfo(self.server_name,
                                                self.server_port,
                                                socket.AF_UNSPEC,
                                                socket.SOCK_STREAM,
                                                socket.IPPROTO_TCP)
        for addrinfo in addrinfo_array:
            self._logger.info('Create socket on: %r', addrinfo)
            family, socktype, proto, canonname, sockaddr = addrinfo
            try:
                socket_ = socket.socket(family, socktype)
            except Exception, e:
                # Skip unsupported families instead of failing the server.
                self._logger.info('Skip by failure: %r', e)
                continue
            if self.websocket_server_options.use_tls:
                # _HAS_SSL and _HAS_OPEN_SSL are mutually exclusive: OpenSSL
                # is only tried when the ssl module failed to import.
                if _HAS_SSL:
                    if self.websocket_server_options.tls_client_auth:
                        client_cert_ = ssl.CERT_REQUIRED
                    else:
                        client_cert_ = ssl.CERT_NONE
                    socket_ = ssl.wrap_socket(socket_,
                        keyfile=self.websocket_server_options.private_key,
                        certfile=self.websocket_server_options.certificate,
                        ssl_version=ssl.PROTOCOL_SSLv23,
                        ca_certs=self.websocket_server_options.tls_client_ca,
                        cert_reqs=client_cert_)
                if _HAS_OPEN_SSL:
                    ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
                    ctx.use_privatekey_file(
                        self.websocket_server_options.private_key)
                    ctx.use_certificate_file(
                        self.websocket_server_options.certificate)
                    socket_ = OpenSSL.SSL.Connection(ctx, socket_)
            self._sockets.append((socket_, addrinfo))
+
    def server_bind(self):
        """Override SocketServer.TCPServer.server_bind to enable multiple
        sockets bind.

        Sockets that fail to bind are closed and removed from self._sockets
        rather than aborting the server.
        """

        failed_sockets = []

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.info('Bind on: %r', addrinfo)
            if self.allow_reuse_address:
                socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                socket_.bind(self.server_address)
            except Exception, e:
                self._logger.info('Skip by failure: %r', e)
                socket_.close()
                # Can't remove while iterating; collect and remove below.
                failed_sockets.append(socketinfo)
            if self.server_address[1] == 0:
                # The operating system assigns the actual port number for port
                # number 0. This case, the second and later sockets should use
                # the same port number. Also self.server_port is rewritten
                # because it is exported, and will be used by external code.
                self.server_address = (
                    self.server_name, socket_.getsockname()[1])
                self.server_port = self.server_address[1]
                self._logger.info('Port %r is assigned', self.server_port)

        for socketinfo in failed_sockets:
            self._sockets.remove(socketinfo)
+
+ def server_activate(self):
+ """Override SocketServer.TCPServer.server_activate to enable multiple
+ sockets listen.
+ """
+
+ failed_sockets = []
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Listen on: %r', addrinfo)
+ try:
+ socket_.listen(self.request_queue_size)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ socket_.close()
+ failed_sockets.append(socketinfo)
+
+ for socketinfo in failed_sockets:
+ self._sockets.remove(socketinfo)
+
+ if len(self._sockets) == 0:
+ self._logger.critical(
+ 'No sockets activated. Use info log level to see the reason.')
+
+ def server_close(self):
+ """Override SocketServer.TCPServer.server_close to enable multiple
+ sockets close.
+ """
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Close on: %r', addrinfo)
+ socket_.close()
+
+ def fileno(self):
+ """Override SocketServer.TCPServer.fileno."""
+
+ self._logger.critical('Not supported: fileno')
+ return self._sockets[0][0].fileno()
+
+ def handle_error(self, rquest, client_address):
+ """Override SocketServer.handle_error."""
+
+ self._logger.error(
+ 'Exception in processing request from: %r\n%s',
+ client_address,
+ util.get_stack_trace())
+ # Note: client_address is a tuple.
+
    def get_request(self):
        """Override TCPServer.get_request to wrap OpenSSL.SSL.Connection
        object with _StandaloneSSLConnection to provide makefile method. We
        cannot substitute OpenSSL.SSL.Connection.makefile since it's readonly
        attribute.

        Note: self.socket is pointed at the currently readable listening
        socket by serve_forever before each handle_request call.
        """

        accepted_socket, client_address = self.socket.accept()
        if self.websocket_server_options.use_tls and _HAS_OPEN_SSL:
            accepted_socket = _StandaloneSSLConnection(accepted_socket)
        return accepted_socket, client_address
+
    def serve_forever(self, poll_interval=0.5):
        """Override SocketServer.BaseServer.serve_forever.

        Multiplexes all listening sockets with select and dispatches each
        readable one through handle_request until shutdown() is called.

        Args:
            poll_interval: select timeout in seconds; bounds how quickly a
                shutdown() request is noticed.
        """

        self.__ws_serving = True
        self.__ws_is_shut_down.clear()
        handle_request = self.handle_request
        # Prefer the non-blocking request handler when this SocketServer
        # version provides one, so a connection that disappears between
        # select and accept cannot stall the loop.
        if hasattr(self, '_handle_request_noblock'):
            handle_request = self._handle_request_noblock
        else:
            self._logger.warning('Fallback to blocking request handler')
        try:
            while self.__ws_serving:
                r, w, e = select.select(
                    [socket_[0] for socket_ in self._sockets],
                    [], [], poll_interval)
                for socket_ in r:
                    # get_request reads self.socket; point it at the socket
                    # that select reported readable for this request.
                    self.socket = socket_
                    handle_request()
                    self.socket = None
        finally:
            # Signal shutdown() that the loop has fully stopped.
            self.__ws_is_shut_down.set()
+
    def shutdown(self):
        """Override SocketServer.BaseServer.shutdown.

        Asks the serve_forever loop to stop, then blocks until the loop
        confirms it has exited (via the __ws_is_shut_down event).
        """

        self.__ws_serving = False
        self.__ws_is_shut_down.wait()
+
+
class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
    """CGIHTTPRequestHandler specialized for WebSocket.

    parse_request is hooked so that a request whose path has a registered
    WebSocket handler goes through the handshake/data-transfer code, while
    any other request falls back to regular CGI / static-file handling.
    """

    # Use httplib.HTTPMessage instead of mimetools.Message.
    MessageClass = httplib.HTTPMessage

    def setup(self):
        """Override SocketServer.StreamRequestHandler.setup to wrap rfile
        with MemorizingFile.

        This method will be called by BaseRequestHandler's constructor
        before calling BaseHTTPRequestHandler.handle.
        BaseHTTPRequestHandler.handle will call
        BaseHTTPRequestHandler.handle_one_request and it will call
        WebSocketRequestHandler.parse_request.
        """

        # Call superclass's setup to prepare rfile, wfile, etc. See setup
        # definition on the root class SocketServer.StreamRequestHandler to
        # understand what this does.
        CGIHTTPServer.CGIHTTPRequestHandler.setup(self)

        # Remember the request lines read so far so the handshake code can
        # re-examine them later.
        self.rfile = memorizingfile.MemorizingFile(
            self.rfile,
            max_memorized_lines=_MAX_MEMORIZED_LINES)

    def __init__(self, request, client_address, server):
        # Per-request configuration copied from the owning server.
        self._logger = util.get_class_logger(self)

        self._options = server.websocket_server_options

        # Overrides CGIHTTPRequestHandler.cgi_directories.
        self.cgi_directories = self._options.cgi_directories
        # Replace CGIHTTPRequestHandler.is_executable method.
        if self._options.is_executable_method is not None:
            self.is_executable = self._options.is_executable_method

        # This actually calls BaseRequestHandler.__init__.
        CGIHTTPServer.CGIHTTPRequestHandler.__init__(
            self, request, client_address, server)

    def parse_request(self):
        """Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request.

        Return True to continue processing for HTTP(S), False otherwise.

        See BaseHTTPRequestHandler.handle_one_request method which calls
        this method to understand how the return value will be handled.
        """

        # We hook parse_request method, but also call the original
        # CGIHTTPRequestHandler.parse_request since when we return False,
        # CGIHTTPRequestHandler.handle_one_request continues processing and
        # it needs variables set by CGIHTTPRequestHandler.parse_request.
        #
        # Variables set by this method will be also used by WebSocket request
        # handling (self.path, self.command, self.requestline, etc. See also
        # how _StandaloneRequest's members are implemented using these
        # attributes).
        if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self):
            return False

        if self._options.use_basic_auth:
            auth = self.headers.getheader('Authorization')
            if auth != self._options.basic_auth_credential:
                self.send_response(401)
                self.send_header('WWW-Authenticate',
                                 'Basic realm="Pywebsocket"')
                self.end_headers()
                self._logger.info('Request basic authentication')
                # NOTE(review): True is returned here, so the base class
                # continues HTTP processing even after the 401 challenge —
                # confirm this is intended (False would stop the request).
                return True

        host, port, resource = http_header_util.parse_uri(self.path)
        if resource is None:
            self._logger.info('Invalid URI: %r', self.path)
            self._logger.info('Fallback to CGIHTTPRequestHandler')
            return True
        server_options = self.server.websocket_server_options
        if host is not None:
            validation_host = server_options.validation_host
            if validation_host is not None and host != validation_host:
                self._logger.info('Invalid host: %r (expected: %r)',
                                  host,
                                  validation_host)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        if port is not None:
            validation_port = server_options.validation_port
            if validation_port is not None and port != validation_port:
                self._logger.info('Invalid port: %r (expected: %r)',
                                  port,
                                  validation_port)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        # From here on self.path holds only the resource part of the URI.
        self.path = resource

        request = _StandaloneRequest(self, self._options.use_tls)

        try:
            # Fallback to default http handler for request paths for which
            # we don't have request handlers.
            if not self._options.dispatcher.get_handler_suite(self.path):
                self._logger.info('No handler for resource: %r',
                                  self.path)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        except dispatch.DispatchException, e:
            self._logger.info('%s', e)
            self.send_error(e.status)
            return False

        # If any Exceptions without except clause setup (including
        # DispatchException) is raised below this point, it will be caught
        # and logged by WebSocketServer.

        try:
            try:
                handshake.do_handshake(
                    request,
                    self._options.dispatcher,
                    allowDraft75=self._options.allow_draft75,
                    strict=self._options.strict)
            except handshake.VersionException, e:
                # Tell the client which protocol versions are supported so
                # it can retry the handshake.
                self._logger.info('%s', e)
                self.send_response(common.HTTP_STATUS_BAD_REQUEST)
                self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
                                 e.supported_versions)
                self.end_headers()
                return False
            except handshake.HandshakeException, e:
                # Handshake for ws(s) failed.
                self._logger.info('%s', e)
                self.send_error(e.status)
                return False

            request._dispatcher = self._options.dispatcher
            self._options.dispatcher.transfer_data(request)
        except handshake.AbortedByUserException, e:
            self._logger.info('%s', e)
            return False
        # The WebSocket connection has been handled completely at this
        # point; falling through returns None (falsy) so handle_one_request
        # does not continue regular HTTP processing.

    def log_request(self, code='-', size='-'):
        """Override BaseHTTPServer.log_request so access lines go through
        this handler's logger instead of stderr.
        """

        self._logger.info('"%s" %s %s',
                          self.requestline, str(code), str(size))

    def log_error(self, *args):
        """Override BaseHTTPServer.log_error."""

        # Despite the name, this method is used for warnings rather than
        # errors. For example, the HTTP status code is logged by this method.
        self._logger.warning('%s - %s',
                             self.address_string(),
                             args[0] % args[1:])

    def is_cgi(self):
        """Test whether self.path corresponds to a CGI script.

        Adds an extra check that self.path does not contain '..', and also
        checks whether the target file is executable. If the file is not
        executable, it is handled as a static file or directory rather
        than a CGI script.
        """

        if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self):
            if '..' in self.path:
                return False
            # strip query parameter from request path
            resource_name = self.path.split('?', 2)[0]
            # convert resource_name into real path name in filesystem.
            scriptfile = self.translate_path(resource_name)
            if not os.path.isfile(scriptfile):
                return False
            if not self.is_executable(scriptfile):
                return False
            return True
        return False
+
+
def _get_logger_from_class(c):
    """Return the logger named after class c ('module.ClassName')."""
    logger_name = '%s.%s' % (c.__module__, c.__name__)
    return logging.getLogger(logger_name)
+
+
def _configure_logging(options):
    """Configure the root logger from parsed command-line options.

    Registers the custom FINE level, installs either a rotating file
    handler (when --log-file is given) or a stderr stream handler, and
    applies the separate --deflate-log-level to the deflate helpers.
    """

    logging.addLevelName(common.LOGLEVEL_FINE, 'FINE')

    logger = logging.getLogger()
    logger.setLevel(logging.getLevelName(options.log_level.upper()))
    if options.log_file:
        handler = logging.handlers.RotatingFileHandler(
            options.log_file, 'a', options.log_max, options.log_count)
    else:
        handler = logging.StreamHandler()
    formatter = logging.Formatter(
        '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # _Deflater/_Inflater are chatty; they get their own level so frame
    # compression debugging can be toggled independently of the rest.
    deflate_log_level_name = logging.getLevelName(
        options.deflate_log_level.upper())
    _get_logger_from_class(util._Deflater).setLevel(
        deflate_log_level_name)
    _get_logger_from_class(util._Inflater).setLevel(
        deflate_log_level_name)
+
+
def _build_option_parser():
    """Build and return the optparse parser for the standalone server.

    Fixes two user-facing help-string defects from the original: the
    'inteval' typo in --thread-monitor-interval-in-sec and a missing space
    in the --cgi-paths help ('document_root.Comma-separated').
    """
    parser = optparse.OptionParser()

    parser.add_option('--config', dest='config_file', type='string',
                      default=None,
                      help=('Path to configuration file. See the file comment '
                            'at the top of this file for the configuration '
                            'file format'))
    parser.add_option('-H', '--server-host', '--server_host',
                      dest='server_host',
                      default='',
                      help='server hostname to listen to')
    parser.add_option('-V', '--validation-host', '--validation_host',
                      dest='validation_host',
                      default=None,
                      help='server hostname to validate in absolute path.')
    parser.add_option('-p', '--port', dest='port', type='int',
                      default=common.DEFAULT_WEB_SOCKET_PORT,
                      help='port to listen to')
    parser.add_option('-P', '--validation-port', '--validation_port',
                      dest='validation_port', type='int',
                      default=None,
                      help='server port to validate in absolute path.')
    parser.add_option('-w', '--websock-handlers', '--websock_handlers',
                      dest='websock_handlers',
                      default='.',
                      help=('The root directory of WebSocket handler files. '
                            'If the path is relative, --document-root is used '
                            'as the base.'))
    parser.add_option('-m', '--websock-handlers-map-file',
                      '--websock_handlers_map_file',
                      dest='websock_handlers_map_file',
                      default=None,
                      help=('WebSocket handlers map file. '
                            'Each line consists of alias_resource_path and '
                            'existing_resource_path, separated by spaces.'))
    parser.add_option('-s', '--scan-dir', '--scan_dir', dest='scan_dir',
                      default=None,
                      help=('Must be a directory under --websock-handlers. '
                            'Only handlers under this directory are scanned '
                            'and registered to the server. '
                            'Useful for saving scan time when the handler '
                            'root directory contains lots of files that are '
                            'not handler file or are handler files but you '
                            'don\'t want them to be registered. '))
    parser.add_option('--allow-handlers-outside-root-dir',
                      '--allow_handlers_outside_root_dir',
                      dest='allow_handlers_outside_root_dir',
                      action='store_true',
                      default=False,
                      help=('Scans WebSocket handlers even if their canonical '
                            'path is not under --websock-handlers.'))
    parser.add_option('-d', '--document-root', '--document_root',
                      dest='document_root', default='.',
                      help='Document root directory.')
    parser.add_option('-x', '--cgi-paths', '--cgi_paths', dest='cgi_paths',
                      default=None,
                      help=('CGI paths relative to document_root. '
                            'Comma-separated. (e.g -x /cgi,/htbin) '
                            'Files under document_root/cgi_path are handled '
                            'as CGI programs. Must be executable.'))
    parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
                      default=False, help='use TLS (wss://)')
    parser.add_option('-k', '--private-key', '--private_key',
                      dest='private_key',
                      default='', help='TLS private key file.')
    parser.add_option('-c', '--certificate', dest='certificate',
                      default='', help='TLS certificate file.')
    parser.add_option('--tls-client-auth', dest='tls_client_auth',
                      action='store_true', default=False,
                      help='Requires TLS client auth on every connection.')
    parser.add_option('--tls-client-ca', dest='tls_client_ca', default='',
                      help=('Specifies a pem file which contains a set of '
                            'concatenated CA certificates which are used to '
                            'validate certificates passed from clients'))
    parser.add_option('--basic-auth', dest='use_basic_auth',
                      action='store_true', default=False,
                      help='Requires Basic authentication.')
    parser.add_option('--basic-auth-credential',
                      dest='basic_auth_credential', default='test:test',
                      help='Specifies the credential of basic authentication '
                           'by username:password pair (e.g. test:test).')
    parser.add_option('-l', '--log-file', '--log_file', dest='log_file',
                      default='', help='Log file.')
    # Custom log level:
    # - FINE: Prints status of each frame processing step
    parser.add_option('--log-level', '--log_level', type='choice',
                      dest='log_level', default='warn',
                      choices=['fine',
                               'debug', 'info', 'warning', 'warn', 'error',
                               'critical'],
                      help='Log level.')
    parser.add_option('--deflate-log-level', '--deflate_log_level',
                      type='choice',
                      dest='deflate_log_level', default='warn',
                      choices=['debug', 'info', 'warning', 'warn', 'error',
                               'critical'],
                      help='Log level for _Deflater and _Inflater.')
    parser.add_option('--thread-monitor-interval-in-sec',
                      '--thread_monitor_interval_in_sec',
                      dest='thread_monitor_interval_in_sec',
                      type='int', default=-1,
                      help=('If positive integer is specified, run a thread '
                            'monitor to show the status of server threads '
                            'periodically in the specified interval in '
                            'second. If non-positive integer is specified, '
                            'disable the thread monitor.'))
    parser.add_option('--log-max', '--log_max', dest='log_max', type='int',
                      default=_DEFAULT_LOG_MAX_BYTES,
                      help='Log maximum bytes')
    parser.add_option('--log-count', '--log_count', dest='log_count',
                      type='int', default=_DEFAULT_LOG_BACKUP_COUNT,
                      help='Log backup count')
    parser.add_option('--allow-draft75', dest='allow_draft75',
                      action='store_true', default=False,
                      help='Obsolete option. Ignored.')
    parser.add_option('--strict', dest='strict', action='store_true',
                      default=False, help='Obsolete option. Ignored.')
    parser.add_option('-q', '--queue', dest='request_queue_size', type='int',
                      default=_DEFAULT_REQUEST_QUEUE_SIZE,
                      help='request queue size')

    return parser
+
+
class ThreadMonitor(threading.Thread):
    """Daemon thread that periodically logs the names of live threads."""

    daemon = True

    def __init__(self, interval_in_sec):
        """Create the monitor; interval_in_sec is the sleep between dumps."""
        threading.Thread.__init__(self, name='ThreadMonitor')

        self._logger = util.get_class_logger(self)
        self._interval_in_sec = interval_in_sec

    def run(self):
        # Loop forever; the daemon flag lets interpreter shutdown kill us.
        while True:
            names = [thread.name for thread in threading.enumerate()]
            self._logger.info(
                "%d active threads: %s",
                threading.active_count(),
                ', '.join(names))
            time.sleep(self._interval_in_sec)
+
+
def _parse_args_and_config(args):
    """Parse command-line arguments, folding in a configuration file.

    First parses args alone (exiting on unrecognized positional
    arguments); if --config was given, options from the file's
    [pywebsocket] section are converted back to command-line form and
    prepended to args, so explicit command-line values win on conflict.

    Returns:
        (options, remaining_args) as produced by optparse.
    """
    parser = _build_option_parser()

    # First, parse options without configuration file.
    temporary_options, temporary_args = parser.parse_args(args=args)
    if temporary_args:
        logging.critical(
            'Unrecognized positional arguments: %r', temporary_args)
        sys.exit(1)

    if temporary_options.config_file:
        try:
            config_fp = open(temporary_options.config_file, 'r')
        except IOError, e:
            logging.critical(
                'Failed to open configuration file %r: %r',
                temporary_options.config_file,
                e)
            sys.exit(1)

        config_parser = ConfigParser.SafeConfigParser()
        config_parser.readfp(config_fp)
        config_fp.close()

        # Each (name, value) pair becomes '--name value' so the regular
        # option machinery validates configuration-file entries too.
        args_from_config = []
        for name, value in config_parser.items('pywebsocket'):
            args_from_config.append('--' + name)
            args_from_config.append(value)
        if args is None:
            args = args_from_config
        else:
            args = args_from_config + args
        return parser.parse_args(args=args)
    else:
        return temporary_options, temporary_args
+
+
def _main(args=None):
    """Parse options, configure the process, and run the WebSocket server.

    You can call this function from your own program, but please note that
    this function has some side-effects that might affect your program. For
    example, util.wrap_popen3_for_win use in this method replaces
    implementation of os.popen3. It also chdirs to the document root and
    reconfigures the root logger.
    """

    options, args = _parse_args_and_config(args=args)

    os.chdir(options.document_root)

    _configure_logging(options)

    # TODO(tyoshino): Clean up initialization of CGI related values. Move some
    # of code here to WebSocketRequestHandler class if it's better.
    options.cgi_directories = []
    options.is_executable_method = None
    if options.cgi_paths:
        options.cgi_directories = options.cgi_paths.split(',')
        if sys.platform in ('cygwin', 'win32'):
            cygwin_path = None
            # For Win32 Python, it is expected that CYGWIN_PATH
            # is set to a directory of cygwin binaries.
            # For example, websocket_server.py in Chromium sets CYGWIN_PATH to
            # full path of third_party/cygwin/bin.
            if 'CYGWIN_PATH' in os.environ:
                cygwin_path = os.environ['CYGWIN_PATH']
            util.wrap_popen3_for_win(cygwin_path)

            def __check_script(scriptpath):
                return util.get_script_interp(scriptpath, cygwin_path)

            options.is_executable_method = __check_script

    if options.use_tls:
        # TLS requires one of the ssl / pyOpenSSL modules plus key and
        # certificate files.
        if not (_HAS_SSL or _HAS_OPEN_SSL):
            logging.critical('TLS support requires ssl or pyOpenSSL module.')
            sys.exit(1)
        if not options.private_key or not options.certificate:
            logging.critical(
                'To use TLS, specify private_key and certificate.')
            sys.exit(1)

    if options.tls_client_auth:
        if not options.use_tls:
            logging.critical('TLS must be enabled for client authentication.')
            sys.exit(1)
        if not _HAS_SSL:
            logging.critical('Client authentication requires ssl module.')
            # NOTE(review): unlike the checks above, this branch does not
            # sys.exit — confirm whether execution should really continue.

    if not options.scan_dir:
        options.scan_dir = options.websock_handlers

    if options.use_basic_auth:
        # Pre-compute the expected Authorization header value once.
        options.basic_auth_credential = 'Basic ' + base64.b64encode(
            options.basic_auth_credential)

    try:
        if options.thread_monitor_interval_in_sec > 0:
            # Run a thread monitor to show the status of server threads for
            # debugging.
            ThreadMonitor(options.thread_monitor_interval_in_sec).start()

        server = WebSocketServer(options)
        server.serve_forever()
    except Exception, e:
        logging.critical('mod_pywebsocket: %s' % e)
        logging.critical('mod_pywebsocket: %s' % util.get_stack_trace())
        sys.exit(1)
+
+
# Script entry point: run the standalone server with the CLI arguments.
if __name__ == '__main__':
    _main(sys.argv[1:])
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/stream.py b/pyload/lib/mod_pywebsocket/stream.py
new file mode 100644
index 000000000..edc533279
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/stream.py
@@ -0,0 +1,57 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file exports public symbols.
+"""
+
+
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket._stream_hixie75 import StreamHixie75
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+
+# These methods are intended to be used by WebSocket client developers to have
+# their implementations receive broken data in tests.
+from mod_pywebsocket._stream_hybi import create_close_frame
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import create_ping_frame
+from mod_pywebsocket._stream_hybi import create_pong_frame
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_text_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/mod_pywebsocket/util.py b/pyload/lib/mod_pywebsocket/util.py
new file mode 100644
index 000000000..7bb0b5d9e
--- /dev/null
+++ b/pyload/lib/mod_pywebsocket/util.py
@@ -0,0 +1,515 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket utilities.
+"""
+
+
+import array
+import errno
+
+# Import hash classes from a module available and recommended for each Python
+# version and re-export those symbol. Use sha and md5 module in Python 2.4, and
+# hashlib module in Python 2.6.
+try:
+ import hashlib
+ md5_hash = hashlib.md5
+ sha1_hash = hashlib.sha1
+except ImportError:
+ import md5
+ import sha
+ md5_hash = md5.md5
+ sha1_hash = sha.sha
+
+import StringIO
+import logging
+import os
+import re
+import socket
+import traceback
+import zlib
+
+
def get_stack_trace():
    """Return the current exception's stack trace as a string.

    The original buffered traceback.print_exc output through a StringIO
    to support Python 2.3; its own TODO asked to switch to
    traceback.format_exc (available since Python 2.4), which produces the
    same text directly.
    """

    return traceback.format_exc()
+
+
def prepend_message_to_exception(message, exc):
    """Rewrite exc's args so its text begins with message."""

    # Replacing args (rather than mutating the message) works for any
    # exception type; str(exc) renders the old text to prepend to.
    exc.args = (message + str(exc),)
+
+
def __translate_interp(interp, cygwin_path):
    """Translate interp program path for Win32 python to run cygwin program
    (e.g. perl). Note that it doesn't support path that contains space,
    which is typically true for Unix, where #!-script is written.
    For Win32 python, cygwin_path is a directory of cygwin binaries.

    Args:
        interp: interp command line
        cygwin_path: directory name of cygwin binary, or None
    Returns:
        translated interp command line.
    """
    if not cygwin_path:
        return interp
    m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
    if not m:
        return interp
    # Rebase the program name onto the cygwin binary directory, keeping
    # any interpreter arguments that followed it.
    return os.path.join(cygwin_path, m.group(1)) + m.group(2)


def get_script_interp(script_path, cygwin_path=None):
    """Gets #!-interpreter command line from the script.

    It also fixes command path. When Cygwin Python is used, e.g. in WebKit,
    it could run "/usr/bin/perl -wT hello.pl".
    When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix
    "/usr/bin/perl" to "<cygwin_path>\perl.exe".

    Args:
        script_path: pathname of the script
        cygwin_path: directory name of cygwin binary, or None
    Returns:
        #!-interpreter command line, or None if it is not #!-script.
    """
    fp = open(script_path)
    first_line = fp.readline()
    fp.close()
    m = re.match('^#!(.*)', first_line)
    if m is None:
        return None
    return __translate_interp(m.group(1), cygwin_path)
+
+
def wrap_popen3_for_win(cygwin_path):
    """Wrap popen3 to support #!-script on Windows.

    Args:
        cygwin_path: path for cygwin binary if command path is needed to be
        translated. None if no translation required.
    """

    original_popen3 = os.popen3

    def _popen3_with_interp(cmd, mode='t', bufsize=-1):
        # When the command's program is a #!-script, prefix its
        # interpreter so Win32 Python can execute it.
        program = cmd.split(' ')[0]
        interp = get_script_interp(program, cygwin_path)
        if interp:
            cmd = interp + ' ' + cmd
        return original_popen3(cmd, mode, bufsize)

    os.popen3 = _popen3_with_interp
+
+
def hexify(s):
    """Return s as space-separated two-digit hex byte values."""
    return ' '.join('%02x' % ord(ch) for ch in s)
+
+
def get_class_logger(o):
    """Return a logger named after o's fully-qualified class name."""
    cls = o.__class__
    return logging.getLogger('%s.%s' % (cls.__module__, cls.__name__))
+
+
class NoopMasker(object):
    """A masking object that has the same interface as RepeatedXorMasker but
    returns the string passed in without making any change.
    """

    def __init__(self):
        pass

    def mask(self, s):
        """Return s unchanged."""
        return s
+
+
class RepeatedXorMasker(object):
    """A masking object that XORs the string given to mask with the masking
    bytes supplied to the constructor, repeating the key as needed. The
    offset reached at the end of one mask call is remembered, so the next
    call resumes from that position in the key.
    """

    def __init__(self, mask):
        # Key as a list of byte values, plus the resume offset.
        self._mask = [ord(ch) for ch in mask]
        self._mask_size = len(self._mask)
        self._count = 0

    def mask(self, s):
        """XOR s with the repeating key, resuming at the saved offset."""
        buf = array.array('B')
        buf.fromstring(s)
        # Local aliases avoid repeated attribute lookups in the hot loop.
        offset = self._count
        key = self._mask
        key_size = self._mask_size
        for i in xrange(len(buf)):
            buf[i] ^= key[offset]
            offset = (offset + 1) % key_size
        self._count = offset
        return buf.tostring()
+
+
class DeflateRequest(object):
    """A wrapper class for request object to intercept send and recv to perform
    deflate compression and decompression transparently.
    """

    def __init__(self, request):
        # Keep the wrapped request and replace its connection with a
        # deflating/inflating proxy (DeflateConnection is defined
        # elsewhere in this module).
        self._request = request
        self.connection = DeflateConnection(request.connection)

    def __getattribute__(self, name):
        # Only '_request' and 'connection' live on the wrapper itself;
        # every other attribute read is forwarded to the wrapped request.
        if name in ('_request', 'connection'):
            return object.__getattribute__(self, name)
        return self._request.__getattribute__(name)

    def __setattr__(self, name, value):
        # Mirror __getattribute__: the wrapper's own two attributes are
        # stored locally, all other writes go to the wrapped request.
        if name in ('_request', 'connection'):
            return object.__setattr__(self, name, value)
        return self._request.__setattr__(name, value)
+
+
+# By making wbits option negative, we can suppress CMF/FLG (2 octet) and
+# ADLER32 (4 octet) fields of zlib so that we can use zlib module just as
+# deflate library. DICTID won't be added as far as we don't set dictionary.
+# LZ77 window of 32K will be used for both compression and decompression.
+# For decompression, we can just use 32K to cover any windows size. For
+# compression, we use 32K so receivers must use 32K.
+#
+# Compression level is Z_DEFAULT_COMPRESSION. We don't have to match level
+# to decode.
+#
+# See zconf.h, deflate.cc, inflate.cc of zlib library, and zlibmodule.c of
+# Python. See also RFC1950 (ZLIB 3.3).
+
+
class _Deflater(object):
    """Incremental raw-DEFLATE compressor (negative wbits: no zlib header
    or checksum), with debug logging of input and output.
    """

    def __init__(self, window_bits):
        self._logger = get_class_logger(self)

        self._compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)

    def _log(self, bytes, compressed_bytes):
        # Shared debug logging for the three compress variants.
        self._logger.debug('Compress input %r', bytes)
        self._logger.debug('Compress result %r', compressed_bytes)

    def compress(self, bytes):
        """Compress bytes without flushing."""
        compressed_bytes = self._compress.compress(bytes)
        self._log(bytes, compressed_bytes)
        return compressed_bytes

    def compress_and_flush(self, bytes):
        """Compress bytes and flush with Z_SYNC_FLUSH."""
        compressed_bytes = self._compress.compress(bytes)
        compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
        self._log(bytes, compressed_bytes)
        return compressed_bytes

    def compress_and_finish(self, bytes):
        """Compress bytes and finish the stream with Z_FINISH."""
        compressed_bytes = self._compress.compress(bytes)
        compressed_bytes += self._compress.flush(zlib.Z_FINISH)
        self._log(bytes, compressed_bytes)
        return compressed_bytes
+
class _Inflater(object):
    """Incremental raw-DEFLATE decompressor.

    Input arrives via append() and is pulled via decompress(); bytes not
    yet consumed by zlib are carried across calls in self._unconsumed.
    """

    def __init__(self):
        self._logger = get_class_logger(self)

        # Bytes appended but not yet consumed by the zlib decompressor.
        self._unconsumed = ''

        self.reset()

    def decompress(self, size):
        """Return up to size decompressed bytes (everything available when
        size is -1).

        Raises:
            Exception: if size is 0 or a negative value other than -1.
        """
        if not (size == -1 or size > 0):
            raise Exception('size must be -1 or positive')

        data = ''

        while True:
            if size == -1:
                data += self._decompress.decompress(self._unconsumed)
                # See Python bug http://bugs.python.org/issue12050 to
                # understand why the same code cannot be used for updating
                # self._unconsumed for here and else block.
                self._unconsumed = ''
            else:
                data += self._decompress.decompress(
                    self._unconsumed, size - len(data))
                self._unconsumed = self._decompress.unconsumed_tail
            if self._decompress.unused_data:
                # Encountered a last block (i.e. a block with BFINAL = 1) and
                # found a new stream (unused_data). We cannot use the same
                # zlib.Decompress object for the new stream. Create a new
                # Decompress object to decompress the new one.
                #
                # It's fine to ignore unconsumed_tail if unused_data is not
                # empty.
                self._unconsumed = self._decompress.unused_data
                self.reset()
                if size >= 0 and len(data) == size:
                    # data is filled. Don't call decompress again.
                    break
                else:
                    # Re-invoke Decompress.decompress to try to decompress all
                    # available bytes before invoking read which blocks until
                    # any new byte is available.
                    continue
            else:
                # Here, since unused_data is empty, even if unconsumed_tail is
                # not empty, bytes of requested length are already in data. We
                # don't have to "continue" here.
                break

        if data:
            self._logger.debug('Decompressed %r', data)
        return data

    def append(self, data):
        """Queue raw compressed bytes for later decompress() calls."""
        self._logger.debug('Appended %r', data)
        self._unconsumed += data

    def reset(self):
        """Start a fresh raw-DEFLATE stream (negative wbits: no header)."""
        self._logger.debug('Reset')
        self._decompress = zlib.decompressobj(-zlib.MAX_WBITS)
+
+
+# Compresses/decompresses given octets using the method introduced in RFC1979.
+
+
+class _RFC1979Deflater(object):
+    """A compressor class that applies DEFLATE to given byte sequence and
+    flushes using the algorithm described in the RFC1979 section 2.1.
+    """
+
+    def __init__(self, window_bits, no_context_takeover):
+        # The _Deflater is created lazily in filter(); with
+        # no_context_takeover it is recreated on every flush so no
+        # compression history is carried between flushed units.
+        self._deflater = None
+        if window_bits is None:
+            window_bits = zlib.MAX_WBITS
+        self._window_bits = window_bits
+        self._no_context_takeover = no_context_takeover
+
+    def filter(self, bytes, flush=True, bfinal=False):
+        """Compress bytes per RFC1979 2.1.
+
+        With flush=True the trailing 4 octets of the Z_SYNC_FLUSH empty
+        block are stripped; with bfinal=True the stream is finished and a
+        one-octet padding block is appended.
+        """
+        if self._deflater is None or (self._no_context_takeover and flush):
+            self._deflater = _Deflater(self._window_bits)
+
+        if bfinal:
+            result = self._deflater.compress_and_finish(bytes)
+            # Add a padding block with BFINAL = 0 and BTYPE = 0.
+            result = result + chr(0)
+            self._deflater = None
+            return result
+        if flush:
+            # Strip last 4 octets which is LEN and NLEN field of a
+            # non-compressed block added for Z_SYNC_FLUSH.
+            return self._deflater.compress_and_flush(bytes)[:-4]
+        return self._deflater.compress(bytes)
+
+class _RFC1979Inflater(object):
+    """A decompressor class for byte sequence compressed and flushed following
+    the algorithm described in the RFC1979 section 2.1.
+    """
+
+    def __init__(self):
+        self._inflater = _Inflater()
+
+    def filter(self, bytes):
+        """Decompress one unit produced by _RFC1979Deflater.filter()."""
+        # Restore stripped LEN and NLEN field of a non-compressed block added
+        # for Z_SYNC_FLUSH.
+        self._inflater.append(bytes + '\x00\x00\xff\xff')
+        return self._inflater.decompress(-1)
+
+
+class DeflateSocket(object):
+    """A wrapper class for socket object to intercept send and recv to perform
+    deflate compression and decompression transparently.
+    """
+
+    # Size of the buffer passed to recv to receive compressed data.
+    _RECV_SIZE = 4096
+
+    def __init__(self, socket):
+        self._socket = socket
+
+        self._logger = get_class_logger(self)
+
+        self._deflater = _Deflater(zlib.MAX_WBITS)
+        self._inflater = _Inflater()
+
+    def recv(self, size):
+        """Receives data from the socket specified on the construction up
+        to the specified size. Once any data is available, returns it even
+        if it's smaller than the specified size.
+        """
+
+        # TODO(tyoshino): Allow call with size=0. It should block until any
+        # decompressed data is available.
+        if size <= 0:
+            raise Exception('Non-positive size passed')
+        while True:
+            # Serve buffered decompressed data first.
+            data = self._inflater.decompress(size)
+            if len(data) != 0:
+                return data
+
+            # '' from recv means the peer closed the connection.
+            read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
+            if not read_data:
+                return ''
+            self._inflater.append(read_data)
+
+    def sendall(self, bytes):
+        """Alias of send(); kept for socket-interface compatibility."""
+        self.send(bytes)
+
+    def send(self, bytes):
+        """Compress bytes, send everything, return the uncompressed length."""
+        self._socket.sendall(self._deflater.compress_and_flush(bytes))
+        return len(bytes)
+
+
+class DeflateConnection(object):
+    """A wrapper class for request object to intercept write and read to
+    perform deflate compression and decompression transparently.
+    """
+
+    def __init__(self, connection):
+        self._connection = connection
+
+        self._logger = get_class_logger(self)
+
+        self._deflater = _Deflater(zlib.MAX_WBITS)
+        self._inflater = _Inflater()
+
+    def get_remote_addr(self):
+        # Delegates to the wrapped connection.
+        return self._connection.remote_addr
+    remote_addr = property(get_remote_addr)
+
+    def put_bytes(self, bytes):
+        """Alias of write(); kept for connection-interface compatibility."""
+        self.write(bytes)
+
+    def read(self, size=-1):
+        """Reads at most size bytes. Blocks until there's at least one byte
+        available.
+        """
+
+        # TODO(tyoshino): Allow call with size=0.
+        if not (size == -1 or size > 0):
+            raise Exception('size must be -1 or positive')
+
+        data = ''
+        while True:
+            if size == -1:
+                data += self._inflater.decompress(-1)
+            else:
+                data += self._inflater.decompress(size - len(data))
+
+            if size >= 0 and len(data) != 0:
+                break
+
+            # TODO(tyoshino): Make this read efficient by some workaround.
+            #
+            # In 3.0.3 and prior of mod_python, read blocks until length bytes
+            # was read. We don't know the exact size to read while using
+            # deflate, so read byte-by-byte.
+            #
+            # _StandaloneRequest.read that ultimately performs
+            # socket._fileobject.read also blocks until length bytes was read
+            read_data = self._connection.read(1)
+            if not read_data:
+                break
+            self._inflater.append(read_data)
+        return data
+
+    def write(self, bytes):
+        """Compress bytes and write them to the wrapped connection."""
+        self._connection.write(self._deflater.compress_and_flush(bytes))
+
+
+def _is_ewouldblock_errno(error_number):
+    """Returns True iff error_number indicates that receive operation would
+    block. To make this portable, we check availability of errno and then
+    compare them.
+    """
+
+    # Not all of these names exist on every platform (WSAEWOULDBLOCK is
+    # Windows-only), hence the dir() membership check before getattr.
+    for error_name in ['WSAEWOULDBLOCK', 'EWOULDBLOCK', 'EAGAIN']:
+        if (error_name in dir(errno) and
+            error_number == getattr(errno, error_name)):
+            return True
+    return False
+
+
+def drain_received_data(raw_socket):
+    """Read and return all data immediately available on raw_socket.
+
+    The socket is temporarily switched to non-blocking mode; the original
+    timeout is restored before returning. Returns the drained bytes as a
+    single string ('' if nothing was pending).
+    """
+
+    # Set the socket non-blocking.
+    original_timeout = raw_socket.gettimeout()
+    raw_socket.settimeout(0.0)
+
+    drained_data = []
+
+    # Drain until the socket is closed or no data is immediately
+    # available for read.
+    while True:
+        try:
+            data = raw_socket.recv(1)
+            if not data:
+                break
+            drained_data.append(data)
+        except socket.error, e:
+            # e can be either a pair (errno, string) or just a string (or
+            # something else) telling what went wrong. We suppress only
+            # the errors that indicates that the socket blocks. Those
+            # exceptions can be parsed as a pair (errno, string).
+            try:
+                error_number, message = e
+            except:
+                # Failed to parse socket.error.
+                raise e
+
+            if _is_ewouldblock_errno(error_number):
+                break
+            else:
+                raise e
+
+    # Rollback timeout value.
+    raw_socket.settimeout(original_timeout)
+
+    return ''.join(drained_data)
+
+
+# vi:sts=4 sw=4 et
diff --git a/pyload/lib/new_collections.py b/pyload/lib/new_collections.py
new file mode 100644
index 000000000..12d05b4b9
--- /dev/null
+++ b/pyload/lib/new_collections.py
@@ -0,0 +1,375 @@
+## {{{ http://code.activestate.com/recipes/576693/ (r9)
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+
+try:
+ from thread import get_ident as _get_ident
+except ImportError:
+ from dummy_thread import get_ident as _get_ident
+
+try:
+ from _abcoll import KeysView, ValuesView, ItemsView
+except ImportError:
+ pass
+
+
+class OrderedDict(dict):
+    'Dictionary that remembers insertion order'
+    # An inherited dict maps keys to values.
+    # The inherited dict provides __getitem__, __len__, __contains__, and get.
+    # The remaining methods are order-aware.
+    # Big-O running times for all methods are the same as for regular dictionaries.
+
+    # The internal self.__map dictionary maps keys to links in a doubly linked list.
+    # The circular doubly linked list starts and ends with a sentinel element.
+    # The sentinel element never gets deleted (this simplifies the algorithm).
+    # Each link is stored as a list of length three: [PREV, NEXT, KEY].
+
+    def __init__(self, *args, **kwds):
+        '''Initialize an ordered dictionary. Signature is the same as for
+        regular dictionaries, but keyword arguments are not recommended
+        because their insertion order is arbitrary.
+
+        '''
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        try:
+            self.__root
+        except AttributeError:
+            # First initialization: create the sentinel and the key->link map.
+            self.__root = root = []  # sentinel node
+            root[:] = [root, root, None]
+            self.__map = {}
+        self.__update(*args, **kwds)
+
+    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+        'od.__setitem__(i, y) <==> od[i]=y'
+        # Setting a new item creates a new link which goes at the end of the linked
+        # list, and the inherited dictionary is updated with the new key/value pair.
+        if key not in self:
+            root = self.__root
+            last = root[0]
+            last[1] = root[0] = self.__map[key] = [last, root, key]
+        dict_setitem(self, key, value)
+
+    def __delitem__(self, key, dict_delitem=dict.__delitem__):
+        'od.__delitem__(y) <==> del od[y]'
+        # Deleting an existing item uses self.__map to find the link which is
+        # then removed by updating the links in the predecessor and successor nodes.
+        dict_delitem(self, key)
+        link_prev, link_next, key = self.__map.pop(key)
+        link_prev[1] = link_next
+        link_next[0] = link_prev
+
+    def __iter__(self):
+        'od.__iter__() <==> iter(od)'
+        # Walk the linked list in insertion order.
+        root = self.__root
+        curr = root[1]
+        while curr is not root:
+            yield curr[2]
+            curr = curr[1]
+
+    def __reversed__(self):
+        'od.__reversed__() <==> reversed(od)'
+        # Walk the linked list in reverse insertion order.
+        root = self.__root
+        curr = root[0]
+        while curr is not root:
+            yield curr[2]
+            curr = curr[0]
+
+    def clear(self):
+        'od.clear() -> None.  Remove all items from od.'
+        try:
+            # Break the reference cycles in the links so the garbage
+            # collector is not needed to reclaim them.
+            for node in self.__map.itervalues():
+                del node[:]
+            root = self.__root
+            root[:] = [root, root, None]
+            self.__map.clear()
+        except AttributeError:
+            # Instance was never fully initialized; nothing to unlink.
+            pass
+        dict.clear(self)
+
+    def popitem(self, last=True):
+        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+        Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+        '''
+        if not self:
+            raise KeyError('dictionary is empty')
+        root = self.__root
+        if last:
+            link = root[0]
+            link_prev = link[0]
+            link_prev[1] = root
+            root[0] = link_prev
+        else:
+            link = root[1]
+            link_next = link[1]
+            root[1] = link_next
+            link_next[0] = root
+        key = link[2]
+        del self.__map[key]
+        value = dict.pop(self, key)
+        return key, value
+
+    # -- the following methods do not depend on the internal structure --
+
+    def keys(self):
+        'od.keys() -> list of keys in od'
+        return list(self)
+
+    def values(self):
+        'od.values() -> list of values in od'
+        return [self[key] for key in self]
+
+    def items(self):
+        'od.items() -> list of (key, value) pairs in od'
+        return [(key, self[key]) for key in self]
+
+    def iterkeys(self):
+        'od.iterkeys() -> an iterator over the keys in od'
+        return iter(self)
+
+    def itervalues(self):
+        'od.itervalues -> an iterator over the values in od'
+        for k in self:
+            yield self[k]
+
+    def iteritems(self):
+        'od.iteritems -> an iterator over the (key, value) items in od'
+        for k in self:
+            yield (k, self[k])
+
+    def update(*args, **kwds):
+        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
+
+        If E is a dict instance, does:           for k in E: od[k] = E[k]
+        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
+        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
+        In either case, this is followed by:     for k, v in F.items(): od[k] = v
+
+        '''
+        # Written with *args (no explicit self) so a key named 'self' can be
+        # passed via **kwds without clashing.
+        if len(args) > 2:
+            raise TypeError('update() takes at most 2 positional '
+                            'arguments (%d given)' % (len(args),))
+        elif not args:
+            raise TypeError('update() takes at least 1 argument (0 given)')
+        self = args[0]
+        # Make progressively weaker assumptions about "other"
+        other = ()
+        if len(args) == 2:
+            other = args[1]
+        if isinstance(other, dict):
+            for key in other:
+                self[key] = other[key]
+        elif hasattr(other, 'keys'):
+            for key in other.keys():
+                self[key] = other[key]
+        else:
+            for key, value in other:
+                self[key] = value
+        for key, value in kwds.items():
+            self[key] = value
+
+    __update = update  # let subclasses override update without breaking __init__
+
+    __marker = object()
+
+    def pop(self, key, default=__marker):
+        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+        If key is not found, d is returned if given, otherwise KeyError is raised.
+
+        '''
+        if key in self:
+            result = self[key]
+            del self[key]
+            return result
+        if default is self.__marker:
+            raise KeyError(key)
+        return default
+
+    def setdefault(self, key, default=None):
+        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+        if key in self:
+            return self[key]
+        self[key] = default
+        return default
+
+    def __repr__(self, _repr_running={}):
+        'od.__repr__() <==> repr(od)'
+        # Recursion guard keyed on (object id, thread id) so self-referential
+        # dictionaries render as '...' instead of recursing forever.
+        call_key = id(self), _get_ident()
+        if call_key in _repr_running:
+            return '...'
+        _repr_running[call_key] = 1
+        try:
+            if not self:
+                return '%s()' % (self.__class__.__name__,)
+            return '%s(%r)' % (self.__class__.__name__, self.items())
+        finally:
+            del _repr_running[call_key]
+
+    def __reduce__(self):
+        'Return state information for pickling'
+        items = [[k, self[k]] for k in self]
+        inst_dict = vars(self).copy()
+        # Drop the implementation attributes (__root/__map) from the pickled
+        # instance dict; they are rebuilt from items on unpickling.
+        for k in vars(OrderedDict()):
+            inst_dict.pop(k, None)
+        if inst_dict:
+            return (self.__class__, (items,), inst_dict)
+        return self.__class__, (items,)
+
+    def copy(self):
+        'od.copy() -> a shallow copy of od'
+        return self.__class__(self)
+
+    @classmethod
+    def fromkeys(cls, iterable, value=None):
+        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+        and values equal to v (which defaults to None).
+
+        '''
+        d = cls()
+        for key in iterable:
+            d[key] = value
+        return d
+
+    def __eq__(self, other):
+        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
+        while comparison to a regular mapping is order-insensitive.
+
+        '''
+        if isinstance(other, OrderedDict):
+            return len(self)==len(other) and self.items() == other.items()
+        return dict.__eq__(self, other)
+
+    def __ne__(self, other):
+        return not self == other
+
+    # -- the following methods are only used in Python 2.7 --
+
+    def viewkeys(self):
+        "od.viewkeys() -> a set-like object providing a view on od's keys"
+        return KeysView(self)
+
+    def viewvalues(self):
+        "od.viewvalues() -> an object providing a view on od's values"
+        return ValuesView(self)
+
+    def viewitems(self):
+        "od.viewitems() -> a set-like object providing a view on od's items"
+        return ItemsView(self)
+## end of http://code.activestate.com/recipes/576693/ }}}
+
+## {{{ http://code.activestate.com/recipes/500261/ (r15)
+from operator import itemgetter as _itemgetter
+from keyword import iskeyword as _iskeyword
+import sys as _sys
+
+def namedtuple(typename, field_names, verbose=False, rename=False):
+    """Returns a new subclass of tuple with named fields.
+
+    >>> Point = namedtuple('Point', 'x y')
+    >>> Point.__doc__                   # docstring for the new class
+    'Point(x, y)'
+    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
+    >>> p[0] + p[1]                     # indexable like a plain tuple
+    33
+    >>> x, y = p                        # unpack like a regular tuple
+    >>> x, y
+    (11, 22)
+    >>> p.x + p.y                       # fields also accessable by name
+    33
+    >>> d = p._asdict()                 # convert to a dictionary
+    >>> d['x']
+    11
+    >>> Point(**d)                      # convert from a dictionary
+    Point(x=11, y=22)
+    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
+    Point(x=100, y=22)
+
+    """
+
+    # Parse and validate the field names.  Validation serves two purposes,
+    # generating informative error messages and preventing template injection attacks.
+    if isinstance(field_names, basestring):
+        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
+    field_names = tuple(map(str, field_names))
+    if rename:
+        # With rename=True, invalid or duplicate names are silently replaced
+        # by positional placeholders (_0, _1, ...) instead of raising.
+        names = list(field_names)
+        seen = set()
+        for i, name in enumerate(names):
+            if (not min(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
+                or not name or name[0].isdigit() or name.startswith('_')
+                or name in seen):
+                names[i] = '_%d' % i
+            seen.add(name)
+        field_names = tuple(names)
+    for name in (typename,) + field_names:
+        if not min(c.isalnum() or c=='_' for c in name):
+            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
+        if _iskeyword(name):
+            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
+        if name[0].isdigit():
+            raise ValueError('Type names and field names cannot start with a number: %r' % name)
+    seen_names = set()
+    for name in field_names:
+        if name.startswith('_') and not rename:
+            raise ValueError('Field names cannot start with an underscore: %r' % name)
+        if name in seen_names:
+            raise ValueError('Encountered duplicate field name: %r' % name)
+        seen_names.add(name)
+
+    # Create and fill-in the class template
+    numfields = len(field_names)
+    argtxt = repr(field_names).replace("'", "")[1:-1]   # tuple repr without parens or quotes
+    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
+    template = '''class %(typename)s(tuple):
+        '%(typename)s(%(argtxt)s)' \n
+        __slots__ = () \n
+        _fields = %(field_names)r \n
+        def __new__(_cls, %(argtxt)s):
+            return _tuple.__new__(_cls, (%(argtxt)s)) \n
+        @classmethod
+        def _make(cls, iterable, new=tuple.__new__, len=len):
+            'Make a new %(typename)s object from a sequence or iterable'
+            result = new(cls, iterable)
+            if len(result) != %(numfields)d:
+                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
+            return result \n
+        def __repr__(self):
+            return '%(typename)s(%(reprtxt)s)' %% self \n
+        def _asdict(self):
+            'Return a new dict which maps field names to their values'
+            return dict(zip(self._fields, self)) \n
+        def _replace(_self, **kwds):
+            'Return a new %(typename)s object replacing specified fields with new values'
+            result = _self._make(map(kwds.pop, %(field_names)r, _self))
+            if kwds:
+                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
+            return result \n
+        def __getnewargs__(self):
+            return tuple(self) \n\n''' % locals()
+    for i, name in enumerate(field_names):
+        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
+    if verbose:
+        # Dump the generated class source for debugging.
+        print template
+
+    # Execute the template string in a temporary namespace
+    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
+                     _property=property, _tuple=tuple)
+    try:
+        exec template in namespace
+    except SyntaxError, e:
+        raise SyntaxError(e.message + ':\n' + template)
+    result = namespace[typename]
+
+    # For pickling to work, the __module__ variable needs to be set to the frame
+    # where the named tuple is created.  Bypass this step in enviroments where
+    # sys._getframe is not defined (Jython for example) or sys._getframe is not
+    # defined for arguments greater than 0 (IronPython).
+    try:
+        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
+    except (AttributeError, ValueError):
+        pass
+
+    return result
+## end of http://code.activestate.com/recipes/500261/ }}}
diff --git a/pyload/lib/rename_process.py b/pyload/lib/rename_process.py
new file mode 100644
index 000000000..2527cef39
--- /dev/null
+++ b/pyload/lib/rename_process.py
@@ -0,0 +1,14 @@
+import sys
+
+def renameProcess(new_name):
+    """ Renames the process calling the function to the given name. """
+    # prctl(PR_SET_NAME) is Linux-specific, so bail out on other platforms.
+    # NOTE(review): 'linux2' matches Python 2 on Linux only; Python 3.3+
+    # reports 'linux' -- confirm if this module is ever run on Python 3.
+    if sys.platform != 'linux2':
+        return False
+    try:
+        from ctypes import CDLL
+        libc = CDLL('libc.so.6')
+        # 15 == PR_SET_NAME (see <linux/prctl.h>).
+        libc.prctl(15, new_name, 0, 0, 0)
+        return True
+    except Exception, e:
+        # Best effort: any failure (missing libc, bad name) is swallowed.
+        #print "Rename process failed", e
+        return False
diff --git a/pyload/lib/simplejson/__init__.py b/pyload/lib/simplejson/__init__.py
new file mode 100644
index 000000000..ef5c0db48
--- /dev/null
+++ b/pyload/lib/simplejson/__init__.py
@@ -0,0 +1,466 @@
+r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
+JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
+interchange format.
+
+:mod:`simplejson` exposes an API familiar to users of the standard library
+:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
+version of the :mod:`json` library contained in Python 2.6, but maintains
+compatibility with Python 2.4 and Python 2.5 and (currently) has
+significant performance advantages, even without using the optional C
+extension for speedups.
+
+Encoding basic Python object hierarchies::
+
+ >>> import simplejson as json
+ >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
+ '["foo", {"bar": ["baz", null, 1.0, 2]}]'
+ >>> print json.dumps("\"foo\bar")
+ "\"foo\bar"
+ >>> print json.dumps(u'\u1234')
+ "\u1234"
+ >>> print json.dumps('\\')
+ "\\"
+ >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
+ {"a": 0, "b": 0, "c": 0}
+ >>> from StringIO import StringIO
+ >>> io = StringIO()
+ >>> json.dump(['streaming API'], io)
+ >>> io.getvalue()
+ '["streaming API"]'
+
+Compact encoding::
+
+ >>> import simplejson as json
+ >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
+ '[1,2,3,{"4":5,"6":7}]'
+
+Pretty printing::
+
+ >>> import simplejson as json
+ >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
+ >>> print '\n'.join([l.rstrip() for l in s.splitlines()])
+ {
+ "4": 5,
+ "6": 7
+ }
+
+Decoding JSON::
+
+ >>> import simplejson as json
+ >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
+ >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
+ True
+ >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
+ True
+ >>> from StringIO import StringIO
+ >>> io = StringIO('["streaming API"]')
+ >>> json.load(io)[0] == 'streaming API'
+ True
+
+Specializing JSON object decoding::
+
+ >>> import simplejson as json
+ >>> def as_complex(dct):
+ ... if '__complex__' in dct:
+ ... return complex(dct['real'], dct['imag'])
+ ... return dct
+ ...
+ >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
+ ... object_hook=as_complex)
+ (1+2j)
+ >>> from decimal import Decimal
+ >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
+ True
+
+Specializing JSON object encoding::
+
+ >>> import simplejson as json
+ >>> def encode_complex(obj):
+ ... if isinstance(obj, complex):
+ ... return [obj.real, obj.imag]
+        ...     raise TypeError(repr(obj) + " is not JSON serializable")
+ ...
+ >>> json.dumps(2 + 1j, default=encode_complex)
+ '[2.0, 1.0]'
+ >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
+ '[2.0, 1.0]'
+ >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
+ '[2.0, 1.0]'
+
+
+Using simplejson.tool from the shell to validate and pretty-print::
+
+ $ echo '{"json":"obj"}' | python -m simplejson.tool
+ {
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+ Expecting property name: line 1 column 2 (char 2)
+"""
+__version__ = '2.2.1'
+__all__ = [
+ 'dump', 'dumps', 'load', 'loads',
+ 'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
+ 'OrderedDict',
+]
+
+__author__ = 'Bob Ippolito <bob@redivi.com>'
+
+from decimal import Decimal
+
+from decoder import JSONDecoder, JSONDecodeError
+from encoder import JSONEncoder
+def _import_OrderedDict():
+    # Prefer the stdlib OrderedDict (Python >= 2.7); fall back to the
+    # bundled pure-Python implementation on older interpreters.
+    import collections
+    try:
+        return collections.OrderedDict
+    except AttributeError:
+        import ordered_dict
+        return ordered_dict.OrderedDict
+OrderedDict = _import_OrderedDict()
+
+def _import_c_make_encoder():
+    # Return the C-accelerated encoder factory, or None when the
+    # _speedups extension module is not compiled/installed.
+    try:
+        from simplejson._speedups import make_encoder
+        return make_encoder
+    except ImportError:
+        return None
+
+# Shared encoder reused by dump()/dumps() whenever every option has its
+# default value (the "cached encoder" fast path below).
+_default_encoder = JSONEncoder(
+    skipkeys=False,
+    ensure_ascii=True,
+    check_circular=True,
+    allow_nan=True,
+    indent=None,
+    separators=None,
+    encoding='utf-8',
+    default=None,
+    use_decimal=True,
+    namedtuple_as_object=True,
+    tuple_as_array=True,
+)
+
+def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
+        allow_nan=True, cls=None, indent=None, separators=None,
+        encoding='utf-8', default=None, use_decimal=True,
+        namedtuple_as_object=True, tuple_as_array=True,
+        **kw):
+    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
+    ``.write()``-supporting file-like object).
+
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+    will be skipped instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
+    may be ``unicode`` instances, subject to normal Python ``str`` to
+    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
+    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
+    to cause an error.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in an ``OverflowError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
+    in strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If *indent* is a string, then JSON array elements and object members
+    will be pretty-printed with a newline followed by that string repeated
+    for each level of nesting. ``None`` (the default) selects the most compact
+    representation without any newlines. For backwards compatibility with
+    versions of simplejson earlier than 2.1.0, an integer is also accepted
+    and is converted to a string with that many spaces.
+
+    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+    then it will be used instead of the default ``(', ', ': ')`` separators.
+    ``(',', ':')`` is the most compact JSON representation.
+
+    ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    If *use_decimal* is true (default: ``True``) then decimal.Decimal
+    will be natively serialized to JSON with full precision.
+
+    If *namedtuple_as_object* is true (default: ``True``),
+    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
+    as JSON objects.
+
+    If *tuple_as_array* is true (default: ``True``),
+    :class:`tuple` (and subclasses) will be encoded as JSON arrays.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg.
+
+    """
+    # Fast path: when every option has its default value, reuse the shared
+    # module-level _default_encoder instead of constructing a new encoder.
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        encoding == 'utf-8' and default is None and use_decimal
+        and namedtuple_as_object and tuple_as_array and not kw):
+        iterable = _default_encoder.iterencode(obj)
+    else:
+        if cls is None:
+            cls = JSONEncoder
+        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+            separators=separators, encoding=encoding,
+            default=default, use_decimal=use_decimal,
+            namedtuple_as_object=namedtuple_as_object,
+            tuple_as_array=tuple_as_array,
+            **kw).iterencode(obj)
+    # could accelerate with writelines in some versions of Python, at
+    # a debuggability cost
+    for chunk in iterable:
+        fp.write(chunk)
+
+
+def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
+        allow_nan=True, cls=None, indent=None, separators=None,
+        encoding='utf-8', default=None, use_decimal=True,
+        namedtuple_as_object=True,
+        tuple_as_array=True,
+        **kw):
+    """Serialize ``obj`` to a JSON formatted ``str``.
+
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+    will be skipped instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the return value will be a
+    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
+    coercion rules instead of being escaped to an ASCII ``str``.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in an ``OverflowError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+    strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a string, then JSON array elements and object members
+    will be pretty-printed with a newline followed by that string repeated
+    for each level of nesting. ``None`` (the default) selects the most compact
+    representation without any newlines. For backwards compatibility with
+    versions of simplejson earlier than 2.1.0, an integer is also accepted
+    and is converted to a string with that many spaces.
+
+    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+    then it will be used instead of the default ``(', ', ': ')`` separators.
+    ``(',', ':')`` is the most compact JSON representation.
+
+    ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    If *use_decimal* is true (default: ``True``) then decimal.Decimal
+    will be natively serialized to JSON with full precision.
+
+    If *namedtuple_as_object* is true (default: ``True``),
+    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
+    as JSON objects.
+
+    If *tuple_as_array* is true (default: ``True``),
+    :class:`tuple` (and subclasses) will be encoded as JSON arrays.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg.
+
+    """
+    # Fast path: when every option has its default value, reuse the shared
+    # module-level _default_encoder instead of constructing a new encoder.
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        encoding == 'utf-8' and default is None and use_decimal
+        and namedtuple_as_object and tuple_as_array and not kw):
+        return _default_encoder.encode(obj)
+    if cls is None:
+        cls = JSONEncoder
+    return cls(
+        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+        separators=separators, encoding=encoding, default=default,
+        use_decimal=use_decimal,
+        namedtuple_as_object=namedtuple_as_object,
+        tuple_as_array=tuple_as_array,
+        **kw).encode(obj)
+
+
+# Shared decoder reused by loads() when only default arguments are passed.
+_default_decoder = JSONDecoder(encoding=None, object_hook=None,
+                               object_pairs_hook=None)
+
+
+def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
+        parse_int=None, parse_constant=None, object_pairs_hook=None,
+        use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
+        **kw):
+    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
+    a JSON document) to a Python object.
+
+    *encoding* determines the encoding used to interpret any
+    :class:`str` objects decoded by this instance (``'utf-8'`` by
+    default). It has no effect when decoding :class:`unicode` objects.
+
+    Note that currently only encodings that are a superset of ASCII work,
+    strings of other encodings should be passed in as :class:`unicode`.
+
+    *object_hook*, if specified, will be called with the result of every
+    JSON object decoded and its return value will be used in place of the
+    given :class:`dict`. This can be used to provide custom
+    deserializations (e.g. to support JSON-RPC class hinting).
+
+    *object_pairs_hook* is an optional function that will be called with
+    the result of any object literal decode with an ordered list of pairs.
+    The return value of *object_pairs_hook* will be used instead of the
+    :class:`dict`. This feature can be used to implement custom decoders
+    that rely on the order that the key and value pairs are decoded (for
+    example, :func:`collections.OrderedDict` will remember the order of
+    insertion). If *object_hook* is also defined, the *object_pairs_hook*
+    takes priority.
+
+    *parse_float*, if specified, will be called with the string of every
+    JSON float to be decoded. By default, this is equivalent to
+    ``float(num_str)``. This can be used to use another datatype or parser
+    for JSON floats (e.g. :class:`decimal.Decimal`).
+
+    *parse_int*, if specified, will be called with the string of every
+    JSON int to be decoded. By default, this is equivalent to
+    ``int(num_str)``. This can be used to use another datatype or parser
+    for JSON integers (e.g. :class:`float`).
+
+    *parse_constant*, if specified, will be called with one of the
+    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
+    can be used to raise an exception if invalid JSON numbers are
+    encountered.
+
+    If *use_decimal* is true (default: ``False``) then it implies
+    parse_float=decimal.Decimal for parity with ``dump``.
+
+    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+    kwarg.
+
+    """
+    # NOTE(review): namedtuple_as_object and tuple_as_array are accepted in
+    # the signature but not forwarded to loads() below -- they are encoder
+    # options and appear to be silently ignored here; confirm intent.
+    return loads(fp.read(),
+        encoding=encoding, cls=cls, object_hook=object_hook,
+        parse_float=parse_float, parse_int=parse_int,
+        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
+        use_decimal=use_decimal, **kw)
+
+
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* names the codec used to interpret :class:`str` input
    (``'utf-8'`` when not given); it is ignored for :class:`unicode`
    input.  Only ASCII-superset encodings are supported -- decode other
    encodings to :class:`unicode` first.

    *object_hook* receives each decoded JSON object (a :class:`dict`) and
    its return value replaces that dict, e.g. for JSON-RPC class hinting.

    *object_pairs_hook* receives each object literal as an ordered list of
    ``(key, value)`` pairs and its return value replaces the dict; useful
    for order-preserving containers such as
    :func:`collections.OrderedDict`.  When both hooks are given,
    *object_pairs_hook* wins.

    *parse_float*, *parse_int* and *parse_constant* override how the
    literal text of floats, ints and the constants ``'-Infinity'``,
    ``'Infinity'`` and ``'NaN'`` is converted (defaults are ``float``,
    ``int`` and the builtin constant table respectively).

    A true *use_decimal* (default ``False``) implies
    ``parse_float=decimal.Decimal`` for parity with ``dump``; combining it
    with an explicit *parse_float* raises :exc:`TypeError`.

    A custom ``JSONDecoder`` subclass may be supplied via *cls*.

    """
    # Fast path: with every option at its default, reuse the shared
    # module-level decoder instead of constructing a fresh one.
    any_override = (cls is not None or encoding is not None
                    or object_hook is not None or parse_int is not None
                    or parse_float is not None or parse_constant is not None
                    or object_pairs_hook is not None or use_decimal or kw)
    if not any_override:
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Fold only the explicitly supplied hooks/parsers into the decoder
    # keyword arguments so the class defaults apply otherwise.
    for name, value in (('object_hook', object_hook),
                        ('object_pairs_hook', object_pairs_hook),
                        ('parse_float', parse_float),
                        ('parse_int', parse_int),
                        ('parse_constant', parse_constant)):
        if value is not None:
            kw[name] = value
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
+
+
def _toggle_speedups(enabled):
    """Switch the whole package between the C ``_speedups`` extension and
    the pure-Python implementations, then rebuild the cached default
    decoder/encoder so they pick up the newly selected functions.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer the C implementations where they were importable; each
        # ``c_*`` name is None when the extension is unavailable.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        # Force the pure-Python implementations.
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    # The module-level singletons captured the old functions at construction
    # time, so they must be recreated with the (package) defaults.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
diff --git a/pyload/lib/simplejson/decoder.py b/pyload/lib/simplejson/decoder.py
new file mode 100644
index 000000000..e5496d6e7
--- /dev/null
+++ b/pyload/lib/simplejson/decoder.py
@@ -0,0 +1,421 @@
+"""Implementation of JSONDecoder
+"""
+import re
+import sys
+import struct
+
+from simplejson.scanner import make_scanner
+def _import_c_scanstring():
+ try:
+ from simplejson._speedups import scanstring
+ return scanstring
+ except ImportError:
+ return None
+c_scanstring = _import_c_scanstring()
+
# Public API of this module.
__all__ = ['JSONDecoder']

# Flags shared by every regular expression compiled in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
+
+def _floatconstants():
+ _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
+ # The struct module in Python 2.4 would get frexp() out of range here
+ # when an endian is specified in the format string. Fixed in Python 2.5+
+ if sys.byteorder != 'big':
+ _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
+ nan, inf = struct.unpack('dd', _BYTES)
+ return nan, inf, -inf
+
+NaN, PosInf, NegInf = _floatconstants()
+
+
class JSONDecodeError(ValueError):
    """``ValueError`` subclass carrying structured position information.

    Attributes:
        msg: the unformatted error message
        doc: the JSON document being parsed
        pos: start index in ``doc`` where parsing failed
        end: end index in ``doc`` where parsing failed (may be ``None``)
        lineno, colno: line and column corresponding to ``pos``
        endlineno, endcolno: line and column corresponding to ``end``
            (both ``None`` when ``end`` is ``None``)
    """
    def __init__(self, msg, doc, pos, end=None):
        # The formatted message shown to the user is produced by errmsg();
        # the raw pieces are kept as attributes for programmatic access.
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is None:
            self.endlineno = self.endcolno = None
        else:
            self.endlineno, self.endcolno = linecol(doc, end)
+
+
def linecol(doc, pos):
    """Return the 1-based ``(line, column)`` of offset *pos* within *doc*.

    Column counting matches the original implementation: on the first line
    the column equals the raw offset, on later lines it is measured from
    the preceding newline (so it starts at 1).
    """
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        return lineno, pos
    return lineno, pos - doc.rindex('\n', 0, pos)
+
+
def errmsg(msg, doc, pos, end=None):
    """Format *msg* with human-readable line/column context for *pos*
    (and, when given, *end*) inside document *doc*.

    Note: this function is also called from the C ``_speedups`` extension,
    so its name and signature must stay stable.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return ('%s: line %d column %d - line %d column %d (char %d - %d)'
            % (msg, lineno, colno, endlineno, endcolno, pos, end))
+
+
# Literal JSON constants mapped to their float values (see _floatconstants).
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a run of plain string characters followed by either the closing
# quote, a backslash, or a literal control character (the "terminator").
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character escape sequences (everything except \uXXXX).
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}

# Encoding assumed for str input when the caller does not specify one.
DEFAULT_ENCODING = "utf-8"
+
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the control character verbatim.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems: combine a high
            # surrogate with the mandatory following low surrogate into a
            # single code point above U+FFFF.
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end


# Use speedup if available
scanstring = c_scanstring or py_scanstring
+
# JSON insignificant whitespace, as a regex (for long runs) and as a string
# (for cheap single-character membership tests on the fast paths).
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
+
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object; ``end`` indexes the character just after the
    opening ``'{'``.  Returns ``(obj, end)`` with ``end`` just past the
    closing ``'}'``.  ``memo`` interns repeated key strings so identical
    keys across objects share one unicode object."""
    # Backwards compatibility
    if memo is None:
        memo = {}
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        key = memo_get(key, key)

        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting : delimiter", s, end)

        end += 1

        try:
            # Skip one or two whitespace characters cheaply before falling
            # back to the regex for longer runs.
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))

        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1

        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end - 1)

        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''

        end += 1
        if nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end - 1)

    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
+
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array; ``end`` indexes the character just after the
    opening ``'['``.  Returns ``(values, end)`` with ``end`` just past the
    closing ``']'``."""
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end)

        try:
            # Cheap skip of one or two whitespace characters before using
            # the regex for longer runs.
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

    return values, end
+
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.

    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default). It has no effect when decoding :class:`unicode` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.

        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`. This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`. This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.

        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded. By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).

        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded. By default, this is equivalent to
        ``int(num_str)``. This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).

        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
        can be used to raise an exception if invalid JSON numbers are
        encountered.

        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.

        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        # The scanner built below reads these parse_* attributes once, so
        # they must all be assigned before make_scanner(self) is called.
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        self.memo = {}
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        # Anything but trailing whitespace after the document is an error.
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.

        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            # The scanner signals "nothing decodable here" via StopIteration.
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end
diff --git a/pyload/lib/simplejson/encoder.py b/pyload/lib/simplejson/encoder.py
new file mode 100644
index 000000000..5ec7440f1
--- /dev/null
+++ b/pyload/lib/simplejson/encoder.py
@@ -0,0 +1,534 @@
+"""Implementation of JSONEncoder
+"""
+import re
+from decimal import Decimal
+
+def _import_speedups():
+ try:
+ from simplejson import _speedups
+ return _speedups.encode_basestring_ascii, _speedups.make_encoder
+ except ImportError:
+ return None, None
+c_encode_basestring_ascii, c_make_encoder = _import_speedups()
+
+from simplejson.decoder import PosInf
+
# Characters that must be escaped in non-ASCII output: control chars,
# backslash, quote, and the JS line terminators U+2028/U+2029 (legal in
# JSON but not inside JavaScript string literals).
ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
# For ASCII-only output: backslash, quote, and anything outside space..~.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Quick test for bytes that cannot be plain ASCII (i.e. need decoding).
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Escape-sequence lookup for the characters matched above; the loop below
# fills in generic \uXXXX escapes for the remaining control characters.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
    u'\u2028': '\\u2028',
    u'\u2029': '\\u2029',
}
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))

# Float-to-string conversion used for JSON numbers; repr preserves enough
# digits to round-trip.
FLOAT_REPR = repr
+
def encode_basestring(s):
    """Return a JSON representation of a Python string

    Output is a quoted ``unicode`` string; non-ASCII characters are kept
    literal (only the characters matched by ESCAPE are backslash-escaped).
    """
    # str input containing high bytes is assumed UTF-8 here -- callers that
    # use another encoding decode before reaching this function.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        return ESCAPE_DCT[match.group(0)]
    return u'"' + ESCAPE.sub(replace, s) + u'"'
+
+
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string

    Every non-printable or non-ASCII character is emitted as a ``\\uXXXX``
    escape (two escapes forming a surrogate pair for code points above
    U+FFFF).
    """
    # str input containing high bytes is assumed UTF-8; see encode_basestring.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'


# Prefer the C implementation when the _speedups extension is available.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
+
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict, namedtuple  | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).

    """
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None,
            use_decimal=True, namedtuple_as_object=True,
            tuple_as_array=True):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None. If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such. This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string repeated
        for each level of nesting. ``None`` (the default) selects the most compact
        representation without any newlines. For backwards compatibility with
        versions of simplejson earlier than 2.1.0, an integer is also accepted
        and is converted to a string with that many spaces.

        If specified, separators should be a (item_separator, key_separator)
        tuple. The default is (', ', ': '). To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized. It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.

        If use_decimal is true (default: ``True``), ``decimal.Decimal`` will
        be supported directly by the encoder. For the inverse, decode JSON
        with ``parse_float=decimal.Decimal``.

        If namedtuple_as_object is true (the default), tuple subclasses with
        ``_asdict()`` methods will be encoded as JSON objects.

        If tuple_as_array is true (the default), tuple (and subclasses) will
        be encoded as JSON arrays.
        """

        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        self.namedtuple_as_object = namedtuple_as_object
        self.tuple_as_array = tuple_as_array
        # Pre-2.1.0 compatibility: an integer indent means that many spaces.
        if isinstance(indent, (int, long)):
            indent = ' ' * indent
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        elif indent is not None:
            # When pretty-printing, the newline supplies the visual space,
            # so drop the space after the comma.
            self.item_separator = ','
        if default is not None:
            self.default = default
        self.encoding = encoding

    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)

        """
        raise TypeError(repr(o) + " is not JSON serializable")

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'

        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed. The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)

        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the encoder so str input is decoded with the configured
            # encoding before escaping.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)

        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.

            # NaN is the only float that compares unequal to itself.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)

            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))

            return text


        key_memo = {}
        # The C encoder handles the common one-shot, non-indented case;
        # everything else falls back to the pure-Python closure.
        if (_one_shot and c_make_encoder is not None
                and self.indent is None):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
                self.namedtuple_as_object, self.tuple_as_array)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot, self.use_decimal,
                self.namedtuple_as_object, self.tuple_as_array)
        try:
            return _iterencode(o, 0)
        finally:
            key_memo.clear()
+
+
class JSONEncoderForHTML(JSONEncoder):
    """A ``JSONEncoder`` whose output is safe to embed in HTML.

    ``&``, ``<`` and ``>`` are emitted as ``\\u0026``, ``\\u003c`` and
    ``\\u003e``.  Entity escaping (e.g. ``&amp;``) is not an option here
    because entities are not expanded within ``<script>`` tags.
    """

    def encode(self, o):
        # Bypass JSONEncoder.encode: its fast path for plain strings would
        # skip the chunk post-processing done by iterencode below.
        chunks = self.iterencode(o, True)
        if self.ensure_ascii:
            return ''.join(chunks)
        return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        for chunk in super(JSONEncoderForHTML, self).iterencode(o, _one_shot):
            for unsafe, escaped in (('&', '\\u0026'),
                                    ('<', '\\u003c'),
                                    ('>', '\\u003e')):
                chunk = chunk.replace(unsafe, escaped)
            yield chunk
+
+
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        _use_decimal, _namedtuple_as_object, _tuple_as_array,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        Decimal=Decimal,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    # Build and return the pure-Python iterencode closure.  All encoder
    # options are captured as cell/default variables so the hot inner
    # loops avoid global lookups.  ``markers`` (id -> container) is the
    # circular-reference guard shared by the three generators below.

    def _iterencode_list(lst, _current_indent_level):
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            # ``buf`` carries the pending prefix ('[' for the first item,
            # the separator afterwards) so it can be fused into the same
            # yield as simple scalar values.
            if first:
                first = False
            else:
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield buf + str(value)
            else:
                yield buf
                if isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif (_namedtuple_as_object and isinstance(value, tuple) and
                        hasattr(value, '_asdict')):
                    chunks = _iterencode_dict(value._asdict(),
                                              _current_indent_level)
                elif _tuple_as_array and isinstance(value, tuple):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield str(value)
            else:
                if isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif (_namedtuple_as_object and isinstance(value, tuple) and
                        hasattr(value, '_asdict')):
                    chunks = _iterencode_dict(value._asdict(),
                                              _current_indent_level)
                elif _tuple_as_array and isinstance(value, tuple):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatch over the supported types; unknown objects are
        # routed through _default (with their own circular-reference guard,
        # since _default may return the object's own container).
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, list):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif (_namedtuple_as_object and isinstance(o, tuple) and
                hasattr(o, '_asdict')):
            for chunk in _iterencode_dict(o._asdict(), _current_indent_level):
                yield chunk
        elif (_tuple_as_array and isinstance(o, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        elif _use_decimal and isinstance(o, Decimal):
            yield str(o)
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]

    return _iterencode
diff --git a/pyload/lib/simplejson/ordered_dict.py b/pyload/lib/simplejson/ordered_dict.py
new file mode 100644
index 000000000..87ad88824
--- /dev/null
+++ b/pyload/lib/simplejson/ordered_dict.py
@@ -0,0 +1,119 @@
+"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
+
+http://code.activestate.com/recipes/576693/
+
+"""
+from UserDict import DictMixin
+
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    # all() is a builtin from Python 2.5 on; merely referencing it raises
    # NameError on 2.4, in which case we install a pure-Python shim.
    all
except NameError:
    def all(seq):
        """Return True if every element of *seq* is truthy (2.4 shim)."""
        for elem in seq:
            if not elem:
                return False
        return True
+
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers insertion order (Hettinger's recipe).

    Ordering is kept in a circular doubly linked list of
    [key, prev, next] cells (self.__map) around a sentinel node
    (self.__end); the inherited dict stores the key -> value mapping.
    """

    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__end
        except AttributeError:
            # First initialization: create sentinel and linked-list state.
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        self.__end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.__map = {}          # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        if key not in self:
            # Splice a new cell in just before the sentinel (i.e. at the tail).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the cell from the order list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk the linked list forward, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        # Pickle support: temporarily drop the unpicklable linked-list
        # attributes, snapshot any remaining instance state, then restore.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # DictMixin derives the rest of the mapping API from the primitives above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Order-sensitive comparison against another OrderedDict;
        # order-insensitive against a plain dict (per collections.OrderedDict).
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
diff --git a/pyload/lib/simplejson/scanner.py b/pyload/lib/simplejson/scanner.py
new file mode 100644
index 000000000..54593a371
--- /dev/null
+++ b/pyload/lib/simplejson/scanner.py
@@ -0,0 +1,77 @@
+"""JSON token scanner
+"""
+import re
def _import_c_make_scanner():
    """Return the C-accelerated make_scanner factory.

    Returns None when the optional simplejson._speedups extension
    module is not installed, so callers can fall back to pure Python.
    """
    try:
        from simplejson._speedups import make_scanner
    except ImportError:
        return None
    return make_scanner


# Resolved once at import time; None means "pure-Python scanner only".
c_make_scanner = _import_c_make_scanner()
+
+__all__ = ['make_scanner']
+
+NUMBER_RE = re.compile(
+ r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
+ (re.VERBOSE | re.MULTILINE | re.DOTALL))
+
def py_make_scanner(context):
    """Build the pure-Python scan_once(string, idx) -> (obj, end_idx).

    *context* is a JSONDecoder-like object; its parse hooks and options
    are bound to locals up front so the inner closure avoids repeated
    attribute lookups on the hot path.
    """
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo

    def _scan_once(string, idx):
        # Raises StopIteration on truncated or unrecognized input; the
        # decoder converts that into a ValueError with position info.
        try:
            nextchar = string[idx]
        except IndexError:
            raise StopIteration

        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                _scan_once, object_hook, object_pairs_hook, memo)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5

        m = match_number(string, idx)
        if m is not None:
            # Any fraction or exponent part forces float parsing.
            integer, frac, exp = m.groups()
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration

    def scan_once(string, idx):
        # Clear the per-document key memo after each top-level scan so
        # interned key strings are not retained between documents.
        try:
            return _scan_once(string, idx)
        finally:
            memo.clear()

    return scan_once
+
+make_scanner = c_make_scanner or py_make_scanner
diff --git a/pyload/lib/simplejson/tool.py b/pyload/lib/simplejson/tool.py
new file mode 100644
index 000000000..73370db55
--- /dev/null
+++ b/pyload/lib/simplejson/tool.py
@@ -0,0 +1,39 @@
+r"""Command-line tool to validate and pretty-print JSON
+
+Usage::
+
+ $ echo '{"json":"obj"}' | python -m simplejson.tool
+ {
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+ Expecting property name: line 1 column 2 (char 2)
+
+"""
+import sys
+import simplejson as json
+
def main():
    """Read JSON from stdin or argv[1]; pretty-print to stdout or argv[2].

    Exits via SystemExit with the decoder's message when the input is
    not valid JSON, or with a usage string on bad arguments.
    """
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
    try:
        # OrderedDict + sort_keys keeps round-tripped output deterministic;
        # use_decimal preserves exact numeric literals.
        obj = json.load(infile,
                        object_pairs_hook=json.OrderedDict,
                        use_decimal=True)
    except ValueError, e:
        raise SystemExit(e)
    json.dump(obj, outfile, sort_keys=True, indent='    ', use_decimal=True)
    outfile.write('\n')


if __name__ == '__main__':
    main()
diff --git a/pyload/lib/wsgiserver/LICENSE.txt b/pyload/lib/wsgiserver/LICENSE.txt
new file mode 100644
index 000000000..a15165ee2
--- /dev/null
+++ b/pyload/lib/wsgiserver/LICENSE.txt
@@ -0,0 +1,25 @@
+Copyright (c) 2004-2007, CherryPy Team (team@cherrypy.org)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of the CherryPy Team nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pyload/lib/wsgiserver/__init__.py b/pyload/lib/wsgiserver/__init__.py
new file mode 100644
index 000000000..c380e18b0
--- /dev/null
+++ b/pyload/lib/wsgiserver/__init__.py
@@ -0,0 +1,1794 @@
+"""A high-speed, production ready, thread pooled, generic WSGI server.
+
+Simplest example on how to use this module directly
+(without using CherryPy's application machinery):
+
+ from cherrypy import wsgiserver
+
+ def my_crazy_app(environ, start_response):
+ status = '200 OK'
+ response_headers = [('Content-type','text/plain')]
+ start_response(status, response_headers)
+ return ['Hello world!\n']
+
+ server = wsgiserver.CherryPyWSGIServer(
+ ('0.0.0.0', 8070), my_crazy_app,
+ server_name='www.cherrypy.example')
+
+The CherryPy WSGI server can serve as many WSGI applications
+as you want in one instance by using a WSGIPathInfoDispatcher:
+
+ d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
+ server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
+
+Want SSL support? Just set these attributes:
+
+ server.ssl_certificate = <filename>
+ server.ssl_private_key = <filename>
+
+ if __name__ == '__main__':
+ try:
+ server.start()
+ except KeyboardInterrupt:
+ server.stop()
+
+This won't call the CherryPy engine (application side) at all, only the
+WSGI server, which is independent from the rest of CherryPy. Don't
+let the name "CherryPyWSGIServer" throw you; the name merely reflects
+its origin, not its coupling.
+
+For those of you wanting to understand internals of this module, here's the
+basic call flow. The server's listening thread runs a very tight loop,
+sticking incoming connections onto a Queue:
+
+ server = CherryPyWSGIServer(...)
+ server.start()
+ while True:
+ tick()
+ # This blocks until a request comes in:
+ child = socket.accept()
+ conn = HTTPConnection(child, ...)
+ server.requests.put(conn)
+
+Worker threads are kept in a pool and poll the Queue, popping off and then
+handling each connection in turn. Each connection can consist of an arbitrary
+number of requests and their responses, so we run a nested loop:
+
+ while True:
+ conn = server.requests.get()
+ conn.communicate()
+ -> while True:
+ req = HTTPRequest(...)
+ req.parse_request()
+ -> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
+ req.rfile.readline()
+ req.read_headers()
+ req.respond()
+ -> response = wsgi_app(...)
+ try:
+ for chunk in response:
+ if chunk:
+ req.write(chunk)
+ finally:
+ if hasattr(response, "close"):
+ response.close()
+ if req.close_connection:
+ return
+"""
+
+
+import base64
+import os
+import Queue
+import re
+quoted_slash = re.compile("(?i)%2F")
+import rfc822
+import socket
+try:
+ import cStringIO as StringIO
+except ImportError:
+ import StringIO
+
+_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
+
+import sys
+import threading
+import time
+import traceback
+from urllib import unquote
+from urlparse import urlparse
+import warnings
+
+try:
+ from OpenSSL import SSL
+ from OpenSSL import crypto
+except ImportError:
+ SSL = None
+
+import errno
+
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in errnames on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS). This function will return the list of
    numeric values for a given list of potential names.
    """
    # Collect into a dict keyed by errno value: this de-dupes aliases
    # (e.g. EAGAIN == EWOULDBLOCK on some platforms) and silently skips
    # names this platform does not define.
    known = dir(errno)
    unique = {}
    for name in errnames:
        if name in known:
            unique[getattr(errno, name)] = None
    return unique.keys()
+
+socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
+
+socket_errors_to_ignore = plat_specific_errors(
+ "EPIPE",
+ "EBADF", "WSAEBADF",
+ "ENOTSOCK", "WSAENOTSOCK",
+ "ETIMEDOUT", "WSAETIMEDOUT",
+ "ECONNREFUSED", "WSAECONNREFUSED",
+ "ECONNRESET", "WSAECONNRESET",
+ "ECONNABORTED", "WSAECONNABORTED",
+ "ENETRESET", "WSAENETRESET",
+ "EHOSTDOWN", "EHOSTUNREACH",
+ )
+socket_errors_to_ignore.append("timed out")
+
+socket_errors_nonblocking = plat_specific_errors(
+ 'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
+
+comma_separated_headers = ['ACCEPT', 'ACCEPT-CHARSET', 'ACCEPT-ENCODING',
+ 'ACCEPT-LANGUAGE', 'ACCEPT-RANGES', 'ALLOW', 'CACHE-CONTROL',
+ 'CONNECTION', 'CONTENT-ENCODING', 'CONTENT-LANGUAGE', 'EXPECT',
+ 'IF-MATCH', 'IF-NONE-MATCH', 'PRAGMA', 'PROXY-AUTHENTICATE', 'TE',
+ 'TRAILER', 'TRANSFER-ENCODING', 'UPGRADE', 'VARY', 'VIA', 'WARNING',
+ 'WWW-AUTHENTICATE']
+
+
class WSGIPathInfoDispatcher(object):
    """A WSGI dispatcher for dispatch based on the PATH_INFO.

    apps: a dict or list of (path_prefix, app) pairs.
    """

    def __init__(self, apps):
        try:
            # Accept a dict; fall through when it is already a pair list.
            apps = apps.items()
        except AttributeError:
            pass

        # Sort the apps by len(path), descending
        # (plain lexicographic descending also puts longer extensions of a
        # shared prefix first, so the most specific mount matches below).
        apps.sort()
        apps.reverse()

        # The path_prefix strings must start, but not end, with a slash.
        # Use "" instead of "/".
        self.apps = [(p.rstrip("/"), a) for p, a in apps]

    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"] or "/"
        for p, app in self.apps:
            # The apps list should be sorted by length, descending.
            if path.startswith(p + "/") or path == p:
                # Per WSGI, shift the matched prefix from PATH_INFO onto
                # SCRIPT_NAME; mutate a copy so the caller's environ is kept.
                environ = environ.copy()
                environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
                environ["PATH_INFO"] = path[len(p):]
                return app(environ, start_response)

        start_response('404 Not Found', [('Content-Type', 'text/plain'),
                                         ('Content-Length', '0')])
        return ['']
+
+
class MaxSizeExceeded(Exception):
    """Raised by SizeCheckWrapper when a read exceeds its byte budget."""
    pass
+
class SizeCheckWrapper(object):
    """Wraps a file-like object, raising MaxSizeExceeded if too large."""

    def __init__(self, rfile, maxlen):
        self.rfile = rfile
        self.maxlen = maxlen      # byte budget; falsy (0/None) disables the check
        self.bytes_read = 0       # running total; callers reset per phase

    def _check_length(self):
        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded()

    def read(self, size=None):
        data = self.rfile.read(size)
        self.bytes_read += len(data)
        self._check_length()
        return data

    def readline(self, size=None):
        if size is not None:
            data = self.rfile.readline(size)
            self.bytes_read += len(data)
            self._check_length()
            return data

        # User didn't specify a size ...
        # We read the line in chunks to make sure it's not a 100MB line !
        res = []
        while True:
            data = self.rfile.readline(256)
            self.bytes_read += len(data)
            self._check_length()
            res.append(data)
            # See http://www.cherrypy.org/ticket/421
            if len(data) < 256 or data[-1:] == "\n":
                return ''.join(res)

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; counts line bytes like read()/readline().
        data = self.rfile.next()
        self.bytes_read += len(data)
        self._check_length()
        return data
+
+
+class HTTPRequest(object):
+ """An HTTP Request (and response).
+
+ A single HTTP connection may consist of multiple request/response pairs.
+
+ send: the 'send' method from the connection's socket object.
+ wsgi_app: the WSGI application to call.
+ environ: a partial WSGI environ (server and connection entries).
+ The caller MUST set the following entries:
+ * All wsgi.* entries, including .input
+ * SERVER_NAME and SERVER_PORT
+ * Any SSL_* entries
+ * Any custom entries like REMOTE_ADDR and REMOTE_PORT
+ * SERVER_SOFTWARE: the value to write in the "Server" response header.
+ * ACTUAL_SERVER_PROTOCOL: the value to write in the Status-Line of
+ the response. From RFC 2145: "An HTTP server SHOULD send a
+ response version equal to the highest version for which the
+ server is at least conditionally compliant, and whose major
+ version is less than or equal to the one received in the
+ request. An HTTP server MUST NOT send a version for which
+ it is not at least conditionally compliant."
+
+ outheaders: a list of header tuples to write in the response.
+ ready: when True, the request has been parsed and is ready to begin
+ generating the response. When False, signals the calling Connection
+ that the response should not be generated and the connection should
+ close.
+ close_connection: signals the calling Connection that the request
+ should close. This does not imply an error! The client and/or
+ server may each request that the connection be closed.
+ chunked_write: if True, output will be encoded with the "chunked"
+ transfer-coding. This value is set automatically inside
+ send_headers.
+ """
+
+ max_request_header_size = 0
+ max_request_body_size = 0
+
    def __init__(self, wfile, environ, wsgi_app):
        """Bind one request/response cycle to a connection's streams.

        environ is copied so per-request mutations don't leak back into
        the server's template environ.
        """
        self.rfile = environ['wsgi.input']
        self.wfile = wfile
        self.environ = environ.copy()
        self.wsgi_app = wsgi_app

        # Per-request state; see the class docstring for the meaning of
        # ready / close_connection / chunked_write.
        self.ready = False
        self.started_response = False
        self.status = ""
        self.outheaders = []
        self.sent_headers = False
        self.close_connection = False
        self.chunked_write = False
+
    def parse_request(self):
        """Parse the next HTTP request start-line and message-headers."""
        # Budget the header-reading phase: SizeCheckWrapper raises
        # MaxSizeExceeded once max_request_header_size bytes are consumed.
        self.rfile.maxlen = self.max_request_header_size
        self.rfile.bytes_read = 0

        try:
            self._parse_request()
        except MaxSizeExceeded:
            self.simple_response("413 Request Entity Too Large")
            return
+
    def _parse_request(self):
        """Read and validate the Request-Line and headers; set self.ready.

        On any protocol error this writes a simple error response and
        returns with self.ready still False, which tells the connection
        loop to stop and close.
        """
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()
        if not request_line:
            # Force self.ready = False so the connection will close.
            self.ready = False
            return

        if request_line == "\r\n":
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                self.ready = False
                return

        environ = self.environ

        try:
            method, path, req_protocol = request_line.strip().split(" ", 2)
        except ValueError:
            self.simple_response(400, "Malformed Request-Line")
            return

        environ["REQUEST_METHOD"] = method

        # path may be an abs_path (including "http://host.domain.tld");
        scheme, location, path, params, qs, frag = urlparse(path)

        if frag:
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return

        if scheme:
            environ["wsgi.url_scheme"] = scheme
        if params:
            path = path + ";" + params

        environ["SCRIPT_NAME"] = ""

        # Unquote the path+params (e.g. "/this%20path" -> "this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # (hence %2F stays encoded: unquote between the slashes only).
        atoms = [unquote(x) for x in quoted_slash.split(path)]
        path = "%2F".join(atoms)
        environ["PATH_INFO"] = path

        # Note that, like wsgiref and most other WSGI servers,
        # we unquote the path but not the query string.
        environ["QUERY_STRING"] = qs

        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.
        rp = int(req_protocol[5]), int(req_protocol[7])
        server_protocol = environ["ACTUAL_SERVER_PROTOCOL"]
        sp = int(server_protocol[5]), int(server_protocol[7])
        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return
        # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
        environ["SERVER_PROTOCOL"] = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)

        # If the Request-URI was an absoluteURI, use its location atom.
        if location:
            environ["SERVER_NAME"] = location

        # then all the http headers
        try:
            self.read_headers()
        except ValueError, ex:
            self.simple_response("400 Bad Request", repr(ex.args))
            return

        mrbs = self.max_request_body_size
        if mrbs and int(environ.get("CONTENT_LENGTH", 0)) > mrbs:
            self.simple_response("413 Request Entity Too Large")
            return

        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if environ.get("HTTP_CONNECTION", "") == "close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if environ.get("HTTP_CONNECTION", "") != "Keep-Alive":
                self.close_connection = True

        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = environ.get("HTTP_TRANSFER_ENCODING")
            if te:
                te = [x.strip().lower() for x in te.split(",") if x.strip()]

        self.chunked_read = False

        if te:
            for enc in te:
                if enc == "chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if environ.get("HTTP_EXPECT", "") == "100-continue":
            self.simple_response(100)

        self.ready = True
+
    def read_headers(self):
        """Read header lines from the incoming stream.

        Raises ValueError on a truncated header section.  Headers land in
        environ as HTTP_*; Content-Type/Content-Length are moved to their
        CGI-style keys at the end.
        """
        environ = self.environ

        while True:
            line = self.rfile.readline()
            if not line:
                # No more data--illegal end of headers
                raise ValueError("Illegal end of headers.")

            if line == '\r\n':
                # Normal end of headers
                break

            if line[0] in ' \t':
                # It's a continuation line: k/envname retain their values
                # from the previous loop iteration.
                v = line.strip()
            else:
                k, v = line.split(":", 1)
                k, v = k.strip().upper(), v.strip()
                envname = "HTTP_" + k.replace("-", "_")

            if k in comma_separated_headers:
                # RFC 2616 sec 4.2: repeated list-valued headers merge.
                existing = environ.get(envname)
                if existing:
                    v = ", ".join((existing, v))
            environ[envname] = v

        ct = environ.pop("HTTP_CONTENT_TYPE", None)
        if ct is not None:
            environ["CONTENT_TYPE"] = ct
        cl = environ.pop("HTTP_CONTENT_LENGTH", None)
        if cl is not None:
            environ["CONTENT_LENGTH"] = cl
+
    def decode_chunked(self):
        """Decode the 'chunked' transfer coding.

        Returns True on success; returns None (falsy) after writing a 400
        response on a framing error, which makes _respond close the
        connection.
        """
        cl = 0
        data = StringIO.StringIO()
        while True:
            line = self.rfile.readline().strip().split(";", 1)
            chunk_size = int(line.pop(0), 16)
            if chunk_size <= 0:
                break
##            if line: chunk_extension = line[0]
            cl += chunk_size
            data.write(self.rfile.read(chunk_size))
            crlf = self.rfile.read(2)
            if crlf != "\r\n":
                self.simple_response("400 Bad Request",
                                     "Bad chunked transfer coding "
                                     "(expected '\\r\\n', got %r)" % crlf)
                return

        # Grab any trailer headers
        self.read_headers()

        # Present the decoded body to the app as a plain in-memory stream
        # with a real CONTENT_LENGTH.
        data.seek(0)
        self.environ["wsgi.input"] = data
        self.environ["CONTENT_LENGTH"] = str(cl) or ""
        return True
+
    def respond(self):
        """Call the appropriate WSGI app and write its iterable output."""
        # Set rfile.maxlen to ensure we don't read past Content-Length.
        # This will also be used to read the entire request body if errors
        # are raised before the app can read the body.
        if self.chunked_read:
            # If chunked, Content-Length will be 0.
            self.rfile.maxlen = self.max_request_body_size
        else:
            cl = int(self.environ.get("CONTENT_LENGTH", 0))
            if self.max_request_body_size:
                self.rfile.maxlen = min(cl, self.max_request_body_size)
            else:
                self.rfile.maxlen = cl
        self.rfile.bytes_read = 0

        try:
            self._respond()
        except MaxSizeExceeded:
            # Only report the 413 if the response hasn't started yet.
            if not self.sent_headers:
                self.simple_response("413 Request Entity Too Large")
            return
+
    def _respond(self):
        # Decode a chunked request body up front; on failure the error
        # response has already been written, so just close.
        if self.chunked_read:
            if not self.decode_chunked():
                self.close_connection = True
                return

        response = self.wsgi_app(self.environ, self.start_response)
        try:
            for chunk in response:
                # "The start_response callable must not actually transmit
                # the response headers. Instead, it must store them for the
                # server or gateway to transmit only after the first
                # iteration of the application return value that yields
                # a NON-EMPTY string, or upon the application's first
                # invocation of the write() callable." (PEP 333)
                if chunk:
                    self.write(chunk)
        finally:
            if hasattr(response, "close"):
                response.close()

        if (self.ready and not self.sent_headers):
            # Empty response body: write() never ran, so flush headers now.
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write:
            # Terminate the chunked body with the zero-length last-chunk.
            self.wfile.sendall("0\r\n\r\n")
+
    def simple_response(self, status, msg=""):
        """Write a simple response back to the client."""
        status = str(status)
        buf = ["%s %s\r\n" % (self.environ['ACTUAL_SERVER_PROTOCOL'], status),
               "Content-Length: %s\r\n" % len(msg),
               "Content-Type: text/plain\r\n"]

        if status[:3] == "413" and self.response_protocol == 'HTTP/1.1':
            # Request Entity Too Large
            self.close_connection = True
            buf.append("Connection: close\r\n")

        buf.append("\r\n")
        if msg:
            buf.append(msg)

        try:
            self.wfile.sendall("".join(buf))
        except socket.error, x:
            # A peer that already vanished isn't worth an error.
            if x.args[0] not in socket_errors_to_ignore:
                raise
+
    def start_response(self, status, headers, exc_info = None):
        """WSGI callable to begin the HTTP response."""
        # "The application may call start_response more than once,
        # if and only if the exc_info argument is provided."
        if self.started_response and not exc_info:
            raise AssertionError("WSGI start_response called a second "
                                 "time with no exc_info.")

        # "if exc_info is provided, and the HTTP headers have already been
        # sent, start_response must raise an error, and should raise the
        # exc_info tuple."
        if self.sent_headers:
            try:
                raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                # Break the frame/traceback reference cycle (PEP 333).
                exc_info = None

        self.started_response = True
        self.status = status
        self.outheaders.extend(headers)
        return self.write
+
    def write(self, chunk):
        """WSGI callable to write unbuffered data to the client.

        This method is also used internally by start_response (to write
        data from the iterable returned by the WSGI application).
        """
        if not self.started_response:
            raise AssertionError("WSGI write called before start_response.")

        # Headers are flushed lazily, on the first non-empty chunk.
        if not self.sent_headers:
            self.sent_headers = True
            self.send_headers()

        if self.chunked_write and chunk:
            # Chunked framing: <hex length>\r\n<data>\r\n
            buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
            self.wfile.sendall("".join(buf))
        else:
            self.wfile.sendall(chunk)
+
    def send_headers(self):
        """Assert, process, and send the HTTP response message-headers."""
        hkeys = [key.lower() for key, value in self.outheaders]
        status = int(self.status[:3])

        if status == 413:
            # Request Entity Too Large. Close conn to avoid garbage.
            self.close_connection = True
        elif "content-length" not in hkeys:
            # "All 1xx (informational), 204 (no content),
            # and 304 (not modified) responses MUST NOT
            # include a message-body." So no point chunking.
            if status < 200 or status in (204, 205, 304):
                pass
            else:
                if (self.response_protocol == 'HTTP/1.1'
                    and self.environ["REQUEST_METHOD"] != 'HEAD'):
                    # Use the chunked transfer-coding
                    self.chunked_write = True
                    self.outheaders.append(("Transfer-Encoding", "chunked"))
                else:
                    # Closing the conn is the only way to determine len.
                    self.close_connection = True

        if "connection" not in hkeys:
            if self.response_protocol == 'HTTP/1.1':
                # Both server and client are HTTP/1.1 or better
                if self.close_connection:
                    self.outheaders.append(("Connection", "close"))
            else:
                # Server and/or client are HTTP/1.0
                if not self.close_connection:
                    self.outheaders.append(("Connection", "Keep-Alive"))

        if (not self.close_connection) and (not self.chunked_read):
            # Read any remaining request body data on the socket.
            # "If an origin server receives a request that does not include an
            # Expect request-header field with the "100-continue" expectation,
            # the request includes a request body, and the server responds
            # with a final status code before reading the entire request body
            # from the transport connection, then the server SHOULD NOT close
            # the transport connection until it has read the entire request,
            # or until the client closes the connection. Otherwise, the client
            # might not reliably receive the response message. However, this
            # requirement is not to be construed as preventing a server from
            # defending itself against denial-of-service attacks, or from
            # badly broken client implementations."
            size = self.rfile.maxlen - self.rfile.bytes_read
            if size > 0:
                self.rfile.read(size)

        if "date" not in hkeys:
            self.outheaders.append(("Date", rfc822.formatdate()))

        if "server" not in hkeys:
            self.outheaders.append(("Server", self.environ['SERVER_SOFTWARE']))

        buf = [self.environ['ACTUAL_SERVER_PROTOCOL'], " ", self.status, "\r\n"]
        try:
            buf += [k + ": " + v + "\r\n" for k, v in self.outheaders]
        except TypeError:
            # Python 2 list comps leak k/v into this scope, letting us
            # report which half of the offending pair was not a string.
            if not isinstance(k, str):
                raise TypeError("WSGI response header key %r is not a string.")
            if not isinstance(v, str):
                raise TypeError("WSGI response header value %r is not a string.")
            else:
                raise
        buf.append("\r\n")
        self.wfile.sendall("".join(buf))
+
+
class NoSSLError(Exception):
    """Exception raised when a client speaks plain HTTP to an HTTPS socket."""
    pass
+
+
class FatalSSLAlert(Exception):
    """Exception raised when the SSL implementation signals a fatal alert."""
    pass
+
+
+if not _fileobject_uses_str_type:
+ class CP_fileobject(socket._fileobject):
+ """Faux file object attached to a socket object."""
+
        def sendall(self, data):
            """Sendall for non-blocking sockets."""
            # Loop until the kernel accepts every byte; EAGAIN-style
            # errors mean "try again", anything else propagates.
            while data:
                try:
                    bytes_sent = self.send(data)
                    data = data[bytes_sent:]
                except socket.error, e:
                    if e.args[0] not in socket_errors_nonblocking:
                        raise
+
        def send(self, data):
            # Single (possibly partial) send; returns the byte count sent.
            return self._sock.send(data)
+
        def flush(self):
            # Drain the write buffer through our retrying sendall().
            if self._wbuf:
                buffer = "".join(self._wbuf)
                self._wbuf = []
                self.sendall(buffer)
+
        def recv(self, size):
            # Retry on EAGAIN/EWOULDBLOCK and EINTR; surface real errors.
            while True:
                try:
                    return self._sock.recv(size)
                except socket.error, e:
                    if (e.args[0] not in socket_errors_nonblocking
                        and e.args[0] not in socket_error_eintr):
                        raise
+
        def read(self, size=-1):
            """Read *size* bytes (or to EOF when size < 0) via recv()."""
            # Use max, disallow tiny reads in a loop as they are very inefficient.
            # We never leave read() with any leftover data from a new recv() call
            # in our internal buffer.
            rbufsize = max(self._rbufsize, self.default_bufsize)
            # Our use of StringIO rather than lists of string objects returned by
            # recv() minimizes memory usage and fragmentation that occurs when
            # rbufsize is large compared to the typical return value of recv().
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if size < 0:
                # Read until EOF
                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    data = self.recv(rbufsize)
                    if not data:
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or EOF seen, whichever comes first
                buf_len = buf.tell()
                if buf_len >= size:
                    # Already have size bytes in our buffer? Extract and return.
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv

                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    left = size - buf_len
                    # recv() will malloc the amount of memory given as its
                    # parameter even though it often returns much less data
                    # than that. The returned data string is short lived
                    # as we copy it into a StringIO and free it. This avoids
                    # fragmentation issues on many platforms.
                    data = self.recv(left)
                    if not data:
                        break
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut. Avoid buffer data copies when:
                        # - We have no data in our buffer.
                        # AND
                        # - Our call to recv returned exactly the
                        #   number of bytes we were asked to read.
                        return data
                    if n == left:
                        buf.write(data)
                        del data  # explicit free
                        break
                    assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                    buf.write(data)
                    buf_len += n
                    del data  # explicit free
                #assert buf_len == buf.tell()
                return buf.getvalue()
+
+ def readline(self, size=-1):
+ buf = self._rbuf
+ buf.seek(0, 2) # seek end
+ if buf.tell() > 0:
+ # check if we already have it in our buffer
+ buf.seek(0)
+ bline = buf.readline(size)
+ if bline.endswith('\n') or len(bline) == size:
+ self._rbuf = StringIO.StringIO()
+ self._rbuf.write(buf.read())
+ return bline
+ del bline
+ if size < 0:
+ # Read until \n or EOF, whichever comes first
+ if self._rbufsize <= 1:
+ # Speed up unbuffered case
+ buf.seek(0)
+ buffers = [buf.read()]
+ self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
+ data = None
+ recv = self.recv
+ while data != "\n":
+ data = recv(1)
+ if not data:
+ break
+ buffers.append(data)
+ return "".join(buffers)
+
+ buf.seek(0, 2) # seek end
+ self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
+ while True:
+ data = self.recv(self._rbufsize)
+ if not data:
+ break
+ nl = data.find('\n')
+ if nl >= 0:
+ nl += 1
+ buf.write(data[:nl])
+ self._rbuf.write(data[nl:])
+ del data
+ break
+ buf.write(data)
+ return buf.getvalue()
+ else:
+ # Read until size bytes or \n or EOF seen, whichever comes first
+ buf.seek(0, 2) # seek end
+ buf_len = buf.tell()
+ if buf_len >= size:
+ buf.seek(0)
+ rv = buf.read(size)
+ self._rbuf = StringIO.StringIO()
+ self._rbuf.write(buf.read())
+ return rv
+ self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
+ while True:
+ data = self.recv(self._rbufsize)
+ if not data:
+ break
+ left = size - buf_len
+ # did we just receive a newline?
+ nl = data.find('\n', 0, left)
+ if nl >= 0:
+ nl += 1
+ # save the excess data to _rbuf
+ self._rbuf.write(data[nl:])
+ if buf_len:
+ buf.write(data[:nl])
+ break
+ else:
+ # Shortcut. Avoid data copy through buf when returning
+ # a substring of our first recv().
+ return data[:nl]
+ n = len(data)
+ if n == size and not buf_len:
+ # Shortcut. Avoid data copy through buf when
+ # returning exactly all of our first recv().
+ return data
+ if n >= left:
+ buf.write(data[:left])
+ self._rbuf.write(data[left:])
+ break
+ buf.write(data)
+ buf_len += n
+ #assert buf_len == buf.tell()
+ return buf.getvalue()
+
+else:
+ class CP_fileobject(socket._fileobject):
+ """Faux file object attached to a socket object."""
+
+ def sendall(self, data):
+ """Sendall for non-blocking sockets."""
+ while data:
+ try:
+ bytes_sent = self.send(data)
+ data = data[bytes_sent:]
+ except socket.error, e:
+ if e.args[0] not in socket_errors_nonblocking:
+ raise
+
+ def send(self, data):
+ return self._sock.send(data)
+
+ def flush(self):
+ if self._wbuf:
+ buffer = "".join(self._wbuf)
+ self._wbuf = []
+ self.sendall(buffer)
+
+ def recv(self, size):
+ while True:
+ try:
+ return self._sock.recv(size)
+ except socket.error, e:
+ if (e.args[0] not in socket_errors_nonblocking
+ and e.args[0] not in socket_error_eintr):
+ raise
+
+ def read(self, size=-1):
+ if size < 0:
+ # Read until EOF
+ buffers = [self._rbuf]
+ self._rbuf = ""
+ if self._rbufsize <= 1:
+ recv_size = self.default_bufsize
+ else:
+ recv_size = self._rbufsize
+
+ while True:
+ data = self.recv(recv_size)
+ if not data:
+ break
+ buffers.append(data)
+ return "".join(buffers)
+ else:
+ # Read until size bytes or EOF seen, whichever comes first
+ data = self._rbuf
+ buf_len = len(data)
+ if buf_len >= size:
+ self._rbuf = data[size:]
+ return data[:size]
+ buffers = []
+ if data:
+ buffers.append(data)
+ self._rbuf = ""
+ while True:
+ left = size - buf_len
+ recv_size = max(self._rbufsize, left)
+ data = self.recv(recv_size)
+ if not data:
+ break
+ buffers.append(data)
+ n = len(data)
+ if n >= left:
+ self._rbuf = data[left:]
+ buffers[-1] = data[:left]
+ break
+ buf_len += n
+ return "".join(buffers)
+
+ def readline(self, size=-1):
+ data = self._rbuf
+ if size < 0:
+ # Read until \n or EOF, whichever comes first
+ if self._rbufsize <= 1:
+ # Speed up unbuffered case
+ assert data == ""
+ buffers = []
+ while data != "\n":
+ data = self.recv(1)
+ if not data:
+ break
+ buffers.append(data)
+ return "".join(buffers)
+ nl = data.find('\n')
+ if nl >= 0:
+ nl += 1
+ self._rbuf = data[nl:]
+ return data[:nl]
+ buffers = []
+ if data:
+ buffers.append(data)
+ self._rbuf = ""
+ while True:
+ data = self.recv(self._rbufsize)
+ if not data:
+ break
+ buffers.append(data)
+ nl = data.find('\n')
+ if nl >= 0:
+ nl += 1
+ self._rbuf = data[nl:]
+ buffers[-1] = data[:nl]
+ break
+ return "".join(buffers)
+ else:
+ # Read until size bytes or \n or EOF seen, whichever comes first
+ nl = data.find('\n', 0, size)
+ if nl >= 0:
+ nl += 1
+ self._rbuf = data[nl:]
+ return data[:nl]
+ buf_len = len(data)
+ if buf_len >= size:
+ self._rbuf = data[size:]
+ return data[:size]
+ buffers = []
+ if data:
+ buffers.append(data)
+ self._rbuf = ""
+ while True:
+ data = self.recv(self._rbufsize)
+ if not data:
+ break
+ buffers.append(data)
+ left = size - buf_len
+ nl = data.find('\n', 0, left)
+ if nl >= 0:
+ nl += 1
+ self._rbuf = data[nl:]
+ buffers[-1] = data[:nl]
+ break
+ n = len(data)
+ if n >= left:
+ self._rbuf = data[left:]
+ buffers[-1] = data[:left]
+ break
+ buf_len += n
+ return "".join(buffers)
+
+
class SSL_fileobject(CP_fileobject):
    """SSL file object attached to a socket object."""

    ssl_timeout = 3   # seconds to keep retrying before raising socket.timeout
    ssl_retry = .01   # sleep between retries on WantRead/WantWrite

    def _safe_call(self, is_reader, call, *args, **kwargs):
        """Wrap the given call with SSL error-trapping.

        is_reader: if False EOF errors will be raised. If True, EOF errors
        will return "" (to emulate normal sockets).
        """
        start = time.time()
        while True:
            try:
                return call(*args, **kwargs)
            except SSL.WantReadError:
                # Sleep and try again. This is dangerous, because it means
                # the rest of the stack has no way of differentiating
                # between a "new handshake" error and "client dropped".
                # Note this isn't an endless loop: there's a timeout below.
                time.sleep(self.ssl_retry)
            except SSL.WantWriteError:
                time.sleep(self.ssl_retry)
            except SSL.SysCallError, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""

                errnum = e.args[0]
                if is_reader and errnum in socket_errors_to_ignore:
                    return ""
                # Re-raise as a plain socket error so callers need not
                # know about pyOpenSSL exception types.
                raise socket.error(errnum)
            except SSL.Error, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""

                thirdarg = None
                try:
                    thirdarg = e.args[0][0][2]
                except IndexError:
                    pass

                if thirdarg == 'http request':
                    # The client is talking HTTP to an HTTPS server.
                    raise NoSSLError()
                raise FatalSSLAlert(*e.args)
            except:
                raise

            # Only reached after a WantRead/WantWrite retry.
            if time.time() - start > self.ssl_timeout:
                raise socket.timeout("timed out")

    def recv(self, *args, **kwargs):
        # Keep reading while OpenSSL still has decrypted bytes pending,
        # so one recv() drains a complete SSL record.
        buf = []
        r = super(SSL_fileobject, self).recv
        while True:
            data = self._safe_call(True, r, *args, **kwargs)
            buf.append(data)
            p = self._sock.pending()
            if not p:
                return "".join(buf)

    def sendall(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).sendall, *args, **kwargs)

    def send(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).send, *args, **kwargs)
+
+
class HTTPConnection(object):
    """An HTTP connection (active socket).

    socket: the raw socket object (usually TCP) for this connection.
    wsgi_app: the WSGI application for this server/connection.
    environ: a WSGI environ template. This will be copied for each request.

    rfile: a fileobject for reading from the socket.
    send: a function for writing (+ flush) to the socket.
    """

    rbufsize = -1
    RequestHandlerClass = HTTPRequest
    # Per-class WSGI environ template; copied per connection in __init__.
    environ = {"wsgi.version": (1, 0),
               "wsgi.url_scheme": "http",
               "wsgi.multithread": True,
               "wsgi.multiprocess": False,
               "wsgi.run_once": False,
               "wsgi.errors": sys.stderr,
               }

    def __init__(self, sock, wsgi_app, environ):
        self.socket = sock
        self.wsgi_app = wsgi_app

        # Copy the class environ into self.
        self.environ = self.environ.copy()
        self.environ.update(environ)

        if SSL and isinstance(sock, SSL.ConnectionType):
            timeout = sock.gettimeout()
            self.rfile = SSL_fileobject(sock, "rb", self.rbufsize)
            self.rfile.ssl_timeout = timeout
            self.wfile = SSL_fileobject(sock, "wb", -1)
            self.wfile.ssl_timeout = timeout
        else:
            self.rfile = CP_fileobject(sock, "rb", self.rbufsize)
            self.wfile = CP_fileobject(sock, "wb", -1)

        # Wrap wsgi.input but not HTTPConnection.rfile itself.
        # We're also not setting maxlen yet; we'll do that separately
        # for headers and body for each iteration of self.communicate
        # (if maxlen is 0 the wrapper doesn't check length).
        self.environ["wsgi.input"] = SizeCheckWrapper(self.rfile, 0)

    def communicate(self):
        """Read each request and respond appropriately."""
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.wfile, self.environ,
                                               self.wsgi_app)

                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if not req.ready:
                    return

                req.respond()
                if req.close_connection:
                    return

        except socket.error, e:
            errnum = e.args[0]
            if errnum == 'timed out':
                if req and not req.sent_headers:
                    req.simple_response("408 Request Timeout")
            elif errnum not in socket_errors_to_ignore:
                if req and not req.sent_headers:
                    req.simple_response("500 Internal Server Error",
                                        format_exc())
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert, e:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile
                req.wfile = CP_fileobject(self.socket._sock, "wb", -1)
                req.simple_response("400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
                self.linger = True
        except Exception, e:
            if req and not req.sent_headers:
                req.simple_response("500 Internal Server Error", format_exc())

    # When True, close() skips the hard socket shutdown so the client has
    # a chance to read the whole response first.
    linger = False

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel socket
            # when you call socket.close(). We do so manually here because we
            # want this server to send a FIN TCP segment immediately. Note this
            # must be called *before* calling socket.close(), because the latter
            # drops its reference to the kernel socket.
            self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass
+
+
def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        lines = traceback.format_exception(exc_type, exc_value, exc_tb, limit)
        return ''.join(lines)
    finally:
        # Break the reference cycle created by holding the traceback.
        exc_type = exc_value = exc_tb = None
+
+
+_SHUTDOWNREQUEST = None
+
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    server: the HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it.
    ready: a simple flag for the calling server to know when this thread
    has begun polling the Queue.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    # The connection currently being serviced, or None when idle
    # (ThreadPool.idle counts workers by this attribute).
    conn = None

    def __init__(self, server):
        self.ready = False
        self.server = server
        threading.Thread.__init__(self)

    def run(self):
        try:
            self.ready = True
            while True:
                # Blocks until the server puts a connection (or a
                # _SHUTDOWNREQUEST sentinel) on the queue.
                conn = self.server.requests.get()
                if conn is _SHUTDOWNREQUEST:
                    return

                self.conn = conn
                try:
                    conn.communicate()
                finally:
                    # Always release the socket, even if communicate() raised.
                    conn.close()
                    self.conn = None
        except (KeyboardInterrupt, SystemExit), exc:
            # Hand the interrupt to the server's main loop (see
            # CherryPyWSGIServer.interrupt).
            self.server.interrupt = exc
+
+
+class ThreadPool(object):
+ """A Request Queue for the CherryPyWSGIServer which pools threads.
+
+ ThreadPool objects must provide min, get(), put(obj), start()
+ and stop(timeout) attributes.
+ """
+
+ def __init__(self, server, min=10, max=-1):
+ self.server = server
+ self.min = min
+ self.max = max
+ self._threads = []
+ self._queue = Queue.Queue()
+ self.get = self._queue.get
+
+ def start(self):
+ """Start the pool of threads."""
+ for i in xrange(self.min):
+ self._threads.append(WorkerThread(self.server))
+ for worker in self._threads:
+ worker.setName("CP WSGIServer " + worker.getName())
+ worker.start()
+ for worker in self._threads:
+ while not worker.ready:
+ time.sleep(.1)
+
+ def _get_idle(self):
+ """Number of worker threads which are idle. Read-only."""
+ return len([t for t in self._threads if t.conn is None])
+ idle = property(_get_idle, doc=_get_idle.__doc__)
+
+ def put(self, obj):
+ self._queue.put(obj)
+ if obj is _SHUTDOWNREQUEST:
+ return
+
+ def grow(self, amount):
+ """Spawn new worker threads (not above self.max)."""
+ for i in xrange(amount):
+ if self.max > 0 and len(self._threads) >= self.max:
+ break
+ worker = WorkerThread(self.server)
+ worker.setName("CP WSGIServer " + worker.getName())
+ self._threads.append(worker)
+ worker.start()
+
+ def shrink(self, amount):
+ """Kill off worker threads (not below self.min)."""
+ # Grow/shrink the pool if necessary.
+ # Remove any dead threads from our list
+ for t in self._threads:
+ if not t.isAlive():
+ self._threads.remove(t)
+ amount -= 1
+
+ if amount > 0:
+ for i in xrange(min(amount, len(self._threads) - self.min)):
+ # Put a number of shutdown requests on the queue equal
+ # to 'amount'. Once each of those is processed by a worker,
+ # that worker will terminate and be culled from our list
+ # in self.put.
+ self._queue.put(_SHUTDOWNREQUEST)
+
+ def stop(self, timeout=5):
+ # Must shut down threads here so the code that calls
+ # this method can know when all threads are stopped.
+ for worker in self._threads:
+ self._queue.put(_SHUTDOWNREQUEST)
+
+ # Don't join currentThread (when stop is called inside a request).
+ current = threading.currentThread()
+ while self._threads:
+ worker = self._threads.pop()
+ if worker is not current and worker.isAlive():
+ try:
+ if timeout is None or timeout < 0:
+ worker.join()
+ else:
+ worker.join(timeout)
+ if worker.isAlive():
+ # We exhausted the timeout.
+ # Forcibly shut down the socket.
+ c = worker.conn
+ if c and not c.rfile.closed:
+ if SSL and isinstance(c.socket, SSL.ConnectionType):
+ # pyOpenSSL.socket.shutdown takes no args
+ c.socket.shutdown()
+ else:
+ c.socket.shutdown(socket.SHUT_RD)
+ worker.join()
+ except (AssertionError,
+ # Ignore repeated Ctrl-C.
+ # See http://www.cherrypy.org/ticket/691.
+ KeyboardInterrupt), exc1:
+ pass
+
+
+
class SSLConnection:
    """A thread-safe wrapper for an SSL.Connection.

    *args: the arguments to create the wrapped SSL.Connection(*args).
    """

    def __init__(self, *args):
        self._ssl_conn = SSL.Connection(*args)
        # One re-entrant lock serializes every operation on the wrapped
        # connection (pyOpenSSL connections are not thread-safe).
        self._lock = threading.RLock()

    # Generate one delegating method per SSL.Connection operation at class
    # creation time; each acquires self._lock around the call-through.
    # (Python 2 'exec' statement metaprogramming.)
    for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
              'renegotiate', 'bind', 'listen', 'connect', 'accept',
              'setblocking', 'fileno', 'shutdown', 'close', 'get_cipher_list',
              'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
              'makefile', 'get_app_data', 'set_app_data', 'state_string',
              'sock_shutdown', 'get_peer_certificate', 'want_read',
              'want_write', 'set_connect_state', 'set_accept_state',
              'connect_ex', 'sendall', 'settimeout'):
        exec """def %s(self, *args):
    self._lock.acquire()
    try:
        return self._ssl_conn.%s(*args)
    finally:
        self._lock.release()
""" % (f, f)
+
+
# Platform-dependent definition of prevent_socket_inheritance: POSIX uses
# fcntl/FD_CLOEXEC, Windows uses ctypes/SetHandleInformation, and if neither
# mechanism is available a no-op is installed.
try:
    import fcntl
except ImportError:
    try:
        from ctypes import windll, WinError
    except ImportError:
        def prevent_socket_inheritance(sock):
            """Dummy function, since neither fcntl nor ctypes are available."""
            pass
    else:
        def prevent_socket_inheritance(sock):
            """Mark the given socket fd as non-inheritable (Windows)."""
            # HANDLE_FLAG_INHERIT (1) cleared to 0.
            if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
else:
    def prevent_socket_inheritance(sock):
        """Mark the given socket fd as non-inheritable (POSIX)."""
        fd = sock.fileno()
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
+
+
class CherryPyWSGIServer(object):
    """An HTTP server for WSGI.

    bind_addr: The interface on which to listen for connections.
        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
        or IPv6 address, or any valid hostname. The string 'localhost' is a
        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
        The string '0.0.0.0' is a special IPv4 entry meaning "any active
        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
        IPv6. The empty string or None are not allowed.

        For UNIX sockets, supply the filename as a string.
    wsgi_app: the WSGI 'application callable'; multiple WSGI applications
        may be passed as (path_prefix, app) pairs.
    numthreads: the number of worker threads to create (default 10).
    server_name: the string to set for WSGI's SERVER_NAME environ entry.
        Defaults to socket.gethostname().
    max: the maximum number of queued requests (defaults to -1 = no limit).
    request_queue_size: the 'backlog' argument to socket.listen();
        specifies the maximum number of queued connections (default 5).
    timeout: the timeout in seconds for accepted connections (default 10).

    nodelay: if True (the default since 3.1), sets the TCP_NODELAY socket
        option.

    protocol: the version string to write in the Status-Line of all
        HTTP responses. For example, "HTTP/1.1" (the default). This
        also limits the supported features used in the response.


    SSL/HTTPS
    ---------
    The OpenSSL module must be importable for SSL functionality.
    You can obtain it from http://pyopenssl.sourceforge.net/

    ssl_certificate: the filename of the server SSL certificate.
    ssl_privatekey: the filename of the server's private key file.

    If either of these is None (both are None by default), this server
    will not use SSL. If both are given and are valid, they will be read
    on server start and used in the SSL context for the listening socket.
    """

    protocol = "HTTP/1.1"
    _bind_addr = "127.0.0.1"
    version = "CherryPy/3.1.2"
    ready = False
    _interrupt = None

    nodelay = True

    ConnectionClass = HTTPConnection
    environ = {}

    # Paths to certificate and private key files
    ssl_certificate = None
    ssl_private_key = None

    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
        self.requests = ThreadPool(self, min=numthreads or 1, max=max)

        if callable(wsgi_app):
            # We've been handed a single wsgi_app, in CP-2.1 style.
            # Assume it's mounted at "".
            self.wsgi_app = wsgi_app
        else:
            # We've been handed a list of (path_prefix, wsgi_app) tuples,
            # so that the server can call different wsgi_apps, and also
            # correctly set SCRIPT_NAME.
            warnings.warn("The ability to pass multiple apps is deprecated "
                          "and will be removed in 3.2. You should explicitly "
                          "include a WSGIPathInfoDispatcher instead.",
                          DeprecationWarning)
            self.wsgi_app = WSGIPathInfoDispatcher(wsgi_app)

        self.bind_addr = bind_addr
        if not server_name:
            server_name = socket.gethostname()
        self.server_name = server_name
        self.request_queue_size = request_queue_size

        self.timeout = timeout
        self.shutdown_timeout = shutdown_timeout

    def _get_numthreads(self):
        return self.requests.min
    def _set_numthreads(self, value):
        self.requests.min = value
    numthreads = property(_get_numthreads, _set_numthreads)

    def __str__(self):
        return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
                              self.bind_addr)

    def _get_bind_addr(self):
        return self._bind_addr
    def _set_bind_addr(self, value):
        if isinstance(value, tuple) and value[0] in ('', None):
            # Despite the socket module docs, using '' does not
            # allow AI_PASSIVE to work. Passing None instead
            # returns '0.0.0.0' like we want. In other words:
            #     host    AI_PASSIVE     result
            #      ''         Y         192.168.x.y
            #      ''         N         192.168.x.y
            #     None        Y         0.0.0.0
            #     None        N         127.0.0.1
            # But since you can get the same effect with an explicit
            # '0.0.0.0', we deny both the empty string and None as values.
            raise ValueError("Host values of '' or None are not allowed. "
                             "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
                             "to listen on all active interfaces.")
        self._bind_addr = value
    bind_addr = property(_get_bind_addr, _set_bind_addr,
        doc="""The interface on which to listen for connections.

        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
        or IPv6 address, or any valid hostname. The string 'localhost' is a
        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
        The string '0.0.0.0' is a special IPv4 entry meaning "any active
        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
        IPv6. The empty string or None are not allowed.

        For UNIX sockets, supply the filename as a string.""")

    def start(self):
        """Run the server forever."""
        # We don't have to trap KeyboardInterrupt or SystemExit here,
        # because cherrpy.server already does so, calling self.stop() for us.
        # If you're using this server with another framework, you should
        # trap those exceptions in whatever code block calls start().
        self._interrupt = None

        # Select the appropriate socket
        if isinstance(self.bind_addr, basestring):
            # AF_UNIX socket

            # So we can reuse the socket...
            try: os.unlink(self.bind_addr)
            except: pass

            # So everyone can access the socket...
            try: os.chmod(self.bind_addr, 0777)
            except: pass

            info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
        else:
            # AF_INET or AF_INET6 socket
            # Get the correct address family for our host (allows IPv6 addresses)
            host, port = self.bind_addr
            try:
                info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                          socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
            except socket.gaierror:
                # Probably a DNS issue. Assume IPv4.
                info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)]

        self.socket = None
        msg = "No socket could be created"
        # Try each resolved address until one binds successfully.
        for res in info:
            af, socktype, proto, canonname, sa = res
            try:
                self.bind(af, socktype, proto)
            except socket.error, msg:
                if self.socket:
                    self.socket.close()
                self.socket = None
                continue
            break
        if not self.socket:
            raise socket.error, msg

        # Timeout so KeyboardInterrupt can be caught on Win32
        self.socket.settimeout(1)
        self.socket.listen(self.request_queue_size)

        # Create worker threads
        self.requests.start()

        self.ready = True
        while self.ready:
            self.tick()
            if self.interrupt:
                while self.interrupt is True:
                    # Wait for self.stop() to complete. See _set_interrupt.
                    time.sleep(0.1)
                if self.interrupt:
                    raise self.interrupt

    def bind(self, family, type, proto=0):
        """Create (or recreate) the actual socket object."""
        self.socket = socket.socket(family, type, proto)
        prevent_socket_inheritance(self.socket)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if self.nodelay:
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        if self.ssl_certificate and self.ssl_private_key:
            if SSL is None:
                raise ImportError("You must install pyOpenSSL to use HTTPS.")

            # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
            ctx = SSL.Context(SSL.SSLv23_METHOD)
            ctx.use_privatekey_file(self.ssl_private_key)
            ctx.use_certificate_file(self.ssl_certificate)
            self.socket = SSLConnection(ctx, self.socket)
            self.populate_ssl_environ()

        # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
        # activate dual-stack. See http://www.cherrypy.org/ticket/871.
        if (not isinstance(self.bind_addr, basestring)
            and self.bind_addr[0] == '::' and family == socket.AF_INET6):
            try:
                self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
            except (AttributeError, socket.error):
                # Apparently, the socket option is not available in
                # this machine's TCP stack
                pass

        self.socket.bind(self.bind_addr)

    def tick(self):
        """Accept a new connection and put it on the Queue."""
        try:
            s, addr = self.socket.accept()
            prevent_socket_inheritance(s)
            if not self.ready:
                return
            if hasattr(s, 'settimeout'):
                s.settimeout(self.timeout)

            environ = self.environ.copy()
            # SERVER_SOFTWARE is common for IIS. It's also helpful for
            # us to pass a default value for the "Server" response header.
            if environ.get("SERVER_SOFTWARE") is None:
                environ["SERVER_SOFTWARE"] = "%s WSGI Server" % self.version
            # set a non-standard environ entry so the WSGI app can know what
            # the *real* server protocol is (and what features to support).
            # See http://www.faqs.org/rfcs/rfc2145.html.
            environ["ACTUAL_SERVER_PROTOCOL"] = self.protocol
            environ["SERVER_NAME"] = self.server_name

            if isinstance(self.bind_addr, basestring):
                # AF_UNIX. This isn't really allowed by WSGI, which doesn't
                # address unix domain sockets. But it's better than nothing.
                environ["SERVER_PORT"] = ""
            else:
                environ["SERVER_PORT"] = str(self.bind_addr[1])
                # optional values
                # Until we do DNS lookups, omit REMOTE_HOST
                environ["REMOTE_ADDR"] = addr[0]
                environ["REMOTE_PORT"] = str(addr[1])

            conn = self.ConnectionClass(s, self.wsgi_app, environ)
            self.requests.put(conn)
        except socket.timeout:
            # The only reason for the timeout in start() is so we can
            # notice keyboard interrupts on Win32, which don't interrupt
            # accept() by default
            return
        except socket.error, x:
            if x.args[0] in socket_error_eintr:
                # I *think* this is right. EINTR should occur when a signal
                # is received during the accept() call; all docs say retry
                # the call, and I *think* I'm reading it right that Python
                # will then go ahead and poll for and handle the signal
                # elsewhere. See http://www.cherrypy.org/ticket/707.
                return
            if x.args[0] in socket_errors_nonblocking:
                # Just try again. See http://www.cherrypy.org/ticket/479.
                return
            if x.args[0] in socket_errors_to_ignore:
                # Our socket was closed.
                # See http://www.cherrypy.org/ticket/686.
                return
            raise

    def _get_interrupt(self):
        return self._interrupt
    def _set_interrupt(self, interrupt):
        # Set to True while stop() runs so start() can busy-wait on it,
        # then store the real exception for start() to re-raise.
        self._interrupt = True
        self.stop()
        self._interrupt = interrupt
    interrupt = property(_get_interrupt, _set_interrupt,
                         doc="Set this to an Exception instance to "
                             "interrupt the server.")

    def stop(self):
        """Gracefully shutdown a server that is serving forever."""
        self.ready = False

        sock = getattr(self, "socket", None)
        if sock:
            if not isinstance(self.bind_addr, basestring):
                # Touch our own socket to make accept() return immediately.
                try:
                    host, port = sock.getsockname()[:2]
                except socket.error, x:
                    if x.args[0] not in socket_errors_to_ignore:
                        raise
                else:
                    # Note that we're explicitly NOT using AI_PASSIVE,
                    # here, because we want an actual IP to touch.
                    # localhost won't work if we've bound to a public IP,
                    # but it will if we bound to '0.0.0.0' (INADDR_ANY).
                    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                                  socket.SOCK_STREAM):
                        af, socktype, proto, canonname, sa = res
                        s = None
                        try:
                            s = socket.socket(af, socktype, proto)
                            # See http://groups.google.com/group/cherrypy-users/
                            #        browse_frm/thread/bbfe5eb39c904fe0
                            s.settimeout(1.0)
                            s.connect((host, port))
                            s.close()
                        except socket.error:
                            if s:
                                s.close()
            if hasattr(sock, "close"):
                sock.close()
            self.socket = None

        self.requests.stop(self.shutdown_timeout)

    def populate_ssl_environ(self):
        """Create WSGI environ entries to be merged into each request."""
        cert = open(self.ssl_certificate, 'rb').read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
        ssl_environ = {
            "wsgi.url_scheme": "https",
            "HTTPS": "on",
            # pyOpenSSL doesn't provide access to any of these AFAICT
##            'SSL_PROTOCOL': 'SSLv2',
##            SSL_CIPHER 	string 	The cipher specification name
##            SSL_VERSION_INTERFACE 	string 	The mod_ssl program version
##            SSL_VERSION_LIBRARY 	string 	The OpenSSL program version
            }

        # Server certificate attributes
        ssl_environ.update({
            'SSL_SERVER_M_VERSION': cert.get_version(),
            'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
##            'SSL_SERVER_V_START': Validity of server's certificate (start time),
##            'SSL_SERVER_V_END': Validity of server's certificate (end time),
            })

        for prefix, dn in [("I", cert.get_issuer()),
                           ("S", cert.get_subject())]:
            # X509Name objects don't seem to have a way to get the
            # complete DN string. Use str() and slice it instead,
            # because str(dn) == "<X509Name object '/C=US/ST=...'>"
            dnstr = str(dn)[18:-2]

            wsgikey = 'SSL_SERVER_%s_DN' % prefix
            ssl_environ[wsgikey] = dnstr

            # The DN should be of the form: /k1=v1/k2=v2, but we must allow
            # for any value to contain slashes itself (in a URL).
            while dnstr:
                pos = dnstr.rfind("=")
                dnstr, value = dnstr[:pos], dnstr[pos + 1:]
                pos = dnstr.rfind("/")
                dnstr, key = dnstr[:pos], dnstr[pos + 1:]
                if key and value:
                    wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
                    ssl_environ[wsgikey] = value

        self.environ.update(ssl_environ)
+
diff --git a/pyload/network/Browser.py b/pyload/network/Browser.py
new file mode 100644
index 000000000..262adaebd
--- /dev/null
+++ b/pyload/network/Browser.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from logging import getLogger
+
+from HTTPRequest import HTTPRequest
+from HTTPDownload import HTTPDownload
+
+# @ Deprecated
# @ Deprecated
class Browser(object):
    """Legacy facade combining an HTTPRequest (page loads) and an
    HTTPDownload (file transfers) behind one object."""

    __slots__ = ("log", "options", "bucket", "cj", "_size", "http", "dl")

    def __init__(self, bucket=None, options=None):
        self.log = getLogger("log")

        # BUGFIX: the original default `options={}` was a shared mutable
        # default, mutated by setOption/addAuth/removeAuth across ALL
        # Browser instances created without explicit options.
        self.options = {} if options is None else options  # holds pycurl options
        self.bucket = bucket

        self.cj = None  # needs to be set later
        self._size = 0  # remembered size of an aborted download

        self.renewHTTPRequest()
        self.dl = None

    def renewHTTPRequest(self):
        """Replace the underlying HTTPRequest, e.g. after option changes."""
        if hasattr(self, "http"):
            self.http.close()
        self.http = HTTPRequest(self.cj, self.options)

    def setLastURL(self, val):
        self.http.lastURL = val

    # tunnel some attributes from HTTP Request to Browser
    lastEffectiveURL = property(lambda self: self.http.lastEffectiveURL)
    lastURL = property(lambda self: self.http.lastURL, setLastURL)
    code = property(lambda self: self.http.code)
    cookieJar = property(lambda self: self.cj)

    def setCookieJar(self, cj):
        self.cj = cj
        self.http.cj = cj

    @property
    def speed(self):
        """Current download speed in bytes/s, 0 when no download runs."""
        if self.dl:
            return self.dl.speed
        return 0

    @property
    def size(self):
        """Total size of the current (or last aborted) download."""
        if self._size:
            return self._size
        if self.dl:
            return self.dl.size
        return 0

    @property
    def name(self):
        """Filename from content-disposition, '' when unknown."""
        if self.dl:
            return self.dl.name
        else:
            return ""

    @property
    def arrived(self):
        """Bytes received so far."""
        if self.dl:
            return self.dl.arrived
        return 0

    @property
    def percent(self):
        if not self.size:
            return 0
        return (self.arrived * 100) / self.size

    def clearCookies(self):
        if self.cj:
            self.cj.clear()
        self.http.clearCookies()

    def clearReferer(self):
        self.http.lastURL = None

    def abortDownloads(self):
        self.http.abort = True
        if self.dl:
            self._size = self.dl.size  # keep size available for progress display
            self.dl.abort = True

    def httpDownload(self, url, filename, get={}, post={}, ref=True, cookies=True, chunks=1, resume=False,
                     disposition=False):
        """ this can also download ftp """
        self._size = 0
        self.dl = HTTPDownload(url, filename, get, post, self.lastEffectiveURL if ref else None,
                               self.cj if cookies else None, self.bucket, self.options, disposition)
        name = self.dl.download(chunks, resume)
        self._size = self.dl.size

        self.dl = None

        return name

    def load(self, *args, **kwargs):
        """ retrieves page """
        return self.http.load(*args, **kwargs)

    def putHeader(self, name, value):
        """ add a header to the request """
        self.http.putHeader(name, value)

    def addAuth(self, pwd):
        """Adds user and pw for http auth

        :param pwd: string, user:password
        """
        self.options["auth"] = pwd
        self.renewHTTPRequest()  # we need a new request

    def removeAuth(self):
        if "auth" in self.options:
            del self.options["auth"]
        self.renewHTTPRequest()

    def setOption(self, name, value):
        """Adds an option to the request, see HTTPRequest for existing ones"""
        self.options[name] = value

    def deleteOption(self, name):
        if name in self.options:
            del self.options[name]

    def clearHeaders(self):
        self.http.clearHeaders()

    def close(self):
        """ cleanup """
        if hasattr(self, "http"):
            self.http.close()
            del self.http
        if hasattr(self, "dl"):
            del self.dl
        if hasattr(self, "cj"):
            del self.cj
+
+if __name__ == "__main__":
+ browser = Browser()#proxies={"socks5": "localhost:5000"})
+ ip = "http://www.whatismyip.com/automation/n09230945.asp"
+ #browser.getPage("http://google.com/search?q=bar")
+ #browser.getPage("https://encrypted.google.com/")
+ #print browser.getPage(ip)
+ #print browser.getRedirectLocation("http://google.com/")
+ #browser.getPage("https://encrypted.google.com/")
+ #browser.getPage("http://google.com/search?q=bar")
+
+ browser.httpDownload("http://speedtest.netcologne.de/test_10mb.bin", "test_10mb.bin")
+
diff --git a/pyload/network/Bucket.py b/pyload/network/Bucket.py
new file mode 100644
index 000000000..db67faa4a
--- /dev/null
+++ b/pyload/network/Bucket.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from time import time
+from threading import Lock
+
# 10kb minimum rate
MIN_RATE = 10240


class Bucket:
    """Token-bucket rate limiter used to throttle download throughput."""

    def __init__(self):
        self.rate = 0            # bytes per second, maximum targeted throughput
        self.tokens = 0          # currently available byte budget
        self.timestamp = time()  # last token refill
        self.lock = Lock()

    def __nonzero__(self):
        # the bucket only throttles once a usable rate is configured
        return self.rate >= MIN_RATE

    def setRate(self, rate):
        with self.lock:
            self.rate = int(rate)

    def consumed(self, amount):
        """ return the time the process has to sleep, after it consumed a specified amount """
        if self.rate < MIN_RATE:
            return 0  # may become unresponsive otherwise
        with self.lock:
            self.calc_tokens()
            self.tokens -= amount
            # negative balance -> caller must sleep until the deficit refills
            wait = -self.tokens / float(self.rate) if self.tokens < 0 else 0
        return wait

    def calc_tokens(self):
        # refill proportionally to elapsed time, capped at one second's worth
        if self.tokens < self.rate:
            now = time()
            delta = self.rate * (now - self.timestamp)
            self.tokens = min(self.rate, self.tokens + delta)
            self.timestamp = now
+
diff --git a/pyload/network/CookieJar.py b/pyload/network/CookieJar.py
new file mode 100644
index 000000000..3d39c66b9
--- /dev/null
+++ b/pyload/network/CookieJar.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from time import time
+from Cookie import SimpleCookie
+
class CookieJar(SimpleCookie):
    """SimpleCookie subclass with convenience accessors for pyLoad."""

    def getCookie(self, name):
        """Return the value stored under *name*."""
        return self[name].value

    def setCookie(self, domain, name, value, path="/", exp=None, secure="FALSE"):
        """Store a cookie; expiry defaults to roughly 180 days from now."""
        exp = exp or time() + 3600 * 24 * 180

        self[name] = value
        morsel = self[name]
        morsel["domain"] = domain
        morsel["path"] = path
        morsel["expires"] = exp
        if secure == "TRUE":
            morsel["secure"] = secure
diff --git a/pyload/network/HTTPChunk.py b/pyload/network/HTTPChunk.py
new file mode 100644
index 000000000..4389aef28
--- /dev/null
+++ b/pyload/network/HTTPChunk.py
@@ -0,0 +1,298 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+from os import remove, stat, fsync
+from os.path import exists
+from time import sleep
+from re import search
+
+import codecs
+import pycurl
+
+from pyload.utils import remove_chars
+from pyload.utils.fs import fs_encode
+
+from HTTPRequest import HTTPRequest
+
class WrongFormat(Exception):
    """Raised when a .chunks info file does not match the expected format."""
    pass
+
+
class ChunkInfo():
    """Persistent metadata of a chunked download (.chunks sidecar file).

    Stores the target name, total size and a (filename, byte-range) entry per
    chunk so an interrupted download can be resumed.
    """

    def __init__(self, name):
        self.name = unicode(name)
        self.size = 0
        self.resume = False  # set by the owner when loaded from a valid file
        self.chunks = []

    def __repr__(self):
        ret = "ChunkInfo: %s, %s\n" % (self.name, self.size)
        for i, c in enumerate(self.chunks):
            ret += "%s# %s\n" % (i, c[1])

        return ret

    def setSize(self, size):
        self.size = int(size)

    def addChunk(self, name, range):
        self.chunks.append((name, range))

    def clear(self):
        self.chunks = []

    def createChunks(self, chunks):
        """Split self.size into *chunks* contiguous byte ranges."""
        self.clear()
        # integer division: the final chunk absorbs any remainder
        chunk_size = self.size / chunks

        current = 0
        for i in range(chunks):
            end = self.size - 1 if (i == chunks - 1) else current + chunk_size
            self.addChunk("%s.chunk%s" % (self.name, i), (current, end))
            current += chunk_size + 1

    def save(self):
        """Write the info file; the handle is closed even if writing fails."""
        fs_name = fs_encode("%s.chunks" % self.name)
        fh = codecs.open(fs_name, "w", "utf_8")
        try:
            fh.write("name:%s\n" % self.name)
            fh.write("size:%s\n" % self.size)
            for i, c in enumerate(self.chunks):
                fh.write("#%d:\n" % i)
                fh.write("\tname:%s\n" % c[0])
                fh.write("\trange:%i-%i\n" % c[1])
        finally:
            fh.close()

    @staticmethod
    def load(name):
        """Parse a .chunks file.

        :raises IOError: when the file does not exist
        :raises WrongFormat: when header or chunk lines are malformed

        BUGFIX: the handle is now closed on every path; the original leaked
        it when WrongFormat was raised inside the chunk-parsing loop.
        """
        fs_name = fs_encode("%s.chunks" % name)
        if not exists(fs_name):
            raise IOError()
        fh = codecs.open(fs_name, "r", "utf_8")
        try:
            name = fh.readline()[:-1]
            size = fh.readline()[:-1]
            if name.startswith("name:") and size.startswith("size:"):
                name = name[5:]
                size = size[5:]
            else:
                raise WrongFormat()
            ci = ChunkInfo(name)
            ci.loaded = True
            ci.setSize(size)
            while True:
                if not fh.readline():  # skip the "#i:" marker line
                    break
                name = fh.readline()[1:-1]
                range = fh.readline()[1:-1]
                if name.startswith("name:") and range.startswith("range:"):
                    name = name[5:]
                    range = range[6:].split("-")
                else:
                    raise WrongFormat()

                ci.addChunk(name, (long(range[0]), long(range[1])))
        finally:
            fh.close()
        return ci

    def remove(self):
        """Delete the info file if it exists."""
        fs_name = fs_encode("%s.chunks" % self.name)
        if exists(fs_name):
            remove(fs_name)

    def getCount(self):
        return len(self.chunks)

    def getChunkName(self, index):
        return self.chunks[index][0]

    def getChunkRange(self, index):
        return self.chunks[index][1]
+
+
class HTTPChunk(HTTPRequest):
    """One connection of a (possibly multi-connection) HTTPDownload.

    Owns a pycurl handle that fetches one byte range of the parent download
    and appends it to the chunk's own file; the parent HTTPDownload drives
    all chunks through a CurlMulti handle.
    """

    def __init__(self, id, parent, range=None, resume=False):
        # NOTE: HTTPRequest.__init__ is intentionally NOT called; this class
        # sets up its own curl handle and response state.
        self.id = id
        self.p = parent  # HTTPDownload instance
        self.range = range  # tuple (start, end)
        self.resume = resume
        self.log = parent.log

        self.size = range[1] - range[0] if range else -1
        self.arrived = 0  # bytes written to the chunk file so far
        self.lastURL = self.p.referer

        self.c = pycurl.Curl()

        self.header = ""
        self.headerParsed = False  # indicates if the header has been processed

        self.fp = None  # file handle

        self.initHandle()
        self.setInterface(self.p.options)

        self.BOMChecked = False  # check and remove byte order mark

        self.rep = None

        self.sleep = 0.000  # adaptive sleep between writes (CPU saver)
        self.lastSize = 0

    def __repr__(self):
        return "<HTTPChunk id=%d, size=%d, arrived=%d>" % (self.id, self.size, self.arrived)

    @property
    def cj(self):
        # the cookie jar is shared through the parent download
        return self.p.cj

    def getHandle(self):
        """ returns a Curl handle ready to use for perform/multiperform """

        self.setRequestContext(self.p.url, self.p.get, self.p.post, self.p.referer, self.p.cj)
        self.c.setopt(pycurl.WRITEFUNCTION, self.writeBody)
        self.c.setopt(pycurl.HEADERFUNCTION, self.writeHeader)

        # request all bytes, since some servers in russia seems to have a defect arihmetic unit

        fs_name = fs_encode(self.p.info.getChunkName(self.id))
        if self.resume:
            # append to the existing chunk file and continue where it ends
            self.fp = open(fs_name, "ab")
            self.arrived = self.fp.tell()
            if not self.arrived:
                self.arrived = stat(fs_name).st_size

            if self.range:
                #do nothing if chunk already finished
                if self.arrived + self.range[0] >= self.range[1]: return None

                if self.id == len(self.p.info.chunks) - 1: #as last chunk dont set end range, so we get everything
                    range = "%i-" % (self.arrived + self.range[0])
                else:
                    range = "%i-%i" % (self.arrived + self.range[0], min(self.range[1] + 1, self.p.size - 1))

                self.log.debug("Chunked resume with range %s" % range)
                self.c.setopt(pycurl.RANGE, range)
            else:
                self.log.debug("Resume File from %i" % self.arrived)
                self.c.setopt(pycurl.RESUME_FROM, self.arrived)

        else:
            if self.range:
                if self.id == len(self.p.info.chunks) - 1: # see above
                    range = "%i-" % self.range[0]
                else:
                    range = "%i-%i" % (self.range[0], min(self.range[1] + 1, self.p.size - 1))

                self.log.debug("Chunked with range %s" % range)
                self.c.setopt(pycurl.RANGE, range)

            self.fp = open(fs_name, "wb")

        return self.c

    def writeHeader(self, buf):
        """curl HEADERFUNCTION: collect headers; the first (rangeless) chunk
        parses them to learn size/chunk support/filename."""
        self.header += buf
        #@TODO forward headers?, this is possibly unneeded, when we just parse valid 200 headers
        # as first chunk, we will parse the headers
        if not self.range and self.header.endswith("\r\n\r\n"):
            self.parseHeader()
        elif not self.range and buf.startswith("150") and "data connection" in buf: #ftp file size parsing
            size = search(r"(\d+) bytes", buf)
            if size:
                self.p.size = int(size.group(1))
                self.p.chunkSupport = True

        self.headerParsed = True

    def writeBody(self, buf):
        """curl WRITEFUNCTION: write received data, throttle via bucket or
        adaptive sleeping; returning 0 makes curl abort this transfer."""
        #ignore BOM, it confuses unrar
        if not self.BOMChecked:
            if [ord(b) for b in buf[:3]] == [239, 187, 191]:
                buf = buf[3:]
            self.BOMChecked = True

        size = len(buf)

        self.arrived += size

        self.fp.write(buf)

        if self.p.bucket:
            sleep(self.p.bucket.consumed(size))
        else:
            # Avoid small buffers, increasing sleep time slowly if buffer size gets smaller
            # otherwise reduce sleep time percentile (values are based on tests)
            # So in general cpu time is saved without reducing bandwidth too much

            if size < self.lastSize:
                self.sleep += 0.002
            else:
                self.sleep *= 0.7

            self.lastSize = size

            sleep(self.sleep)

        if self.range and self.arrived > self.size:
            return 0 #close if we have enough data

    def parseHeader(self):
        """parse data from received header"""
        for orgline in self.decodeResponse(self.header).splitlines():
            line = orgline.strip().lower()
            # server advertises byte-range support -> chunked download possible
            if line.startswith("accept-ranges") and "bytes" in line:
                self.p.chunkSupport = True

            if "content-disposition" in line:

                m = search("filename(?P<type>=|\*=(?P<enc>.+)'')(?P<name>.*)", line)
                if m:
                    name = remove_chars(m.groupdict()['name'], "\"';/").strip()
                    self.p._name = name
                    self.log.debug("Content-Disposition: %s" % name)

            if not self.resume and line.startswith("content-length"):
                self.p.size = int(line.split(":")[1])

        self.headerParsed = True

    def stop(self):
        """The download will not proceed after next call of writeBody"""
        self.range = [0,0]
        self.size = 0

    def resetRange(self):
        """ Reset the range, so the download will load all data available """
        self.range = None

    def setRange(self, range):
        self.range = range
        self.size = range[1] - range[0]

    def flushFile(self):
        """ flush and close file """
        self.fp.flush()
        fsync(self.fp.fileno()) #make sure everything was written to disk
        self.fp.close() #needs to be closed, or merging chunks will fail

    def close(self):
        """ closes everything, unusable after this """
        if self.fp: self.fp.close()
        self.c.close()
        if hasattr(self, "p"): del self.p
diff --git a/pyload/network/HTTPDownload.py b/pyload/network/HTTPDownload.py
new file mode 100644
index 000000000..04bf2363a
--- /dev/null
+++ b/pyload/network/HTTPDownload.py
@@ -0,0 +1,338 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from os import remove
+from os.path import dirname
+from time import time
+from shutil import move
+from logging import getLogger
+
+import pycurl
+
+from HTTPChunk import ChunkInfo, HTTPChunk
+from HTTPRequest import BadHeader
+
+from pyload.plugins.Base import Abort
+from pyload.utils.fs import save_join, fs_encode
+
+# TODO: save content-disposition for resuming
+
class HTTPDownload():
    """ loads an url, http + ftp supported """
    # Drives several HTTPChunk connections through one pycurl.CurlMulti
    # handle, merges the chunk files on success and tracks speed/progress.

    def __init__(self, url, filename, get={}, post={}, referer=None, cj=None, bucket=None,
                 options={}, disposition=False):
        self.url = url
        self.filename = filename  #complete file destination, not only name
        self.get = get
        self.post = post
        self.referer = referer
        self.cj = cj  #cookiejar if cookies are needed
        self.bucket = bucket
        self.options = options
        self.disposition = disposition
        # all arguments

        self.abort = False
        self.size = 0
        self._name = ""  # will be parsed from content disposition

        self.chunks = []

        self.log = getLogger("log")

        try:
            # a valid .chunks sidecar file means we can resume
            self.info = ChunkInfo.load(filename)
            self.info.resume = True  #resume is only possible with valid info file
            self.size = self.info.size
            self.infoSaved = True
        except IOError:
            # no sidecar file -> fresh download
            self.info = ChunkInfo(filename)

        self.chunkSupport = None
        self.m = pycurl.CurlMulti()

        #needed for speed calculation
        self.lastArrived = []
        self.speeds = []
        self.lastSpeeds = [0, 0]

    @property
    def speed(self):
        # average of the current chunk speeds and the two previous samples
        last = [sum(x) for x in self.lastSpeeds if x]
        return (sum(self.speeds) + sum(last)) / (1 + len(last))

    @property
    def arrived(self):
        return sum([c.arrived for c in self.chunks])

    @property
    def percent(self):
        if not self.size: return 0
        return (self.arrived * 100) / self.size

    @property
    def name(self):
        # name from content-disposition, only honored when requested
        return self._name if self.disposition else ""

    def _copyChunks(self):
        """Concatenate all chunk files into the first one and rename it."""
        init = fs_encode(self.info.getChunkName(0)) #initial chunk name

        if self.info.getCount() > 1:
            fo = open(init, "rb+") #first chunkfile
            for i in range(1, self.info.getCount()):
                #input file
                fo.seek(
                    self.info.getChunkRange(i - 1)[1] + 1) #seek to beginning of chunk, to get rid of overlapping chunks
                fname = fs_encode("%s.chunk%d" % (self.filename, i))
                fi = open(fname, "rb")
                buf = 32 * 1024
                while True: #copy in chunks, consumes less memory
                    data = fi.read(buf)
                    if not data:
                        break
                    fo.write(data)
                fi.close()
                if fo.tell() < self.info.getChunkRange(i)[1]:
                    # merged file is short -> data is missing, give up
                    fo.close()
                    remove(init)
                    self.info.remove() #there are probably invalid chunks
                    raise Exception("Downloaded content was smaller than expected. Try to reduce download connections.")
                remove(fname) #remove chunk
            fo.close()

        if self.name:
            self.filename = save_join(dirname(self.filename), self.name)

        move(init, fs_encode(self.filename))
        self.info.remove() #remove info file

    def download(self, chunks=1, resume=False):
        """ returns new filename or None """

        chunks = max(1, chunks)
        resume = self.info.resume and resume

        try:
            self._download(chunks, resume)
        except pycurl.error, e:
            #code 33 - no resume
            code = e.args[0]
            if code == 33:
                # try again without resume
                self.log.debug("Errno 33 -> Restart without resume")

                #remove old handles
                for chunk in self.chunks:
                    self.closeChunk(chunk)

                return self._download(chunks, False)
            else:
                raise
        finally:
            self.close()

        return self.name

    def _download(self, chunks, resume):
        """Core perform loop: create chunks once the size is known, poll the
        multi handle, handle failures (with fallback to one connection),
        update speed samples and finally merge the chunk files."""
        if not resume:
            self.info.clear()
            self.info.addChunk("%s.chunk0" % self.filename, (0, 0)) #create an initial entry

        self.chunks = []

        init = HTTPChunk(0, self, None, resume) #initial chunk that will load complete file (if needed)

        self.chunks.append(init)
        self.m.add_handle(init.getHandle())

        lastFinishCheck = 0
        lastTimeCheck = 0
        chunksDone = set() # list of curl handles that are finished
        chunksCreated = False
        done = False
        if self.info.getCount() > 1: # This is a resume, if we were chunked originally assume still can
            self.chunkSupport = True

        while 1:
            #need to create chunks
            if not chunksCreated and self.chunkSupport and self.size: #will be set later by first chunk

                if not resume:
                    self.info.setSize(self.size)
                    self.info.createChunks(chunks)
                    self.info.save()

                chunks = self.info.getCount()

                init.setRange(self.info.getChunkRange(0))

                for i in range(1, chunks):
                    c = HTTPChunk(i, self, self.info.getChunkRange(i), resume)

                    handle = c.getHandle()
                    if handle:
                        self.chunks.append(c)
                        self.m.add_handle(handle)
                    else:
                        #close immediately
                        self.log.debug("Invalid curl handle -> closed")
                        c.close()

                chunksCreated = True

            while 1:
                ret, num_handles = self.m.perform()
                if ret != pycurl.E_CALL_MULTI_PERFORM:
                    break

            t = time()

            # reduce these calls
            # when num_q is 0, the loop is exited
            while lastFinishCheck + 0.5 < t:
                # list of failed curl handles
                failed = []
                ex = None # save only last exception, we can only raise one anyway

                num_q, ok_list, err_list = self.m.info_read()
                for c in ok_list:
                    chunk = self.findChunk(c)
                    try: # check if the header implies success, else add it to failed list
                        chunk.verifyHeader()
                    except BadHeader, e:
                        self.log.debug("Chunk %d failed: %s" % (chunk.id + 1, str(e)))
                        failed.append(chunk)
                        ex = e
                    else:
                        chunksDone.add(c)

                for c in err_list:
                    curl, errno, msg = c
                    chunk = self.findChunk(curl)
                    #test if chunk was finished
                    # errno 23 with "0 !=" means writeBody returned 0 on
                    # purpose (enough data) -> treat as finished, not failed
                    if errno != 23 or "0 !=" not in msg:
                        failed.append(chunk)
                        ex = pycurl.error(errno, msg)
                        self.log.debug("Chunk %d failed: %s" % (chunk.id + 1, str(ex)))
                        continue

                    try: # check if the header implies success, else add it to failed list
                        chunk.verifyHeader()
                    except BadHeader, e:
                        self.log.debug("Chunk %d failed: %s" % (chunk.id + 1, str(e)))
                        failed.append(chunk)
                        ex = e
                    else:
                        chunksDone.add(curl)
                if not num_q: # no more info to get

                    # check if init is not finished so we reset download connections
                    # note that other chunks are closed and everything downloaded with initial connection
                    if failed and init not in failed and init.c not in chunksDone:
                        self.log.error(_("Download chunks failed, fallback to single connection | %s" % (str(ex))))

                        #list of chunks to clean and remove
                        to_clean = filter(lambda x: x is not init, self.chunks)
                        for chunk in to_clean:
                            self.closeChunk(chunk)
                            self.chunks.remove(chunk)
                            remove(fs_encode(self.info.getChunkName(chunk.id)))

                        #let first chunk load the rest and update the info file
                        init.resetRange()
                        self.info.clear()
                        self.info.addChunk("%s.chunk0" % self.filename, (0, self.size))
                        self.info.save()
                    elif failed:
                        raise ex

                    lastFinishCheck = t

                    if len(chunksDone) >= len(self.chunks):
                        if len(chunksDone) > len(self.chunks):
                            self.log.warning("Finished download chunks size incorrect, please report bug.")
                        done = True #all chunks loaded

                    break

            if done:
                break #all chunks loaded

            # calc speed once per second, averaging over 3 seconds
            if lastTimeCheck + 1 < t:
                diff = [c.arrived - (self.lastArrived[i] if len(self.lastArrived) > i else 0) for i, c in
                        enumerate(self.chunks)]

                self.lastSpeeds[1] = self.lastSpeeds[0]
                self.lastSpeeds[0] = self.speeds
                self.speeds = [float(a) / (t - lastTimeCheck) for a in diff]
                self.lastArrived = [c.arrived for c in self.chunks]
                lastTimeCheck = t

            if self.abort:
                raise Abort()

            self.m.select(1)

        for chunk in self.chunks:
            chunk.flushFile() #make sure downloads are written to disk

        self._copyChunks()

    def findChunk(self, handle):
        """ linear search to find a chunk (should be ok since chunk size is usually low) """
        for chunk in self.chunks:
            if chunk.c == handle: return chunk

    def closeChunk(self, chunk):
        """Remove the chunk's handle from the multi handle, then close it."""
        try:
            self.m.remove_handle(chunk.c)
        except pycurl.error, e:
            self.log.debug("Error removing chunk: %s" % str(e))
        finally:
            chunk.close()

    def close(self):
        """ cleanup """
        for chunk in self.chunks:
            self.closeChunk(chunk)

        self.chunks = []
        if hasattr(self, "m"):
            self.m.close()
            del self.m
        if hasattr(self, "cj"):
            del self.cj
        if hasattr(self, "info"):
            del self.info
+
+if __name__ == "__main__":
+ url = "http://speedtest.netcologne.de/test_100mb.bin"
+
+ from Bucket import Bucket
+
+ bucket = Bucket()
+ bucket.setRate(200 * 1024)
+ bucket = None
+
+ print "starting"
+
+ dwnld = HTTPDownload(url, "test_100mb.bin", bucket=bucket)
+ dwnld.download(chunks=3, resume=True)
diff --git a/pyload/network/HTTPRequest.py b/pyload/network/HTTPRequest.py
new file mode 100644
index 000000000..ebf2c3132
--- /dev/null
+++ b/pyload/network/HTTPRequest.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+import pycurl
+
+from codecs import getincrementaldecoder, lookup, BOM_UTF8
+from urllib import quote, urlencode
+from httplib import responses
+from logging import getLogger
+from cStringIO import StringIO
+
+from pyload.plugins.Base import Abort
+
def myquote(url):
    """Percent-quote *url*, encoding unicode input to UTF-8 first."""
    if isinstance(url, unicode):
        url = url.encode('utf8')
    return quote(url, safe="%/:=&?~#+!$,;'@()*[]")
+
def myurlencode(data):
    """urlencode a mapping, UTF-8 encoding any unicode keys or values."""
    enc = lambda s: s.encode('utf8') if isinstance(s, unicode) else s
    pairs = dict(data).iteritems()
    return urlencode(dict((enc(key), enc(value)) for key, value in pairs))
+
# HTTP status codes treated as failures (404 deliberately excluded)
bad_headers = range(400, 404) + range(405, 418) + range(500, 506)


class BadHeader(Exception):
    """Raised when the server answered with a failure status code."""

    def __init__(self, code, content=""):
        message = "Bad server response: %s %s" % (code, responses.get(int(code), "Unknown Header"))
        Exception.__init__(self, message)
        self.code = code
        self.content = content
+
+
class HTTPRequest():
    """Single pycurl handle for loading pages (not file downloads).

    Buffers the response in a StringIO (capped around 1 MB), syncs cookies
    with an external cookie jar and remembers the last/effective URL for
    referer handling.
    """

    def __init__(self, cookies=None, options=None):
        # NOTE(review): options=None would raise in setInterface/setOptions
        # below; every visible caller passes a populated options mapping —
        # confirm before relying on the default.
        self.c = pycurl.Curl()
        self.rep = StringIO()

        self.cj = cookies #cookiejar

        self.lastURL = None
        self.lastEffectiveURL = None
        self.abort = False
        self.code = 0 # last http code

        self.header = ""

        self.headers = [] #temporary request header

        self.initHandle()
        self.setInterface(options)
        self.setOptions(options)

        self.c.setopt(pycurl.WRITEFUNCTION, self.write)
        self.c.setopt(pycurl.HEADERFUNCTION, self.writeHeader)

        self.log = getLogger("log")


    def initHandle(self):
        """ sets common options to curl handle """
        self.c.setopt(pycurl.FOLLOWLOCATION, 1)
        self.c.setopt(pycurl.MAXREDIRS, 5)
        self.c.setopt(pycurl.CONNECTTIMEOUT, 30)
        self.c.setopt(pycurl.NOSIGNAL, 1)
        self.c.setopt(pycurl.NOPROGRESS, 1)
        if hasattr(pycurl, "AUTOREFERER"):
            self.c.setopt(pycurl.AUTOREFERER, 1)
        self.c.setopt(pycurl.SSL_VERIFYPEER, 0)
        # Interval for low speed, detects connection loss, but can abort dl if hoster stalls the download
        self.c.setopt(pycurl.LOW_SPEED_TIME, 45)
        self.c.setopt(pycurl.LOW_SPEED_LIMIT, 5)

        #self.c.setopt(pycurl.VERBOSE, 1)

        self.c.setopt(pycurl.USERAGENT,
                      "Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0")
        # version_info()[7] is the libz version -> compression available
        if pycurl.version_info()[7]:
            self.c.setopt(pycurl.ENCODING, "gzip, deflate")
        self.c.setopt(pycurl.HTTPHEADER, ["Accept: */*",
                                          "Accept-Language: en-US,en",
                                          "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7",
                                          "Connection: keep-alive",
                                          "Keep-Alive: 300",
                                          "Expect:"])

    def setInterface(self, options):
        """Apply interface, proxy, ipv6, auth and timeout settings.

        Expects an options mapping with at least the keys "interface",
        "proxies" and "ipv6".
        """

        interface, proxy, ipv6 = options["interface"], options["proxies"], options["ipv6"]

        if interface and interface.lower() != "none":
            self.c.setopt(pycurl.INTERFACE, str(interface))

        if proxy:
            if proxy["type"] == "socks4":
                self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS4)
            elif proxy["type"] == "socks5":
                self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
            else:
                self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_HTTP)

            self.c.setopt(pycurl.PROXY, str(proxy["address"]))
            self.c.setopt(pycurl.PROXYPORT, proxy["port"])

            if proxy["username"]:
                self.c.setopt(pycurl.PROXYUSERPWD, str("%s:%s" % (proxy["username"], proxy["password"])))

        if ipv6:
            self.c.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
        else:
            self.c.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)

        if "auth" in options:
            self.c.setopt(pycurl.USERPWD, str(options["auth"]))

        if "timeout" in options:
            self.c.setopt(pycurl.LOW_SPEED_TIME, options["timeout"])

    def setOptions(self, options):
        """ Sets same options as available in pycurl """
        # only keys that name an actual pycurl constant are applied
        for k, v in options.iteritems():
            if hasattr(pycurl, k):
                self.c.setopt(getattr(pycurl, k), v)

    def addCookies(self):
        """ put cookies from curl handle to cj """
        if self.cj:
            self.cj.addCookies(self.c.getinfo(pycurl.INFO_COOKIELIST))

    def getCookies(self):
        """ add cookies from cj to curl handle """
        if self.cj:
            for c in self.cj.getCookies():
                self.c.setopt(pycurl.COOKIELIST, c)
        return

    def clearCookies(self):
        self.c.setopt(pycurl.COOKIELIST, "")

    def setRequestContext(self, url, get, post, referer, cookies, multipart=False):
        """ sets everything needed for the request """

        url = myquote(url)

        if get:
            get = urlencode(get)
            url = "%s?%s" % (url, get)

        self.c.setopt(pycurl.URL, url)
        self.lastURL = url

        if post:
            self.c.setopt(pycurl.POST, 1)
            if not multipart:
                if type(post) == unicode:
                    post = str(post) #unicode not allowed
                elif type(post) == str:
                    pass
                else:
                    post = myurlencode(post)

                self.c.setopt(pycurl.POSTFIELDS, post)
            else:
                # multipart upload: pass the fields as a list of pairs
                post = [(x, y.encode('utf8') if type(y) == unicode else y ) for x, y in post.iteritems()]
                self.c.setopt(pycurl.HTTPPOST, post)
        else:
            self.c.setopt(pycurl.POST, 0)

        if referer and self.lastURL:
            self.c.setopt(pycurl.REFERER, str(self.lastURL))

        if cookies:
            # enable curl's in-memory cookie engine, then seed it from cj
            self.c.setopt(pycurl.COOKIEFILE, "")
            self.c.setopt(pycurl.COOKIEJAR, "")
            self.getCookies()


    def load(self, url, get={}, post={}, referer=True, cookies=True, just_header=False, multipart=False, decode=False):
        """ load and returns a given page """

        self.setRequestContext(url, get, post, referer, cookies, multipart)

        # TODO: use http/rfc message instead
        self.header = ""

        self.c.setopt(pycurl.HTTPHEADER, self.headers)

        if just_header:
            # header-only request: suppress redirects and the body, restore
            # the handle state afterwards
            self.c.setopt(pycurl.FOLLOWLOCATION, 0)
            self.c.setopt(pycurl.NOBODY, 1) #TODO: nobody= no post?

            # overwrite HEAD request, we want a common request type
            if post:
                self.c.setopt(pycurl.CUSTOMREQUEST, "POST")
            else:
                self.c.setopt(pycurl.CUSTOMREQUEST, "GET")

            try:
                self.c.perform()
                rep = self.header
            finally:
                self.c.setopt(pycurl.FOLLOWLOCATION, 1)
                self.c.setopt(pycurl.NOBODY, 0)
                self.c.unsetopt(pycurl.CUSTOMREQUEST)

        else:
            self.c.perform()
            rep = self.getResponse()

        self.c.setopt(pycurl.POSTFIELDS, "")
        self.lastEffectiveURL = self.c.getinfo(pycurl.EFFECTIVE_URL)
        self.code = self.verifyHeader()

        self.addCookies()

        if decode:
            rep = self.decodeResponse(rep)

        return rep

    def verifyHeader(self):
        """ raise an exceptions on bad headers """
        code = int(self.c.getinfo(pycurl.RESPONSE_CODE))
        # TODO: raise anyway to be consistent, also rename exception
        if code in bad_headers:
            #404 will NOT raise an exception
            raise BadHeader(code, self.getResponse())
        return code

    def checkHeader(self):
        """ check if header indicates failure"""
        return int(self.c.getinfo(pycurl.RESPONSE_CODE)) not in bad_headers

    def getResponse(self):
        """ retrieve response from string io """
        if self.rep is None: return ""
        value = self.rep.getvalue()
        self.rep.close()
        self.rep = StringIO()
        return value

    def decodeResponse(self, rep):
        """ decode with correct encoding, relies on header """
        header = self.header.splitlines()
        encoding = "utf8" # default encoding

        for line in header:
            line = line.lower().replace(" ", "")
            # only look at text/application content-type lines for a charset
            if not line.startswith("content-type:") or\
               ("text" not in line and "application" not in line):
                continue

            none, delemiter, charset = line.rpartition("charset=")
            if delemiter:
                charset = charset.split(";")
                if charset:
                    encoding = charset[0]

        try:
            #self.log.debug("Decoded %s" % encoding )
            # strip a UTF-8 BOM if present by switching to the -sig codec
            if lookup(encoding).name == 'utf-8' and rep.startswith(BOM_UTF8):
                encoding = 'utf-8-sig'

            decoder = getincrementaldecoder(encoding)("replace")
            rep = decoder.decode(rep, True)

            #TODO: html_unescape as default

        except LookupError:
            self.log.debug("No Decoder found for %s" % encoding)
        except Exception:
            self.log.debug("Error when decoding string from %s." % encoding)

        return rep

    def write(self, buf):
        """ writes response """
        # dump oversized responses to disk and bail out instead of growing
        # the in-memory buffer without bound
        if self.rep.tell() > 1000000 or self.abort:
            rep = self.getResponse()
            if self.abort: raise Abort()
            f = open("response.dump", "wb")
            f.write(rep)
            f.close()
            raise Exception("Loaded Url exceeded limit")

        self.rep.write(buf)

    def writeHeader(self, buf):
        """ writes header """
        self.header += buf

    def putHeader(self, name, value):
        self.headers.append("%s: %s" % (name, value))

    def clearHeaders(self):
        self.headers = []

    def close(self):
        """ cleanup, unusable after this """
        self.rep.close()
        if hasattr(self, "cj"):
            del self.cj
        if hasattr(self, "c"):
            self.c.close()
            del self.c
+
+if __name__ == "__main__":
+ url = "http://pyload.org"
+ c = HTTPRequest()
+ print c.load(url)
+
diff --git a/pyload/network/RequestFactory.py b/pyload/network/RequestFactory.py
new file mode 100644
index 000000000..e6015219f
--- /dev/null
+++ b/pyload/network/RequestFactory.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay, RaNaN
+"""
+
+from Bucket import Bucket
+from CookieJar import CookieJar
+
+from pyload.plugins.network.DefaultRequest import DefaultRequest, DefaultDownload
+
+class RequestFactory():
+ def __init__(self, core):
+ self.core = core
+ self.bucket = Bucket()
+ self.updateBucket()
+
+ def getURL(self, *args, **kwargs):
+ """ see HTTPRequest for argument list """
+ h = DefaultRequest(self.getConfig())
+ try:
+ rep = h.load(*args, **kwargs)
+ finally:
+ h.close()
+
+ return rep
+
+ ########## old api methods above
+
+ def getRequest(self, context=None, klass=DefaultRequest):
+ """ Creates a request with new or given context """
+ # also accepts cookiejars, not so nice, since it depends on implementation
+ if isinstance(context, CookieJar):
+ return klass(self.getConfig(), context)
+ elif context:
+ return klass(*context)
+ else:
+ return klass(self.getConfig())
+
+ def getDownloadRequest(self, request=None, klass=DefaultDownload):
+ """ Instantiates a instance for downloading """
+ # TODO: load with plugin manager
+ return klass(self.bucket, request)
+
+ def getInterface(self):
+ return self.core.config["download"]["interface"]
+
+ def getProxies(self):
+ """ returns a proxy list for the request classes """
+ if not self.core.config["proxy"]["proxy"]:
+ return {}
+ else:
+ type = "http"
+ setting = self.core.config["proxy"]["type"].lower()
+ if setting == "socks4":
+ type = "socks4"
+ elif setting == "socks5":
+ type = "socks5"
+
+ username = None
+ if self.core.config["proxy"]["username"] and self.core.config["proxy"]["username"].lower() != "none":
+ username = self.core.config["proxy"]["username"]
+
+ pw = None
+ if self.core.config["proxy"]["password"] and self.core.config["proxy"]["password"].lower() != "none":
+ pw = self.core.config["proxy"]["password"]
+
+ return {
+ "type": type,
+ "address": self.core.config["proxy"]["address"],
+ "port": self.core.config["proxy"]["port"],
+ "username": username,
+ "password": pw,
+ }
+
+ def getConfig(self):
+ """returns options needed for pycurl"""
+ return {"interface": self.getInterface(),
+ "proxies": self.getProxies(),
+ "ipv6": self.core.config["download"]["ipv6"]}
+
+ def updateBucket(self):
+ """ set values in the bucket according to settings"""
+ if not self.core.config["download"]["limit_speed"]:
+ self.bucket.setRate(-1)
+ else:
+ self.bucket.setRate(self.core.config["download"]["max_speed"] * 1024)
+
+# needs pyreq in global namespace
+def getURL(*args, **kwargs):
+ return pyreq.getURL(*args, **kwargs)
+
+
+def getRequest(*args, **kwargs):
+ return pyreq.getRequest(*args, **kwargs)
diff --git a/pyload/network/XDCCRequest.py b/pyload/network/XDCCRequest.py
new file mode 100644
index 000000000..89c4f3b73
--- /dev/null
+++ b/pyload/network/XDCCRequest.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: jeix
+"""
+
+import socket
+import re
+
+from os import remove
+from os.path import exists
+
+from time import time
+
+import struct
+from select import select
+
+from pyload.plugins.Plugin import Abort
+
+
+class XDCCRequest():
+ def __init__(self, timeout=30, proxies={}):
+
+ self.proxies = proxies
+ self.timeout = timeout
+
+ self.filesize = 0
+ self.recv = 0
+ self.speed = 0
+
+ self.abort = False
+
+
+ def createSocket(self):
+ # proxytype = None
+ # proxy = None
+ # if self.proxies.has_key("socks5"):
+ # proxytype = socks.PROXY_TYPE_SOCKS5
+ # proxy = self.proxies["socks5"]
+ # elif self.proxies.has_key("socks4"):
+ # proxytype = socks.PROXY_TYPE_SOCKS4
+ # proxy = self.proxies["socks4"]
+ # if proxytype:
+ # sock = socks.socksocket()
+ # t = _parse_proxy(proxy)
+ # sock.setproxy(proxytype, addr=t[3].split(":")[0], port=int(t[3].split(":")[1]), username=t[1], password=t[2])
+ # else:
+ # sock = socket.socket()
+ # return sock
+
+ return socket.socket()
+
+ def download(self, ip, port, filename, irc, progressNotify=None):
+
+ ircbuffer = ""
+ lastUpdate = time()
+ cumRecvLen = 0
+
+ dccsock = self.createSocket()
+
+ dccsock.settimeout(self.timeout)
+ dccsock.connect((ip, port))
+
+ if exists(filename):
+ i = 0
+ nameParts = filename.rpartition(".")
+ while True:
+ newfilename = "%s-%d%s%s" % (nameParts[0], i, nameParts[1], nameParts[2])
+ i += 1
+
+ if not exists(newfilename):
+ filename = newfilename
+ break
+
+ fh = open(filename, "wb")
+
+ # recv loop for dcc socket
+ while True:
+ if self.abort:
+ dccsock.close()
+ fh.close()
+ remove(filename)
+ raise Abort()
+
+ self._keepAlive(irc, ircbuffer)
+
+ data = dccsock.recv(4096)
+ dataLen = len(data)
+ self.recv += dataLen
+
+ cumRecvLen += dataLen
+
+ now = time()
+ timespan = now - lastUpdate
+ if timespan > 1:
+ self.speed = cumRecvLen / timespan
+ cumRecvLen = 0
+ lastUpdate = now
+
+ if progressNotify:
+ progressNotify(self.percent)
+
+
+ if not data:
+ break
+
+ fh.write(data)
+
+ # acknowledge data by sending number of received bytes
+ dccsock.send(struct.pack('!I', self.recv))
+
+ dccsock.close()
+ fh.close()
+
+ return filename
+
+ def _keepAlive(self, sock, readbuffer):
+ fdset = select([sock], [], [], 0)
+ if sock not in fdset[0]:
+ return
+
+ readbuffer += sock.recv(1024)
+ temp = readbuffer.split("\n")
+ readbuffer = temp.pop()
+
+ for line in temp:
+ line = line.rstrip()
+ first = line.split()
+ if first[0] == "PING":
+ sock.send("PONG %s\r\n" % first[1])
+
+ def abortDownloads(self):
+ self.abort = True
+
+ @property
+ def size(self):
+ return self.filesize
+
+ @property
+ def arrived(self):
+ return self.recv
+
+ @property
+ def percent(self):
+ if not self.filesize: return 0
+ return (self.recv * 100) / self.filesize
+
+ def close(self):
+ pass
diff --git a/pyload/network/__init__.py b/pyload/network/__init__.py
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/pyload/network/__init__.py
@@ -0,0 +1 @@
+
diff --git a/pyload/plugins/Account.py b/pyload/plugins/Account.py
new file mode 100644
index 000000000..4492dfa18
--- /dev/null
+++ b/pyload/plugins/Account.py
@@ -0,0 +1,295 @@
+# -*- coding: utf-8 -*-
+
+from time import time
+from traceback import print_exc
+from threading import RLock
+
+from pyload.utils import compare_time, format_size, parseFileSize, lock, to_bool
+from pyload.Api import AccountInfo
+from pyload.network.CookieJar import CookieJar
+
+from Base import Base
+
+
+class WrongPassword(Exception):
+ pass
+
+#noinspection PyUnresolvedReferences
+class Account(Base):
+ """
+ Base class for every account plugin.
+ Just overwrite `login` and cookies will be stored and the account becomes accessible in\
+ associated hoster plugin. Plugin should also provide `loadAccountInfo`. \
+ An instance of this class is created for every entered account, it holds all \
+ fields of AccountInfo type, and can be set easily at runtime.
+ """
+
+ # constants for special values
+ UNKNOWN = -1
+ UNLIMITED = -2
+
+ # Default values
+ owner = None
+ valid = True
+ validuntil = -1
+ trafficleft = -1
+ maxtraffic = -1
+ premium = True
+ activated = True
+ shared = False
+
+ #: after that time [in minutes] pyload will relogin the account
+ login_timeout = 600
+ #: account data will be reloaded after this time
+ info_threshold = 600
+
+ # known options
+ known_opt = ("time", "limitDL")
+
+ def __init__(self, manager, loginname, password, options):
+ Base.__init__(self, manager.core)
+
+ if "activated" in options:
+ self.activated = to_bool(options["activated"])
+ else:
+ self.activated = Account.activated
+
+ for opt in self.known_opt:
+ if opt not in options:
+ options[opt] = ""
+
+ for opt in options.keys():
+ if opt not in self.known_opt:
+ del options[opt]
+
+ self.loginname = loginname
+ self.options = options
+
+ self.manager = manager
+
+ self.lock = RLock()
+ self.timestamp = 0
+ self.login_ts = 0 # timestamp for login
+ self.cj = CookieJar()
+ self.password = password
+ self.error = None
+
+ self.init()
+
+ def toInfoData(self):
+ return AccountInfo(self.__name__, self.loginname, self.owner, self.valid, self.validuntil, self.trafficleft,
+ self.maxtraffic,
+ self.premium, self.activated, self.shared, self.options)
+
+ def init(self):
+ pass
+
+ def login(self, req):
+ """login into account, the cookies will be saved so the user can be recognized
+
+ :param req: `Request` instance
+ """
+ raise NotImplemented
+
+ def relogin(self):
+ """ Force a login. """
+ req = self.getAccountRequest()
+ try:
+ return self._login(req)
+ finally:
+ req.close()
+
+ @lock
+ def _login(self, req):
+ # set timestamp for login
+ self.login_ts = time()
+
+ try:
+ try:
+ self.login(req)
+ except TypeError: #TODO: temporary
+ self.logDebug("Deprecated .login(...) signature omit user, data")
+ self.login(self.loginname, {"password": self.password}, req)
+
+ self.valid = True
+ except WrongPassword:
+ self.logWarning(
+ _("Could not login with account %(user)s | %(msg)s") % {"user": self.loginname
+ , "msg": _("Wrong Password")})
+ self.valid = False
+
+ except Exception, e:
+ self.logWarning(
+ _("Could not login with account %(user)s | %(msg)s") % {"user": self.loginname
+ , "msg": e})
+ self.valid = False
+ if self.core.debug:
+ print_exc()
+
+ return self.valid
+
+ def restoreDefaults(self):
+ self.validuntil = Account.validuntil
+ self.trafficleft = Account.trafficleft
+ self.maxtraffic = Account.maxtraffic
+ self.premium = Account.premium
+
+ def update(self, password=None, options=None):
+ """ updates the account and returns true if anything changed """
+
+ self.login_ts = 0
+ self.valid = True #set valid, so the login will be retried
+
+ if "activated" in options:
+ self.activated = True if options["activated"] == "True" else False
+
+ if password:
+ self.password = password
+ self.relogin()
+ return True
+ if options:
+ # remove unknown options
+ for opt in options.keys():
+ if opt not in self.known_opt:
+ del options[opt]
+
+ before = self.options
+ self.options.update(options)
+ return self.options != before
+
+ def getAccountRequest(self):
+ return self.core.requestFactory.getRequest(self.cj)
+
+ def getDownloadSettings(self):
+ """ Can be overwritten to change download settings. Default is no chunkLimit, max dl limit, resumeDownload
+
+ :return: (chunkLimit, limitDL, resumeDownload) / (int, int ,bool)
+ """
+ return -1, 0, True
+
+ @lock
+ def getAccountInfo(self, force=False):
+ """retrieve account info's for an user, do **not** overwrite this method!\\
+ just use it to retrieve info's in hoster plugins. see `loadAccountInfo`
+
+ :param name: username
+ :param force: reloads cached account information
+ :return: dictionary with information
+ """
+ if force or self.timestamp + self.info_threshold * 60 < time():
+
+ # make sure to login
+ req = self.getAccountRequest()
+ self.checkLogin(req)
+ self.logInfo(_("Get Account Info for %s") % self.loginname)
+ try:
+ try:
+ infos = self.loadAccountInfo(req)
+ except TypeError: #TODO: temporary
+ self.logDebug("Deprecated .loadAccountInfo(...) signature, omit user argument.")
+ infos = self.loadAccountInfo(self.loginname, req)
+ except Exception, e:
+ infos = {"error": str(e)}
+ self.logError(_("Error: %s") % e)
+ finally:
+ req.close()
+
+ self.logDebug("Account Info: %s" % str(infos))
+ self.timestamp = time()
+
+ self.restoreDefaults() # reset to initial state
+ if type(infos) == dict: # copy result from dict to class
+ for k, v in infos.iteritems():
+ if hasattr(self, k):
+ setattr(self, k, v)
+ else:
+ self.logDebug("Unknown attribute %s=%s" % (k, v))
+
+ #TODO: remove user
+ def loadAccountInfo(self, req):
+ """ Overwrite this method and set account attributes within this method.
+
+ :param user: Deprecated
+ :param req: Request instance
+ :return:
+ """
+ pass
+
+ def getAccountCookies(self, user):
+ self.logDebug("Deprecated method .getAccountCookies -> use account.cj")
+ return self.cj
+
+ def getAccountData(self, user):
+ self.logDebug("Deprecated method .getAccountData -> use fields directly")
+ return {"password": self.password}
+
+ def isPremium(self, user=None):
+ if user: self.logDebug("Deprecated Argument user for .isPremium()", user)
+ return self.premium
+
+ def isUsable(self):
+ """Check several constraints to determine if account should be used"""
+ if not self.valid or not self.activated: return False
+
+ if self.options["time"]:
+ time_data = ""
+ try:
+ time_data = self.options["time"]
+ start, end = time_data.split("-")
+ if not compare_time(start.split(":"), end.split(":")):
+ return False
+ except:
+ self.logWarning(_("Your Time %s has a wrong format, use: 1:22-3:44") % time_data)
+
+ if 0 <= self.validuntil < time():
+ return False
+ if self.trafficleft is 0: # test explicitly for 0
+ return False
+
+ return True
+
+ def parseTraffic(self, string): #returns kbyte
+ return parseFileSize(string) / 1024
+
+ def formatTrafficleft(self):
+ if self.trafficleft is None:
+ self.getAccountInfo(force=True)
+ return format_size(self.trafficleft * 1024)
+
+ def wrongPassword(self):
+ raise WrongPassword
+
+ def empty(self, user=None):
+ if user: self.logDebug("Deprecated argument user for .empty()", user)
+
+ self.logWarning(_("Account %s has not enough traffic, checking again in 30min") % self.login)
+
+ self.trafficleft = 0
+ self.scheduleRefresh(30 * 60)
+
+ def expired(self, user=None):
+ if user: self.logDebug("Deprecated argument user for .expired()", user)
+
+ self.logWarning(_("Account %s is expired, checking again in 1h") % user)
+
+ self.validuntil = time() - 1
+ self.scheduleRefresh(60 * 60)
+
+ def scheduleRefresh(self, time=0, force=True):
+ """ add a task for refreshing the account info to the scheduler """
+ self.logDebug("Scheduled Account refresh for %s in %s seconds." % (self.loginname, time))
+ self.core.scheduler.addJob(time, self.getAccountInfo, [force])
+
+ @lock
+ def checkLogin(self, req):
+ """ checks if the user is still logged in """
+ if self.login_ts + self.login_timeout * 60 < time():
+ if self.login_ts: # separate from fresh login to have better debug logs
+ self.logDebug("Reached login timeout for %s" % self.loginname)
+ else:
+ self.logInfo(_("Login with %s") % self.loginname)
+
+ self._login(req)
+ return False
+
+ return True
diff --git a/pyload/plugins/Addon.py b/pyload/plugins/Addon.py
new file mode 100644
index 000000000..940339bfb
--- /dev/null
+++ b/pyload/plugins/Addon.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from traceback import print_exc
+
+#from functools import wraps
+from module.utils import has_method, to_list
+
+from Base import Base
+
+def class_name(p):
+ return p.rpartition(".")[2]
+
+
+def AddEventListener(event):
+ """ Used to register method for events. Arguments needs to match parameter of event
+
+ :param event: Name of event or list of them.
+ """
+ class _klass(object):
+ def __new__(cls, f, *args, **kwargs):
+ for ev in to_list(event):
+ addonManager.addEventListener(class_name(f.__module__), f.func_name, ev)
+ return f
+ return _klass
+
+
+def AddonHandler(desc, media=None):
+ """ Register Handler for files, packages, or arbitrary callable methods.
+ To let the method work on packages/files, media must be set and the argument named pid or fid.
+
+ :param desc: verbose description
+ :param media: if True or bits of media type
+ """
+ pass
+
+def AddonInfo(desc):
+ """ Called to retrieve information about the current state.
+ Decorated method must return anything convertable into string.
+
+ :param desc: verbose description
+ """
+ pass
+
+def threaded(f):
+ """ Decorator to run method in a thread. """
+
+ #@wraps(f)
+ def run(*args,**kwargs):
+ addonManager.startThread(f, *args, **kwargs)
+ return run
+
+class Addon(Base):
+ """
+ Base class for addon plugins. Use @threaded decorator for all longer running tasks.
+
+ Decorate methods with @Expose, @AddEventListener, @ConfigHandler
+
+ """
+
+ #: automatically register event listeners for functions; attribute will be deleted, don't use it yourself
+ event_map = None
+
+ #: periodic call interval in seconds
+ interval = 60
+
+ def __init__(self, core, manager, user=None):
+ Base.__init__(self, core, user)
+
+ #: Provide information in dict here, usable by API `getInfo`
+ self.info = None
+
+ #: Callback of periodical job task, used by addonmanager
+ self.cb = None
+
+ #: `AddonManager`
+ self.manager = manager
+
+ #register events
+ if self.event_map:
+ for event, funcs in self.event_map.iteritems():
+ if type(funcs) in (list, tuple):
+ for f in funcs:
+ self.evm.listenTo(event, getattr(self,f))
+ else:
+ self.evm.listenTo(event, getattr(self,funcs))
+
+ #delete for various reasons
+ self.event_map = None
+
+ self.initPeriodical()
+ self.init()
+
+ def initPeriodical(self):
+ if self.interval >=1:
+ self.cb = self.core.scheduler.addJob(0, self._periodical, threaded=False)
+
+ def _periodical(self):
+ try:
+ if self.isActivated(): self.periodical()
+ except Exception, e:
+ self.core.log.error(_("Error executing addons: %s") % str(e))
+ if self.core.debug:
+ print_exc()
+
+ self.cb = self.core.scheduler.addJob(self.interval, self._periodical, threaded=False)
+
+ def __repr__(self):
+ return "<Addon %s>" % self.__name__
+
+ def isActivated(self):
+ """ checks if addon is activated"""
+ return True if self.__internal__ else self.getConfig("activated")
+
+ def getCategory(self):
+ return self.core.pluginManager.getCategory(self.__name__)
+
+ def init(self):
+ pass
+
+ def activate(self):
+ """ Used to activate the addon """
+ if has_method(self.__class__, "coreReady"):
+ self.logDebug("Deprecated method .coreReady() use activate() instead")
+ self.coreReady()
+
+ def deactivate(self):
+ """ Used to deactivate the addon. """
+ pass
+
+ def periodical(self):
+ pass
+
+ def newInteractionTask(self, task):
+ """ new interaction task for the plugin, it MUST set the handler and timeout or will be ignored """
+ pass
+
+ def taskCorrect(self, task):
+ pass
+
+ def taskInvalid(self, task):
+ pass
+
+ # public events starts from here
+ def downloadPreparing(self, pyfile):
+ pass
+
+ def downloadFinished(self, pyfile):
+ pass
+
+ def downloadFailed(self, pyfile):
+ pass
+
+ def packageFinished(self, pypack):
+ pass \ No newline at end of file
diff --git a/pyload/plugins/Base.py b/pyload/plugins/Base.py
new file mode 100644
index 000000000..cd4831d82
--- /dev/null
+++ b/pyload/plugins/Base.py
@@ -0,0 +1,348 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+import sys
+from time import time, sleep
+from random import randint
+
+from pyload.utils import decode
+from pyload.utils.fs import exists, makedirs, join, remove
+
+# TODO
+# more attributes if needed
+# get rid of catpcha & container plugins ?! (move to crypter & internals)
+# adapt old plugins as needed
+
+class Fail(Exception):
+ """ raised when failed """
+
+class Retry(Exception):
+ """ raised when start again from beginning """
+
+class Abort(Exception):
+ """ raised when aborted """
+
+class Base(object):
+ """
+ The Base plugin class with all shared methods and every possible attribute for plugin definition.
+ """
+ __version__ = "0.1"
+ #: Regexp pattern which will be matched for download/crypter plugins
+ __pattern__ = r""
+ #: Internal addon plugin which is always loaded
+ __internal__ = False
+ #: When True this addon can be enabled by every user
+ __user_context__ = False
+ #: Config definition: list of (name, type, label, default_value) or
+ #: (name, label, desc, Input(...))
+ __config__ = tuple()
+ #: Short description, one liner
+ __description__ = ""
+ #: More detailed text
+ __explanation__ = """"""
+ #: List of needed modules
+ __dependencies__ = tuple()
+ #: Used to assign a category for addon plugins
+ __category__ = ""
+ #: Tags to categorize the plugin, see documentation for further info
+ __tags__ = tuple()
+ #: Base64 encoded .png icon, should be 32x32, please don't use sizes above ~2KB, for bigger icons use url.
+ __icon__ = ""
+ #: Alternative, link to png icon
+ __icon_url__ = ""
+ #: Domain name of the service
+ __domain__ = ""
+ #: Url with general information/support/discussion
+ __url__ = ""
+ #: Url to terms of content, the user is accepting these when using the plugin
+ __toc_url__ = ""
+ #: Url to service (to buy premium) for accounts
+ __ref_url__ = ""
+
+ __author_name__ = tuple()
+ __author_mail__ = tuple()
+
+
+ def __init__(self, core, user=None):
+ self.__name__ = self.__class__.__name__
+
+ #: Core instance
+ self.core = core
+ #: logging instance
+ self.log = core.log
+ #: core config
+ self.config = core.config
+ #: :class:`EventManager`
+ self.evm = core.eventManager
+ #: :class:`InteractionManager`
+ self.im = core.interactionManager
+ if user:
+ #: :class:`Api`, user api when user is set
+ self.api = self.core.api.withUserContext(user)
+ if not self.api:
+ raise Exception("Plugin running with invalid user")
+
+ #: :class:`User`, user related to this plugin
+ self.user = self.api.user
+ else:
+ self.api = self.core.api
+ self.user = None
+
+ #: last interaction task
+ self.task = None
+
+ def __getitem__(self, item):
+ """ Retrieves meta data attribute """
+ return getattr(self, "__%s__" % item)
+
+ def logInfo(self, *args, **kwargs):
+ """ Print args to log at specific level
+
+ :param args: Arbitrary object which should be logged
+ :param kwargs: sep=(how to separate arguments), default = " | "
+ """
+ self._log("info", *args, **kwargs)
+
+ def logWarning(self, *args, **kwargs):
+ self._log("warning", *args, **kwargs)
+
+ def logError(self, *args, **kwargs):
+ self._log("error", *args, **kwargs)
+
+ def logDebug(self, *args, **kwargs):
+ self._log("debug", *args, **kwargs)
+
+ def _log(self, level, *args, **kwargs):
+ if "sep" in kwargs:
+ sep = "%s" % kwargs["sep"]
+ else:
+ sep = " | "
+
+ strings = []
+ for obj in args:
+ if type(obj) == unicode:
+ strings.append(obj)
+ elif type(obj) == str:
+ strings.append(decode(obj))
+ else:
+ strings.append(str(obj))
+
+ getattr(self.log, level)("%s: %s" % (self.__name__, sep.join(strings)))
+
+ def getName(self):
+ """ Name of the plugin class """
+ return self.__name__
+
+ def setConfig(self, option, value):
+ """ Set config value for current plugin """
+ self.core.config.set(self.__name__, option, value)
+
+ def getConf(self, option):
+ """ see `getConfig` """
+ return self.getConfig(option)
+
+ def getConfig(self, option):
+ """ Returns config value for current plugin """
+ return self.core.config.get(self.__name__, option)
+
+ def setStorage(self, key, value):
+ """ Saves a value persistently to the database """
+ self.core.db.setStorage(self.__name__, key, value)
+
+ def store(self, key, value):
+ """ same as `setStorage` """
+ self.core.db.setStorage(self.__name__, key, value)
+
+ def getStorage(self, key=None, default=None):
+ """ Retrieves saved value or dict of all saved entries if key is None """
+ if key is not None:
+ return self.core.db.getStorage(self.__name__, key) or default
+ return self.core.db.getStorage(self.__name__, key)
+
+ def retrieve(self, *args, **kwargs):
+ """ same as `getStorage` """
+ return self.getStorage(*args, **kwargs)
+
+ def delStorage(self, key):
+ """ Delete entry in db """
+ self.core.db.delStorage(self.__name__, key)
+
+ def shell(self):
+ """ open ipython shell """
+ if self.core.debug:
+ from IPython import embed
+ #noinspection PyUnresolvedReferences
+ sys.stdout = sys._stdout
+ embed()
+
+ def abort(self):
+ """ Check if plugin is in an abort state, is overwritten by subtypes"""
+ return False
+
+ def checkAbort(self):
+ """ Will be overwritten to determine if control flow should be aborted """
+ if self.abort(): raise Abort()
+
+ def load(self, url, get={}, post={}, ref=True, cookies=True, just_header=False, decode=False):
+ """Load content at url and returns it
+
+ :param url: url as string
+ :param get: GET as dict
+ :param post: POST as dict, list or string
+ :param ref: Set HTTP_REFERER header
+ :param cookies: use saved cookies
+ :param just_header: if True only the header will be retrieved and returned as dict
+ :param decode: Whether to decode the output according to http header, should be True in most cases
+ :return: Loaded content
+ """
+ if not hasattr(self, "req"): raise Exception("Plugin type does not have Request attribute.")
+ self.checkAbort()
+
+ res = self.req.load(url, get, post, ref, cookies, just_header, decode=decode)
+
+ if self.core.debug:
+ from inspect import currentframe
+
+ frame = currentframe()
+ if not exists(join("tmp", self.__name__)):
+ makedirs(join("tmp", self.__name__))
+
+ f = open(
+ join("tmp", self.__name__, "%s_line%s.dump.html" % (frame.f_back.f_code.co_name, frame.f_back.f_lineno))
+ , "wb")
+ del frame # delete the frame or it won't be cleaned
+
+ try:
+ tmp = res.encode("utf8")
+ except:
+ tmp = res
+
+ f.write(tmp)
+ f.close()
+
+ if just_header:
+ #parse header
+ header = {"code": self.req.code}
+ for line in res.splitlines():
+ line = line.strip()
+ if not line or ":" not in line: continue
+
+ key, none, value = line.partition(":")
+ key = key.lower().strip()
+ value = value.strip()
+
+ if key in header:
+ if type(header[key]) == list:
+ header[key].append(value)
+ else:
+ header[key] = [header[key], value]
+ else:
+ header[key] = value
+ res = header
+
+ return res
+
+ def invalidTask(self):
+ if self.task:
+ self.task.invalid()
+
+ def invalidCaptcha(self):
+ self.logDebug("Deprecated method .invalidCaptcha, use .invalidTask")
+ self.invalidTask()
+
+ def correctTask(self):
+ if self.task:
+ self.task.correct()
+
+ def correctCaptcha(self):
+ self.logDebug("Deprecated method .correctCaptcha, use .correctTask")
+ self.correctTask()
+
+ def decryptCaptcha(self, url, get={}, post={}, cookies=False, forceUser=False, imgtype='jpg',
+ result_type='textual'):
+ """ Loads a captcha and decrypts it with ocr, plugin, user input
+
+ :param url: url of captcha image
+ :param get: get part for request
+ :param post: post part for request
+ :param cookies: True if cookies should be enabled
+ :param forceUser: if True, ocr is not used
+ :param imgtype: Type of the Image
+ :param result_type: 'textual' if text is written on the captcha\
+ or 'positional' for captcha where the user has to click\
+ on a specific region on the captcha
+
+ :return: result of decrypting
+ """
+
+ img = self.load(url, get=get, post=post, cookies=cookies)
+
+ id = ("%.2f" % time())[-6:].replace(".", "")
+ temp_file = open(join("tmp", "tmpCaptcha_%s_%s.%s" % (self.__name__, id, imgtype)), "wb")
+ temp_file.write(img)
+ temp_file.close()
+
+ name = "%sOCR" % self.__name__
+ has_plugin = name in self.core.pluginManager.getPlugins("internal")
+
+ if self.core.captcha:
+ OCR = self.core.pluginManager.loadClass("internal", name)
+ else:
+ OCR = None
+
+ if OCR and not forceUser:
+ sleep(randint(3000, 5000) / 1000.0)
+ self.checkAbort()
+
+ ocr = OCR()
+ result = ocr.get_captcha(temp_file.name)
+ else:
+ task = self.im.createCaptchaTask(img, imgtype, temp_file.name, self.__name__, result_type)
+ self.task = task
+
+ while task.isWaiting():
+ if self.abort():
+ self.im.removeTask(task)
+ raise Abort()
+ sleep(1)
+
+ #TODO task handling
+ self.im.removeTask(task)
+
+ if task.error and has_plugin: #ignore default error message since the user could use OCR
+ self.fail(_("Pil and tesseract not installed and no Client connected for captcha decrypting"))
+ elif task.error:
+ self.fail(task.error)
+ elif not task.result:
+ self.fail(_("No captcha result obtained in appropriate time."))
+
+ result = task.result
+ self.log.debug("Received captcha result: %s" % str(result))
+
+ if not self.core.debug:
+ try:
+ remove(temp_file.name)
+ except:
+ pass
+
+ return result
+
+ def fail(self, reason):
+ """ fail and give reason """
+ raise Fail(reason) \ No newline at end of file
diff --git a/pyload/plugins/Crypter.py b/pyload/plugins/Crypter.py
new file mode 100644
index 000000000..1401d68b8
--- /dev/null
+++ b/pyload/plugins/Crypter.py
@@ -0,0 +1,271 @@
+# -*- coding: utf-8 -*-
+
+from traceback import print_exc
+
+from pyload.utils import to_list, has_method, uniqify
+from pyload.utils.fs import exists, remove, fs_encode
+from pyload.utils.packagetools import parseNames
+
+from Base import Base, Retry
+
class Package:
    """ Container that indicates that a new package should be created """

    def __init__(self, name, urls=None, folder=None):
        self.name = name
        self.urls = urls if urls else []
        self.folder = folder

        # nested packages
        self.packs = []

    def addURL(self, url):
        """ Append a single url to this package. """
        self.urls.append(url)

    def addPackage(self, pack):
        """ Add a nested sub-package. """
        self.packs.append(pack)

    def getAllURLs(self):
        """ Return this package's urls plus those of all nested packages.

        Works on a copy so the package's own url list is left untouched
        (previously ``extend`` mutated ``self.urls`` in place).
        """
        urls = list(self.urls)
        for p in self.packs:
            urls.extend(p.getAllURLs())
        return urls

    # same name and urls is enough to be equal for packages
    def __eq__(self, other):
        return self.name == other.name and self.urls == other.urls

    def __repr__(self):
        return u"<CrypterPackage name=%s, links=%s, packs=%s" % (self.name, self.urls, self.packs)

    def __hash__(self):
        # must agree with __eq__: combine name and urls.
        # (was name ^ urls ^ name, which cancelled the name contribution out)
        return hash(self.name) ^ hash(frozenset(self.urls))
+
class PyFileMockup:
    """ Legacy stand-in for PyFile, required by old-style crypter plugins. """

    def __init__(self, url, pack):
        # the mock exposes the url both as location and as display name
        self.url = url
        self.name = url
        self._package = pack
        # -1 signals "no package associated"
        self.packageid = pack.id if pack else -1

    def package(self):
        # accessor mirroring PyFile's API
        return self._package
+
+class Crypter(Base):
+ """
+ Base class for (de)crypter plugins. Overwrite decrypt* methods.
+
+ How to use decrypt* methods:
+
+ You have to overwrite at least one method of decryptURL, decryptURLs, decryptFile.
+
+ After decrypting and generating urls/packages you have to return the result.
+ Valid return Data is:
+
+ :class:`Package` instance Crypter.Package
+ A **new** package will be created with the name and the urls of the object.
+
+ List of urls and `Package` instances
+ All urls in the list will be added to the **current** package. For each `Package`\
+ instance a new package will be created.
+
+ """
+
+ #: Prefix to annotate that the submited string for decrypting is indeed file content
+ CONTENT_PREFIX = "filecontent:"
+
+ @classmethod
+ def decrypt(cls, core, url_or_urls):
+ """Static method to decrypt urls or content. Can be used by other plugins.
+ To decrypt file content prefix the string with ``CONTENT_PREFIX `` as seen above.
+
+ :param core: pyLoad `Core`, needed in decrypt context
+ :param url_or_urls: List of urls or single url/ file content
+ :return: List of decrypted urls, all package info removed
+ """
+ urls = to_list(url_or_urls)
+ p = cls(core)
+ try:
+ result = p.processDecrypt(urls)
+ finally:
+ p.clean()
+
+ ret = []
+
+ for url_or_pack in result:
+ if isinstance(url_or_pack, Package): #package
+ ret.extend(url_or_pack.getAllURLs())
+ else: # single url
+ ret.append(url_or_pack)
+ # eliminate duplicates
+ return uniqify(ret)
+
+ def __init__(self, core, package=None, password=None):
+ Base.__init__(self, core)
+ self.req = core.requestFactory.getRequest()
+
+ # Package the plugin was initialized for, don't use this, its not guaranteed to be set
+ self.package = package
+ #: Password supplied by user
+ self.password = password
+ #: Propose a renaming of the owner package
+ self.rename = None
+
+ # For old style decrypter, do not use these!
+ self.packages = []
+ self.urls = []
+ self.pyfile = None
+
+ self.init()
+
+ def init(self):
+ """More init stuff if needed"""
+
+ def setup(self):
+ """Called everytime before decrypting. A Crypter plugin will be most likely used for several jobs."""
+
+ def decryptURL(self, url):
+ """Decrypt a single url
+
+ :param url: url to decrypt
+ :return: See :class:`Crypter` Documentation
+ """
+ if url.startswith("http"): # basic method to redirect
+ return self.decryptFile(self.load(url))
+ else:
+ self.fail(_("Not existing file or unsupported protocol"))
+
+ def decryptURLs(self, urls):
+ """Decrypt a bunch of urls
+
+ :param urls: list of urls
+ :return: See :class:`Crypter` Documentation
+ """
+ raise NotImplementedError
+
+ def decryptFile(self, content):
+ """Decrypt file content
+
+ :param content: content to decrypt as string
+ :return: See :class:`Crypter` Documentation
+ """
+ raise NotImplementedError
+
+ def generatePackages(self, urls):
+ """Generates :class:`Package` instances and names from urls. Useful for many different links and no\
+ given package name.
+
+ :param urls: list of urls
+ :return: list of `Package`
+ """
+ return [Package(name, purls) for name, purls in parseNames([(url,url) for url in urls]).iteritems()]
+
+ def _decrypt(self, urls):
+ """ Internal method to select decrypting method
+
+ :param urls: List of urls/content
+ :return:
+ """
+ cls = self.__class__
+
+ # separate local and remote files
+ content, urls = self.getLocalContent(urls)
+
+ if has_method(cls, "decryptURLs"):
+ self.setup()
+ result = to_list(self.decryptURLs(urls))
+ elif has_method(cls, "decryptURL"):
+ result = []
+ for url in urls:
+ self.setup()
+ result.extend(to_list(self.decryptURL(url)))
+ elif has_method(cls, "decrypt"):
+ self.logDebug("Deprecated .decrypt() method in Crypter plugin")
+ result = []
+ for url in urls:
+ self.pyfile = PyFileMockup(url, self.package)
+ self.setup()
+ self.decrypt(self.pyfile)
+ result.extend(self.convertPackages())
+ else:
+ if not has_method(cls, "decryptFile") or urls:
+ self.logDebug("No suited decrypting method was overwritten in plugin")
+ result = []
+
+ if has_method(cls, "decryptFile"):
+ for f, c in content:
+ self.setup()
+ result.extend(to_list(self.decryptFile(c)))
+ try:
+ if f.startswith("tmp_"): remove(f)
+ except :
+ pass
+
+ return result
+
+ def processDecrypt(self, urls):
+ """Catches all exceptions in decrypt methods and return results
+
+ :return: Decrypting results
+ """
+ try:
+ return self._decrypt(urls)
+ except Exception:
+ if self.core.debug:
+ print_exc()
+ return []
+
+ def getLocalContent(self, urls):
+ """Load files from disk and separate to file content and url list
+
+ :param urls:
+ :return: list of (filename, content), remote urls
+ """
+ content = []
+ # do nothing if no decryptFile method
+ if hasattr(self.__class__, "decryptFile"):
+ remote = []
+ for url in urls:
+ path = None
+ if url.startswith("http"): # skip urls directly
+ pass
+ elif url.startswith(self.CONTENT_PREFIX):
+ path = url
+ elif exists(url):
+ path = url
+ elif exists(self.core.path(url)):
+ path = self.core.path(url)
+
+ if path:
+ try:
+ if path.startswith(self.CONTENT_PREFIX):
+ content.append(("", path[len(self.CONTENT_PREFIX)]))
+ else:
+ f = open(fs_encode(path), "rb")
+ content.append((f.name, f.read()))
+ f.close()
+ except IOError, e:
+ self.logError("IOError", e)
+ else:
+ remote.append(url)
+
+ #swap filtered url list
+ urls = remote
+
+ return content, urls
+
+ def retry(self):
+ """ Retry decrypting, will only work once. Somewhat deprecated method, should be avoided. """
+ raise Retry()
+
+ def convertPackages(self):
+ """ Deprecated """
+ self.logDebug("Deprecated method .convertPackages()")
+ res = [Package(name, urls) for name, urls in self.packages]
+ res.extend(self.urls)
+ return res
+
+ def clean(self):
+ if hasattr(self, "req"):
+ self.req.close()
+ del self.req \ No newline at end of file
diff --git a/pyload/plugins/Download.py b/pyload/plugins/Download.py
new file mode 100644
index 000000000..e86089dc3
--- /dev/null
+++ b/pyload/plugins/Download.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+from Request import Request
+
class Download(Request):
    """ Abstract base every download request implements. """

    __version__ = "0.1"

    def __init__(self, bucket, request=None):
        # Reuse the context of an existing request when one is supplied,
        # otherwise start from a blank configuration.
        if request:
            Request.__init__(self, *request.getContext())
        else:
            Request.__init__(self, {})

        self._running = False
        self._name = None
        self._size = 0

        #: bucket used for rate limiting
        self.bucket = bucket

    def download(self, uri, path, *args, **kwargs):
        """ Downloads the resource with additional options depending on implementation """
        raise NotImplementedError

    @property
    def running(self):
        """ True while a transfer is in progress """
        return self._running

    @property
    def name(self):
        """ Name of the resource if known """
        return self._name

    @property
    def size(self):
        """ Size in bytes """
        return self._size

    @property
    def arrived(self):
        """ Number of bytes already loaded """
        return 0

    @property
    def speed(self):
        """ Download rate in bytes per second """
        return 0
diff --git a/pyload/plugins/Hoster.py b/pyload/plugins/Hoster.py
new file mode 100644
index 000000000..a37102aff
--- /dev/null
+++ b/pyload/plugins/Hoster.py
@@ -0,0 +1,416 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN, spoob, mkaay
+"""
+
+import os
+from time import time
+
+if os.name != "nt":
+ from pyload.utils.fs import chown
+ from pwd import getpwnam
+ from grp import getgrnam
+
+from pyload.utils import chunks as _chunks
+from pyload.utils.fs import save_join, save_filename, fs_encode, fs_decode, \
+ remove, makedirs, chmod, stat, exists, join
+
+from Base import Base, Fail, Retry
+from network.DefaultRequest import DefaultRequest, DefaultDownload
+
+# Import for Hoster Plugins
+chunks = _chunks
+
+
class Reconnect(Exception):
    """ Raised out of `wait` when a reconnect happened, so processing restarts. """
+
+
class SkipDownload(Exception):
    """ Raised when the download should be skipped (e.g. duplicate file); carries the cause. """
+
+
class Hoster(Base):
    """
    Base plugin for hoster plugin. Overwrite getInfo for online status retrieval, process for downloading.
    """

    #: Class used to make requests with `self.load`
    REQUEST_CLASS = DefaultRequest

    #: Class used to make download
    DOWNLOAD_CLASS = DefaultDownload

    @staticmethod
    def getInfo(urls):
        """This method is used to retrieve the online status of files for hoster plugins.
        It has to *yield* list of tuples with the result in this format (name, size, status, url),
        where status is one of API pyfile statuses.

        :param urls: List of urls
        :return: yield list of tuple with results (name, size, status, url)
        """
        pass

    def __init__(self, pyfile):
        """
        :param pyfile: the `PyFile` this plugin instance will process
        """
        Base.__init__(self, pyfile.m.core)

        self.wantReconnect = False
        #: enables simultaneous processing of multiple downloads
        self.limitDL = 0
        #: chunk limit
        self.chunkLimit = 1
        #: enables resume (will be ignored if server dont accept chunks)
        self.resumeDownload = False

        #: plugin is waiting
        self.waiting = False

        self.ocr = None #captcha reader instance
        #: account handler instance, see :py:class:`Account`
        self.account = self.core.accountManager.getAccountForPlugin(self.__name__)

        #: premium status
        self.premium = False
        #: username/login
        self.user = None

        # drop accounts that are not usable (expired/disabled)
        if self.account and not self.account.isUsable(): self.account = None
        if self.account:
            self.user = self.account.loginname
            #: Browser instance, see `network.Browser`
            self.req = self.account.getAccountRequest()
            # Default: -1, True, True
            self.chunkLimit, self.limitDL, self.resumeDownload = self.account.getDownloadSettings()
            self.premium = self.account.isPremium()
        else:
            self.req = self.core.requestFactory.getRequest(klass=self.REQUEST_CLASS)

        #: Will hold the download class
        self.dl = None

        #: associated pyfile instance, see `PyFile`
        self.pyfile = pyfile
        self.thread = None # holds thread in future

        #: location where the last call to download was saved
        self.lastDownload = ""
        #: re match of the last call to `checkDownload`
        self.lastCheck = None
        #: js engine, see `JsEngine`
        self.js = self.core.js

        self.retries = 0 # amount of retries already made
        self.html = None # some plugins store html code here

        self.init()

    def getMultiDL(self):
        # limitDL <= 0 means "no limit", i.e. parallel downloads allowed
        return self.limitDL <= 0

    def setMultiDL(self, val):
        self.limitDL = 0 if val else 1

    #: virtual attribute using self.limitDL on behind
    multiDL = property(getMultiDL, setMultiDL)

    def getChunkCount(self):
        # configured chunk count, capped by the plugin/account limit when one is set
        if self.chunkLimit <= 0:
            return self.config["download"]["chunks"]
        return min(self.config["download"]["chunks"], self.chunkLimit)

    def getDownloadLimit(self):
        # effective download limit; the account option wins, smaller value is used
        if self.account:
            limit = self.account.options.get("limitDL", 0)
            if limit == "": limit = 0
            if self.limitDL > 0: # a limit is already set, we use the minimum
                return min(int(limit), self.limitDL)
            else:
                return int(limit)
        else:
            return self.limitDL


    def __call__(self):
        return self.__name__

    def init(self):
        """initialize the plugin (in addition to `__init__`)"""
        pass

    def setup(self):
        """ setup for environment and other things, called before downloading (possibly more than one time)"""
        pass

    def preprocessing(self, thread):
        """ handles important things to do before starting

        :param thread: download thread processing this plugin
        :return: result of `process`
        """
        self.thread = thread

        if self.account:
            # will force a re-login or reload of account info if necessary
            self.account.getAccountInfo()
        else:
            self.req.reset()

        self.setup()

        self.pyfile.setStatus("starting")

        return self.process(self.pyfile)

    def process(self, pyfile):
        """the 'main' method of every plugin, you **have to** overwrite it"""
        raise NotImplementedError

    def abort(self):
        # True when the associated pyfile was aborted
        return self.pyfile.abort

    def resetAccount(self):
        """ don't use account and retry download """
        self.account = None
        self.req = self.core.requestFactory.getRequest(self.__name__)
        self.retry()

    def checksum(self, local_file=None):
        """ Placeholder for checksum verification (to be handled by an addon).

        return codes:
        0 - checksum ok
        1 - checksum wrong
        5 - can't get checksum
        10 - not implemented
        20 - unknown error
        """
        #@TODO checksum check addon

        return True, 10


    def setWait(self, seconds, reconnect=False):
        """Set a specific wait time later used with `wait`

        :param seconds: wait time in seconds
        :param reconnect: True if a reconnect would avoid wait time
        """
        if reconnect:
            self.wantReconnect = True
        self.pyfile.waitUntil = time() + int(seconds)

    def wait(self):
        """ waits the time previously set

        :raises Reconnect: when a reconnect is in progress
        """
        self.waiting = True
        self.pyfile.setStatus("waiting")

        while self.pyfile.waitUntil > time():
            # wake up every 2 seconds to notice aborts/reconnects early
            self.thread.m.reconnecting.wait(2)

            self.checkAbort()
            if self.thread.m.reconnecting.isSet():
                self.waiting = False
                self.wantReconnect = False
                raise Reconnect

        self.waiting = False
        self.pyfile.setStatus("starting")

    def offline(self):
        """ fail and indicate file is offline """
        raise Fail("offline")

    def tempOffline(self):
        """ fail and indicates file is temporary offline, the core may take consequences """
        raise Fail("temp. offline")

    def retry(self, max_tries=3, wait_time=1, reason=""):
        """Retries and begin again from the beginning

        :param max_tries: number of maximum retries
        :param wait_time: time to wait in seconds
        :param reason: reason for retrying, will be passed to fail if max_tries reached
        :raises Fail: when max_tries is reached
        :raises Retry: otherwise, to restart processing
        """
        if 0 < max_tries <= self.retries:
            if not reason: reason = "Max retries reached"
            raise Fail(reason)

        self.wantReconnect = False
        self.setWait(wait_time)
        self.wait()

        self.retries += 1
        raise Retry(reason)


    # NOTE(review): mutable default arguments; harmless only as long as get/post
    # are never mutated in here - confirm before reusing this signature elsewhere
    def download(self, url, get={}, post={}, ref=True, cookies=True, disposition=False):
        """Downloads the content at url to download folder

        :param url: url of the file
        :param get: GET request parameters
        :param post: POST request data
        :param ref: send referer header
        :param cookies: enable cookies
        :param disposition: if True and server provides content-disposition header\
        the filename will be changed if needed
        :return: The location where the file was saved
        """
        self.checkForSameFiles()
        self.checkAbort()

        self.pyfile.setStatus("downloading")

        download_folder = self.config['general']['download_folder']

        location = save_join(download_folder, self.pyfile.package().folder)

        if not exists(location):
            # permission values are stored as octal strings in the config
            makedirs(location, int(self.core.config["permission"]["folder"], 8))

        if self.core.config["permission"]["change_dl"] and os.name != "nt":
            try:
                uid = getpwnam(self.config["permission"]["user"])[2]
                gid = getgrnam(self.config["permission"]["group"])[2]

                chown(location, uid, gid)
            except Exception, e:
                self.log.warning(_("Setting User and Group failed: %s") % str(e))

        # convert back to unicode
        location = fs_decode(location)
        name = save_filename(self.pyfile.name)

        filename = join(location, name)

        self.core.addonManager.dispatchEvent("download:start", self.pyfile, url, filename)

        # Create the class used for downloading
        self.dl = self.core.requestFactory.getDownloadRequest(self.req, self.DOWNLOAD_CLASS)
        try:
            # TODO: hardcoded arguments
            newname = self.dl.download(url, filename, get=get, post=post, referer=ref, chunks=self.getChunkCount(),
                                       resume=self.resumeDownload, disposition=disposition)
        finally:
            # record the real size even when the download raised
            self.dl.close()
            self.pyfile.size = self.dl.size

        if disposition and newname and newname != name: #triple check, just to be sure
            self.log.info("%(name)s saved as %(newname)s" % {"name": name, "newname": newname})
            self.pyfile.name = newname
            filename = join(location, newname)

        fs_filename = fs_encode(filename)

        if self.core.config["permission"]["change_file"]:
            chmod(fs_filename, int(self.core.config["permission"]["file"], 8))

        if self.core.config["permission"]["change_dl"] and os.name != "nt":
            try:
                uid = getpwnam(self.config["permission"]["user"])[2]
                gid = getgrnam(self.config["permission"]["group"])[2]

                chown(fs_filename, uid, gid)
            except Exception, e:
                self.log.warning(_("Setting User and Group failed: %s") % str(e))

        self.lastDownload = filename
        return self.lastDownload

    def checkDownload(self, rules, api_size=0, max_size=50000, delete=True, read_size=0):
        """ checks the content of the last downloaded file, re match is saved to `lastCheck`

        :param rules: dict with names and rules to match (compiled regexp or strings)
        :param api_size: expected file size
        :param max_size: if the file is larger then it wont be checked
        :param delete: delete if matched
        :param read_size: amount of bytes to read from files larger then max_size
        :return: dictionary key of the first rule that matched, None when no rule matched
        """
        lastDownload = fs_encode(self.lastDownload)
        if not exists(lastDownload): return None

        size = stat(lastDownload)
        size = size.st_size

        # file at least as large as the api announced -> looks like a real download
        if api_size and api_size <= size:
            return None
        elif size > max_size and not read_size:
            return None
        self.log.debug("Download Check triggered")
        f = open(lastDownload, "rb")
        content = f.read(read_size if read_size else -1)
        f.close()
        #produces encoding errors, better log to other file in the future?
        #self.log.debug("Content: %s" % content)
        for name, rule in rules.iteritems():
            if type(rule) in (str, unicode):
                # plain string rule: substring match
                if rule in content:
                    if delete:
                        remove(lastDownload)
                    return name
            elif hasattr(rule, "search"):
                # compiled regexp rule
                m = rule.search(content)
                if m:
                    if delete:
                        remove(lastDownload)
                    self.lastCheck = m
                    return name
        # falls through returning None when nothing matched


    def getPassword(self):
        """ get the password the user provided in the package"""
        password = self.pyfile.package().password
        if not password: return ""
        return password


    def checkForSameFiles(self, starting=False):
        """ checks if same file was/is downloaded within same package

        :param starting: indicates that the current download is going to start
        :raises SkipDownload:
        """

        pack = self.pyfile.package()

        for pyfile in self.core.files.cachedFiles():
            if pyfile != self.pyfile and pyfile.name == self.pyfile.name and pyfile.package().folder == pack.folder:
                if pyfile.status in (0, 12): #finished or downloading
                    raise SkipDownload(pyfile.pluginname)
                elif pyfile.status in (
                    5, 7) and starting: #a download is waiting/starting and was apparently started before
                    raise SkipDownload(pyfile.pluginname)

        download_folder = self.config['general']['download_folder']
        location = save_join(download_folder, pack.folder, self.pyfile.name)

        # skip when an equal or larger file is already on disk
        if starting and self.core.config['download']['skip_existing'] and exists(location):
            size = os.stat(location).st_size
            if size >= self.pyfile.size:
                raise SkipDownload("File exists.")

        pyfile = self.core.db.findDuplicates(self.pyfile.id, self.pyfile.package().folder, self.pyfile.name)
        if pyfile:
            if exists(location):
                raise SkipDownload(pyfile[0])

            self.log.debug("File %s not skipped, because it does not exists." % self.pyfile.name)

    def clean(self):
        """ clean everything and remove references """
        if hasattr(self, "pyfile"):
            del self.pyfile
        if hasattr(self, "req"):
            self.req.close()
            del self.req
        if hasattr(self, "dl"):
            del self.dl
        if hasattr(self, "thread"):
            del self.thread
        if hasattr(self, "html"):
            del self.html
diff --git a/pyload/plugins/MultiHoster.py b/pyload/plugins/MultiHoster.py
new file mode 100644
index 000000000..bc7a0de75
--- /dev/null
+++ b/pyload/plugins/MultiHoster.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+from time import time
+
+from pyload.utils import remove_chars
+
+from Account import Account
+
+
def normalize(domain):
    """ Normalize domain/plugin name, so they are comparable """
    # lowercase, trim whitespace, then drop separators that vary between spellings
    cleaned = domain.strip().lower()
    return remove_chars(cleaned, "-.")
+
+#noinspection PyUnresolvedReferences
class MultiHoster(Account):
    """
    Base class for MultiHoster services.
    This is also an Account instance so you should see :class:`Account` and overwrite necessary methods.
    Multihoster becomes only active when an Account was entered and the MultiHoster addon was activated.
    You need to overwrite `loadHosterList` and a corresponding :class:`Hoster` plugin with the same name should
    be available to make your service working.
    """

    #: List of hoster names that will be replaced so pyLoad will recognize them: (orig_name, pyload_name)
    replacements = [("freakshare.net", "freakshare.com"), ("uploaded.net", "uploaded.to")]

    #: Load new hoster list every x seconds
    hoster_timeout = 300

    def __init__(self, *args, **kwargs):

        # Hoster list (cached result of loadHosterList)
        self.hoster = []
        # Timestamp of the last successful refresh
        self.ts = 0

        # attributes must exist before Account.__init__ runs
        Account.__init__(self, *args, **kwargs)

    def loadHosterList(self, req):
        """Load list of supported hoster

        :param req: request instance to use for the query
        :return: List of domain names
        """
        raise NotImplementedError


    def isHosterUsuable(self, domain):
        """ Determine before downloading if hoster should be used.

        :param domain: domain name
        :return: True to let the MultiHoster download, False to fallback to default plugin
        """
        return True

    def getHosterList(self, force=False):
        """ Returns the cached hoster list, refreshing it when expired or `force` is set.

        :return: list of domains; empty list when the refresh failed
        """
        if self.ts + self.hoster_timeout < time() or force:
            req = self.getAccountRequest()
            try:
                self.hoster = self.loadHosterList(req)
            except Exception, e:
                self.logError(e)
                return []
            finally:
                req.close()

            # map known alternate domains onto the names pyLoad plugins use
            for rep in self.replacements:
                if rep[0] in self.hoster:
                    self.hoster.remove(rep[0])
                if rep[1] not in self.hoster:
                    self.hoster.append(rep[1])

            self.ts = time()

        return self.hoster
diff --git a/pyload/plugins/Plugin.py b/pyload/plugins/Plugin.py
new file mode 100644
index 000000000..0abb2644f
--- /dev/null
+++ b/pyload/plugins/Plugin.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+
# Legacy compatibility module: re-exports Base under its historical name.
print "Deprecated usage of plugins.Plugin -> use plugins.Base"
from .Base import *
from pyload.utils import chunks

# old plugins do `from plugins.Plugin import Plugin`
Plugin = Base
+
diff --git a/pyload/plugins/ReCaptcha.py b/pyload/plugins/ReCaptcha.py
new file mode 100644
index 000000000..e47522b4a
--- /dev/null
+++ b/pyload/plugins/ReCaptcha.py
@@ -0,0 +1,22 @@
+import re
+
class ReCaptcha():
    """ Deprecated helper that solves ReCaptcha challenges through a hoster plugin. """

    def __init__(self, plugin):
        self.plugin = plugin
        self.plugin.logDebug("Deprecated usage of ReCaptcha: Use CaptchaService instead")

    def challenge(self, id):
        """ Fetches a challenge for the given api key and has it solved.

        :param id: recaptcha api key
        :return: tuple of (challenge token, captcha result)
        """
        js = self.plugin.req.load("http://www.google.com/recaptcha/api/challenge", get={"k":id}, cookies=True)

        try:
            challenge = re.search("challenge : '(.*?)',", js).group(1)
            server = re.search("server : '(.*?)',", js).group(1)
        except AttributeError: # re.search returned None -> page layout changed
            # narrowed from a bare except, which also swallowed KeyboardInterrupt etc.
            self.plugin.fail("recaptcha error")
        result = self.result(server,challenge)

        return challenge, result

    def result(self, server, challenge):
        """ Downloads the captcha image and decrypts it via the plugin. """
        return self.plugin.decryptCaptcha("%simage"%server, get={"c":challenge}, cookies=True, imgtype="jpg")
+
diff --git a/pyload/plugins/Request.py b/pyload/plugins/Request.py
new file mode 100644
index 000000000..5652b6425
--- /dev/null
+++ b/pyload/plugins/Request.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+from logging import getLogger
+
+
class ResponseException(Exception):
    """ Signals an erroneous server response.

    The numeric status code stays available on the instance as ``code``.
    """

    def __init__(self, code, content=""):
        # keep the code accessible for callers that branch on it
        self.code = code
        Exception.__init__(self, "Server response error: %s %s" % (code, content))
+
+class Request(object):
+ """ Abstract class to support different types of request, most methods should be overwritten """
+
+ __version__ = "0.1"
+
+ #: Class that will be instantiated and associated with the request, and if needed copied and reused
+ CONTEXT_CLASS = None
+
+ def __init__(self, config, context=None, options=None):
+ self.log = getLogger("log")
+
+ # Global config, holds some configurable parameter
+ self.config = config
+
+ # Create a new context if not given
+ if context is None and self.CONTEXT_CLASS is not None:
+ self.context = self.CONTEXT_CLASS()
+ else:
+ self.context = context
+
+ # Store options in dict
+ self.options = {} if options is None else options
+
+ # Last response code
+ self.code = 0
+ self.doAbort = False
+ self.initContext()
+
+ # TODO: content encoding? Could be handled globally
+
+ @property
+ def http(self):
+ print "Deprecated usage of req.http, just use req instead"
+ return self
+
+ def initContext(self):
+ """ Should be used to initialize everything from given context and options """
+ pass
+
+ def getContext(self):
+ """ Retrieves complete state that is needed to copy the request context """
+ return self.config, self.context, self.options
+
+ def setContext(self, *args):
+ """ Sets request context """
+ self.config, self.context, self.options = args
+
+ def setOption(self, name, value):
+ """ Sets an option """
+ self.options[name] = value
+
+ def unsetOption(self, name):
+ """ Removes a specific option or reset everything on empty string """
+ if name == "":
+ self.options.clear()
+ else:
+ del self.options[name]
+
+ def load(self, uri, *args, **kwargs):
+ """ Loads given resource from given uri. Args and kwargs depends on implementation"""
+ raise NotImplementedError
+
+ def abort(self):
+ self.doAbort = True
+
+ def reset(self):
+ """ Resets the context to initial state """
+ self.unsetOption("")
+
+ def close(self):
+ """ Close and clean everything """
+ pass \ No newline at end of file
diff --git a/pyload/plugins/__init__.py b/pyload/plugins/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/__init__.py
diff --git a/pyload/plugins/accounts/AlldebridCom.py b/pyload/plugins/accounts/AlldebridCom.py
new file mode 100644
index 000000000..baaa9d264
--- /dev/null
+++ b/pyload/plugins/accounts/AlldebridCom.py
@@ -0,0 +1,49 @@
+from module.plugins.Account import Account
+import xml.dom.minidom as dom
+from BeautifulSoup import BeautifulSoup
+from time import time
+import re
+import urllib
+
class AlldebridCom(Account):
    __name__ = "AlldebridCom"
    __version__ = "0.21"
    __type__ = "account"
    __description__ = """AllDebrid.com account plugin"""
    __author_name__ = ("Andy, Voigt")
    __author_mail__ = ("spamsales@online.de")

    def loadAccountInfo(self, user, req):
        """ Determine the account expiration date.

        Scrapes the control panel first (better accuracy) and falls back
        to the XML api when the page cannot be parsed.
        """
        data = self.getAccountData(user)
        page = req.load("http://www.alldebrid.com/account/")
        soup=BeautifulSoup(page)
        #Try to parse expiration date directly from the control panel page (better accuracy)
        try:
            time_text=soup.find('div',attrs={'class':'remaining_time_text'}).strong.string
            self.log.debug("Account expires in: %s" % time_text)
            p = re.compile('\d+')
            exp_data=p.findall(time_text)
            # remaining time is shown as days/hours/minutes; convert to an absolute timestamp
            exp_time=time()+int(exp_data[0])*24*60*60+int(exp_data[1])*60*60+(int(exp_data[2])-1)*60
        #Get expiration date from API
        except:
            data = self.getAccountData(user)
            page = req.load("http://www.alldebrid.com/api.php?action=info_user&login=%s&pw=%s" % (user, data["password"]))
            self.log.debug(page)
            xml = dom.parseString(page)
            # api returns remaining days; 86400 seconds per day
            exp_time=time()+int(xml.getElementsByTagName("date")[0].childNodes[0].nodeValue)*86400
        # -1 means unlimited traffic
        account_info = {"validuntil": exp_time, "trafficleft": -1}
        return account_info

    def login(self, user, data, req):
        """ Logs in via the register endpoint; any known error message counts as wrong password. """
        urlparams = urllib.urlencode({'action':'login','login_login':user,'login_password':data["password"]})
        page = req.load("http://www.alldebrid.com/register/?%s" % (urlparams))

        if "This login doesn't exist" in page:
            self.wrongPassword()

        if "The password is not valid" in page:
            self.wrongPassword()

        if "Invalid captcha" in page:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/BayfilesCom.py b/pyload/plugins/accounts/BayfilesCom.py
new file mode 100644
index 000000000..0d036488b
--- /dev/null
+++ b/pyload/plugins/accounts/BayfilesCom.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+from module.common.json_layer import json_loads
+import re
+from time import time, mktime, strptime
+
class BayfilesCom(Account):
    __name__ = "BayfilesCom"
    __version__ = "0.02"
    __type__ = "account"
    __description__ = """bayfiles.com account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def loadAccountInfo(self, user, req):
        """ Queries account info from the json api, re-logging in once on error. """
        for i in range(2):
            response = json_loads(req.load("http://api.bayfiles.com/v1/account/info"))
            self.logDebug(response)
            if not response["error"]:
                break
            self.logWarning(response["error"])
            self.relogin()

        # NOTE(review): if both attempts fail, the last (error) response is still
        # used below and may lack 'premium'/'expires' keys - confirm api contract
        return {"premium": bool(response['premium']), \
            "trafficleft": -1, \
            "validuntil": response['expires'] if response['expires'] >= int(time()) else -1}

    def login(self, user, data, req):
        """ Logs in via the json api; an error field signals wrong credentials. """
        response = json_loads(req.load("http://api.bayfiles.com/v1/account/login/%s/%s" % (user, data["password"])))
        self.logDebug(response)
        if response["error"]:
            self.logError(response["error"])
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/BitshareCom.py b/pyload/plugins/accounts/BitshareCom.py
new file mode 100644
index 000000000..a4f56e31c
--- /dev/null
+++ b/pyload/plugins/accounts/BitshareCom.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: pking
+"""
+
+from module.plugins.Account import Account
+
class BitshareCom(Account):
    __name__ = "BitshareCom"
    __version__ = "0.11"
    __type__ = "account"
    __description__ = """Bitshare account plugin"""
    __author_name__ = ("Paul King")

    def loadAccountInfo(self, user, req):
        """ Reads premium state from the settings page. """
        page = req.load("http://bitshare.com/mysettings.html")

        # free accounts show an upgrade link on the settings page
        if "\"http://bitshare.com/myupgrade.html\">Free" in page:
            return {"validuntil": -1, "trafficleft": -1, "premium": False}

        # premium without direct download enabled will break downloads later
        if '<input type="checkbox" name="directdownload" checked="checked" />' not in page:
            self.core.log.warning(_("Activate direct Download in your Bitshare Account"))

        return {"validuntil": -1, "trafficleft": -1, "premium": True}


    def login(self, user, data, req):
        """ Performs the login; staying on the login page means bad credentials. """
        credentials = {"user": user, "password": data["password"], "submit": "Login"}
        page = req.load("http://bitshare.com/login.html", post=credentials, cookies=True)
        if "login" in req.lastEffectiveURL:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/BoltsharingCom.py b/pyload/plugins/accounts/BoltsharingCom.py
new file mode 100644
index 000000000..678591d1d
--- /dev/null
+++ b/pyload/plugins/accounts/BoltsharingCom.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class BoltsharingCom(XFSPAccount):
    # Thin XFileSharingPro-based plugin: all login and account-info logic is
    # inherited from XFSPAccount; only the site base URL differs.
    __name__ = "BoltsharingCom"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """Boltsharing.com account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Base URL used by the inherited XFSPAccount requests.
    MAIN_PAGE = "http://boltsharing.com/"
diff --git a/pyload/plugins/accounts/CramitIn.py b/pyload/plugins/accounts/CramitIn.py
new file mode 100644
index 000000000..182c9d647
--- /dev/null
+++ b/pyload/plugins/accounts/CramitIn.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class CramitIn(XFSPAccount):
    # Thin XFileSharingPro-based plugin: behavior comes entirely from
    # XFSPAccount; only the site base URL is specific to cramit.in.
    __name__ = "CramitIn"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """cramit.in account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Base URL used by the inherited XFSPAccount requests.
    MAIN_PAGE = "http://cramit.in/"
diff --git a/pyload/plugins/accounts/CyberlockerCh.py b/pyload/plugins/accounts/CyberlockerCh.py
new file mode 100644
index 000000000..31e0c3e24
--- /dev/null
+++ b/pyload/plugins/accounts/CyberlockerCh.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+from module.plugins.internal.SimpleHoster import parseHtmlForm
+
class CyberlockerCh(XFSPAccount):
    """Account plugin for cyberlocker.ch: XFSP-based site with a custom login flow."""

    __name__ = "CyberlockerCh"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """CyberlockerCh account plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    MAIN_PAGE = "http://cyberlocker.ch/"

    def login(self, user, data, req):
        """Submit the site's "FL" login form with the user's credentials."""
        login_url = self.MAIN_PAGE + 'login.html'
        html = req.load(login_url, decode=True)

        # Reuse the form's hidden fields when the form parses; otherwise
        # fall back to the known minimal field set.
        action, inputs = parseHtmlForm('name="FL"', html)
        if not inputs:
            inputs = {"op": "login", "redirect": self.MAIN_PAGE}

        inputs["login"] = user
        inputs["password"] = data['password']

        # Without this a 403 Forbidden is returned
        req.http.lastURL = login_url
        html = req.load(self.MAIN_PAGE, post=inputs, decode=True)

        if 'Incorrect Login or Password' in html or '>Error<' in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/CzshareCom.py b/pyload/plugins/accounts/CzshareCom.py
new file mode 100644
index 000000000..7b1a8edc5
--- /dev/null
+++ b/pyload/plugins/accounts/CzshareCom.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from time import mktime, strptime
+import re
+
+from module.plugins.Account import Account
+
+
class CzshareCom(Account):
    """Account plugin for czshare.com: reads remaining credit and its expiry."""

    __name__ = "CzshareCom"
    __version__ = "0.13"
    __type__ = "account"
    __description__ = """czshare.com account plugin"""
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    # Captures the credit amount, its binary unit and the expiry timestamp.
    CREDIT_LEFT_PATTERN = r'<tr class="active">\s*<td>([0-9 ,]+) (KiB|MiB|GiB)</td>\s*<td>([^<]*)</td>\s*</tr>'

    def loadAccountInfo(self, user, req):
        """Parse the credit-overview page into validuntil/trafficleft."""
        html = req.load("http://czshare.com/prehled_kreditu/")

        info = re.search(self.CREDIT_LEFT_PATTERN, html)
        if info is None:
            return {"validuntil": 0, "trafficleft": 0}

        # Amount uses space as thousands separator and comma as decimal point.
        amount = float(info.group(1).replace(' ', '').replace(',', '.'))
        credits = amount * 1024 ** {'KiB': 0, 'MiB': 1, 'GiB': 2}[info.group(2)]
        validuntil = mktime(strptime(info.group(3), '%d.%m.%y %H:%M'))
        return {"validuntil": validuntil, "trafficleft": credits}

    def login(self, user, data, req):
        """Post the login form; the login box reappearing signals failure."""
        html = req.load('https://czshare.com/index.php', post={
            "Prihlasit": "Prihlasit",
            "login-password": data["password"],
            "login-name": user
        })

        if '<div class="login' in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/DdlstorageCom.py b/pyload/plugins/accounts/DdlstorageCom.py
new file mode 100644
index 000000000..01d165f23
--- /dev/null
+++ b/pyload/plugins/accounts/DdlstorageCom.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class DdlstorageCom(XFSPAccount):
    # Thin XFileSharingPro-based plugin: login and account-info logic are
    # inherited from XFSPAccount; only the site base URL differs.
    __name__ = "DdlstorageCom"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """DDLStorage.com account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Base URL used by the inherited XFSPAccount requests.
    MAIN_PAGE = "http://ddlstorage.com/"
diff --git a/pyload/plugins/accounts/DebridItaliaCom.py b/pyload/plugins/accounts/DebridItaliaCom.py
new file mode 100644
index 000000000..91dd3787f
--- /dev/null
+++ b/pyload/plugins/accounts/DebridItaliaCom.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+import re
+import _strptime
+import time
+
+from module.plugins.Account import Account
+
+
class DebridItaliaCom(Account):
    """Account plugin for debriditalia.com (debrid service)."""

    __name__ = "DebridItaliaCom"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """debriditalia.com account plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    # NOTE: "WALID" is a historical typo; name kept for compatibility.
    WALID_UNTIL_PATTERN = r"Premium valid till: (?P<D>[^|]+) \|"

    def loadAccountInfo(self, user, req):
        """Parse premium status and expiry from the page cached by login().

        Returns a free-account dict both for non-premium users and when the
        page cannot be parsed (bug fix: previously fell through and returned
        None on parse failure, matching sibling plugins like FilerNet now).
        """
        if 'Account premium not activated' in self.html:
            return {"premium": False, "validuntil": None, "trafficleft": None}

        m = re.search(self.WALID_UNTIL_PATTERN, self.html)
        if m:
            validuntil = int(time.mktime(time.strptime(m.group('D'), "%d/%m/%Y %H:%M")))
            return {"premium": True, "validuntil": validuntil, "trafficleft": -1}
        else:
            self.logError('Unable to retrieve account information - Plugin may be out of date')
            # Report a safe default instead of an implicit None.
            return {"premium": False, "validuntil": None, "trafficleft": None}

    def login(self, user, data, req):
        """Log in via the GET-based endpoint; 'NO' in the reply means failure."""
        self.html = req.load("http://debriditalia.com/login.php",
                             get={"u": user, "p": data["password"]})
        if 'NO' in self.html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/DepositfilesCom.py b/pyload/plugins/accounts/DepositfilesCom.py
new file mode 100644
index 000000000..b0730de8e
--- /dev/null
+++ b/pyload/plugins/accounts/DepositfilesCom.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from module.plugins.Account import Account
+import re
+from time import strptime, mktime
+
class DepositfilesCom(Account):
    """Account plugin for depositfiles.com (parses the German gold-account pages)."""

    __name__ = "DepositfilesCom"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """depositfiles.com account plugin"""
    __author_name__ = ("mkaay")
    __author_mail__ = ("mkaay@mkaay.de")

    def loadAccountInfo(self, user, req):
        """Read the gold-account expiry date from the German gold page."""
        src = req.load("http://depositfiles.com/de/gold/")
        raw_date = re.search("noch den Gold-Zugriff: <b>(.*?)</b></div>", src).group(1)
        validuntil = int(mktime(strptime(raw_date, "%Y-%m-%d %H:%M:%S")))
        return {"validuntil": validuntil, "trafficleft": -1}

    def login(self, user, data, req):
        """Log in; the German error box signals a wrong user/password pair."""
        req.load("http://depositfiles.com/de/gold/payment.php")
        src = req.load("http://depositfiles.com/de/login.php",
                       get={"return": "/de/gold/payment.php"},
                       post={"login": user, "password": data["password"]})
        if r'<div class="error_message">Sie haben eine falsche Benutzername-Passwort-Kombination verwendet.</div>' in src:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/EasybytezCom.py b/pyload/plugins/accounts/EasybytezCom.py
new file mode 100644
index 000000000..ba7829b83
--- /dev/null
+++ b/pyload/plugins/accounts/EasybytezCom.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+from module.plugins.internal.SimpleHoster import parseHtmlForm
+import re
+from module.utils import parseFileSize
+from time import mktime, strptime
+
+class EasybytezCom(Account):
+ __name__ = "EasybytezCom"
+ __version__ = "0.02"
+ __type__ = "account"
+ __description__ = """EasyBytez.com account plugin"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ VALID_UNTIL_PATTERN = r'<TR><TD>Premium account expire:</TD><TD><b>([^<]+)</b>'
+ TRAFFIC_LEFT_PATTERN = r'<TR><TD>Traffic available today:</TD><TD><b>(?P<S>[^<]+)</b>'
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://www.easybytez.com/?op=my_account", decode = True)
+
+ validuntil = trafficleft = None
+ premium = False
+
+ found = re.search(self.VALID_UNTIL_PATTERN, html)
+ if found:
+ premium = True
+ trafficleft = -1
+ try:
+ self.logDebug(found.group(1))
+ validuntil = mktime(strptime(found.group(1), "%d %B %Y"))
+ except Exception, e:
+ self.logError(e)
+ else:
+ found = re.search(self.TRAFFIC_LEFT_PATTERN, html)
+ if found:
+ trafficleft = found.group(1)
+ if "Unlimited" in trafficleft:
+ premium = True
+ else:
+ trafficleft = parseFileSize(trafficleft) / 1024
+
+ return ({"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium})
+
+ def login(self, user, data, req):
+ html = req.load('http://www.easybytez.com/login.html', decode = True)
+ action, inputs = parseHtmlForm('name="FL"', html)
+ inputs.update({"login": user,
+ "password": data['password'],
+ "redirect": "http://www.easybytez.com/"})
+
+ html = req.load(action, post = inputs, decode = True)
+
+ if 'Incorrect Login or Password' in html or '>Error<' in html:
+ self.wrongPassword() \ No newline at end of file
diff --git a/pyload/plugins/accounts/EgoFilesCom.py b/pyload/plugins/accounts/EgoFilesCom.py
new file mode 100644
index 000000000..da1ed03ad
--- /dev/null
+++ b/pyload/plugins/accounts/EgoFilesCom.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.Account import Account
+import re
+import time
+from module.utils import parseFileSize
+
class EgoFilesCom(Account):
    """Account plugin for egofiles.com."""

    __name__ = "EgoFilesCom"
    __version__ = "0.2"
    __type__ = "account"
    __description__ = """egofiles.com account plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    # Captures premium expiry (P), traffic amount (T) and its unit (U).
    PREMIUM_ACCOUNT_PATTERN = '<br/>\s*Premium: (?P<P>[^/]*) / Traffic left: (?P<T>[\d.]*) (?P<U>\w*)\s*\\n\s*<br/>'

    def loadAccountInfo(self, user, req):
        """Parse premium status, expiry and remaining traffic from the home page.

        Bug fix: on a parse failure this used to log an error and implicitly
        return None; it now returns a free-account dict like sibling plugins
        (e.g. FilerNet) so callers always get a mapping.
        """
        html = req.load("http://egofiles.com")
        if 'You are logged as a Free User' in html:
            return {"premium": False, "validuntil": None, "trafficleft": None}

        m = re.search(self.PREMIUM_ACCOUNT_PATTERN, html)
        if m:
            validuntil = int(time.mktime(time.strptime(m.group('P'), "%Y-%m-%d %H:%M:%S")))
            trafficleft = parseFileSize(m.group('T'), m.group('U')) / 1024  # KiB
            return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
        else:
            self.logError('Unable to retrieve account information - Plugin may be out of date')
            return {"premium": False, "validuntil": None, "trafficleft": None}

    def login(self, user, data, req):
        """Log in through the AJAX register endpoint (after forcing English)."""
        # Set English language
        req.load("https://egofiles.com/ajax/lang.php?lang=en", just_header=True)

        html = req.load("http://egofiles.com/ajax/register.php",
                        post={"log": 1,
                              "loginV": user,
                              "passV": data["password"]})
        if 'Login successful' not in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/EuroshareEu.py b/pyload/plugins/accounts/EuroshareEu.py
new file mode 100644
index 000000000..42967d975
--- /dev/null
+++ b/pyload/plugins/accounts/EuroshareEu.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+from time import mktime, strptime
+from string import replace
+import re
+
class EuroshareEu(Account):
    """Account plugin for euroshare.eu."""

    __name__ = "EuroshareEu"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """euroshare.eu account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def loadAccountInfo(self, user, req):
        """Read the premium expiry date from the customer-zone settings page."""
        self.relogin(user)
        html = req.load("http://euroshare.eu/customer-zone/settings/")

        expire = re.search('id="input_expire_date" value="(\d+\.\d+\.\d+ \d+:\d+)"', html)
        if expire:
            premium = True
            validuntil = mktime(strptime(expire.group(1), "%d.%m.%Y %H:%M"))
        else:
            premium, validuntil = False, -1

        return {"validuntil": validuntil, "trafficleft": -1, "premium": premium}

    def login(self, user, data, req):
        """Log in with the persistent-session flag set; the Slovak error text
        in the response means wrong credentials."""
        html = req.load('http://euroshare.eu/customer-zone/login/', post={
            "trvale": "1",
            "login": user,
            "password": data["password"]
        }, decode=True)

        if u">Nesprávne prihlasovacie meno alebo heslo" in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/FastshareCz.py b/pyload/plugins/accounts/FastshareCz.py
new file mode 100644
index 000000000..69bbb0827
--- /dev/null
+++ b/pyload/plugins/accounts/FastshareCz.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.Account import Account
+from module.utils import parseFileSize
+
+
class FastshareCz(Account):
    """Account plugin for fastshare.cz (credit-based premium)."""

    __name__ = "FastshareCz"
    __version__ = "0.02"
    __type__ = "account"
    __description__ = """fastshare.cz account plugin"""
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    def loadAccountInfo(self, user, req):
        """Read remaining credit from the user page; nonzero credit means premium."""
        html = req.load("http://www.fastshare.cz/user", decode=True)

        credit = re.search(r'(?:Kredit|Credit)\s*: </td><td>(.+?)&nbsp;', html)
        if credit is None:
            trafficleft = None
            premium = False
        else:
            trafficleft = parseFileSize(credit.group(1)) / 1024  # KiB
            premium = bool(trafficleft)

        return {"validuntil": -1, "trafficleft": trafficleft, "premium": premium}

    def login(self, user, data, req):
        """Log in via sql.php; the Czech error message signals bad credentials."""
        req.load('http://www.fastshare.cz/login')  # Do not remove or it will not login
        html = req.load('http://www.fastshare.cz/sql.php', post={
            "heslo": data['password'],
            "login": user
        }, decode=True)

        if u'>Špatné uşivatelské jméno nebo heslo.<' in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/FilebeerInfo.py b/pyload/plugins/accounts/FilebeerInfo.py
new file mode 100644
index 000000000..40ab70519
--- /dev/null
+++ b/pyload/plugins/accounts/FilebeerInfo.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from time import mktime, strptime
+from module.plugins.Account import Account
+from module.utils import parseFileSize
+
+class FilebeerInfo(Account):
+ __name__ = "FilebeerInfo"
+ __version__ = "0.02"
+ __type__ = "account"
+ __description__ = """filebeer.info account plugin"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ VALID_UNTIL_PATTERN = r'Reverts To Free Account:\s</td>\s*<td>\s*(.*?)\s*</td>'
+
+ def loadAccountInfo(self, user, req):
+ html = req.load("http://filebeer.info/upgrade.php", decode = True)
+ premium = not 'Free User </td>' in html
+
+ validuntil = None
+ if premium:
+ try:
+ validuntil = mktime(strptime(re.search(self.VALID_UNTIL_PATTERN, html).group(1), "%d/%m/%Y %H:%M:%S"))
+ except Exception, e:
+ self.logError("Unable to parse account info", e)
+
+ return {"validuntil": validuntil, "trafficleft": -1, "premium": premium}
+
+ def login(self, user, data, req):
+ html = req.load('http://filebeer.info/login.php', post = {
+ "submit": 'Login',
+ "loginPassword": data['password'],
+ "loginUsername": user,
+ "submitme": '1'
+ }, decode = True)
+
+ if "<ul class='pageErrors'>" in html or ">Your username and password are invalid<" in html:
+ self.wrongPassword() \ No newline at end of file
diff --git a/pyload/plugins/accounts/FilecloudIo.py b/pyload/plugins/accounts/FilecloudIo.py
new file mode 100644
index 000000000..cf9f92209
--- /dev/null
+++ b/pyload/plugins/accounts/FilecloudIo.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+
class FilecloudIo(Account):
    """Account plugin for filecloud.io (login only; no account info exposed)."""

    __name__ = "FilecloudIo"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """FilecloudIo account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def loadAccountInfo(self, user, req):
        # The site exposes no usable account details; report a free account.
        return {"validuntil": -1, "trafficleft": -1, "premium": False}

    def login(self, user, data, req):
        """Post the (possibly pre-filled) login form and record the outcome
        in self.logged_in."""
        req.cj.setCookie("secure.filecloud.io", "lang", "en")
        html = req.load('https://secure.filecloud.io/user-login.html')

        # form_data may be pre-populated elsewhere (e.g. extra form fields);
        # start empty when it isn't.
        if not hasattr(self, "form_data"):
            self.form_data = {}

        self.form_data["username"] = user
        self.form_data["password"] = data['password']

        html = req.load('https://secure.filecloud.io/user-login_p.html',
                        post=self.form_data,
                        multipart=True)

        self.logged_in = "you have successfully logged in - filecloud.io" in html
        self.form_data = {}
diff --git a/pyload/plugins/accounts/FilefactoryCom.py b/pyload/plugins/accounts/FilefactoryCom.py
new file mode 100644
index 000000000..8e163e2f6
--- /dev/null
+++ b/pyload/plugins/accounts/FilefactoryCom.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+import re
+from time import mktime, strptime
+
class FilefactoryCom(Account):
    """Account plugin for filefactory.com."""

    __name__ = "FilefactoryCom"
    __version__ = "0.13"
    __type__ = "account"
    __description__ = """filefactory.com account plugin"""
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    # Premium members have a <time> element holding the expiry date.
    ACCOUNT_INFO_PATTERN = r'<time datetime="([\d-]+)">'

    def loadAccountInfo(self, user, req):
        """Parse the member page; a <time> tag marks a premium account."""
        html = req.load("http://www.filefactory.com/member/")

        expire = re.search(self.ACCOUNT_INFO_PATTERN, html)
        if expire is None:
            premium, validuntil = False, -1
        else:
            premium = True
            validuntil = mktime(strptime(expire.group(1), "%Y-%m-%d"))

        return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}

    def login(self, user, data, req):
        """Log in by e-mail; a redirect with err=1 in the headers means failure."""
        req.load("http://www.filefactory.com/member/login.php", post={
            "email": user,
            "password": data["password"],
            "redirect": "/"})

        if '/member/login.php?err=1' in req.http.header:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/FilejungleCom.py b/pyload/plugins/accounts/FilejungleCom.py
new file mode 100644
index 000000000..8ac25c201
--- /dev/null
+++ b/pyload/plugins/accounts/FilejungleCom.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+import re
+from time import mktime, strptime
+
class FilejungleCom(Account):
    """Account plugin for filejungle.com."""

    __name__ = "FilejungleCom"
    __version__ = "0.11"
    __type__ = "account"
    __description__ = """filejungle.com account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Re-login interval consumed by the Account base class (presumably
    # seconds — TODO confirm against module.plugins.Account).
    login_timeout = 60

    URL = "http://filejungle.com/"
    TRAFFIC_LEFT_PATTERN = r'"/extend_premium\.php">Until (\d+ [A-Za-z]+ \d+)<br'
    LOGIN_FAILED_PATTERN = r'<span htmlfor="loginUser(Name|Password)" generated="true" class="fail_info">'

    def loadAccountInfo(self, user, req):
        """Read the premium expiry date from the dashboard."""
        html = req.load(self.URL + "dashboard.php")
        expire = re.search(self.TRAFFIC_LEFT_PATTERN, html)
        if expire is None:
            premium, validuntil = False, -1
        else:
            premium = True
            validuntil = mktime(strptime(expire.group(1), "%d %b %Y"))

        return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}

    def login(self, user, data, req):
        """Submit the login form (captcha fields left empty); per-field
        validation errors in the response mean wrong credentials."""
        html = req.load(self.URL + "login.php", post={
            "loginUserName": user,
            "loginUserPassword": data["password"],
            "loginFormSubmit": "Login",
            "recaptcha_challenge_field": "",
            "recaptcha_response_field": "",
            "recaptcha_shortencode_field": ""})

        if re.search(self.LOGIN_FAILED_PATTERN, html):
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/FilerNet.py b/pyload/plugins/accounts/FilerNet.py
new file mode 100644
index 000000000..7afd00f25
--- /dev/null
+++ b/pyload/plugins/accounts/FilerNet.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+import re
+import _strptime
+import time
+
+from module.plugins.Account import Account
+from module.utils import parseFileSize
+
+
class FilerNet(Account):
    """Account plugin for filer.net (CSRF-protected login, German profile page)."""

    __name__ = "FilerNet"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """Filer.net account plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    TOKEN_PATTERN = r'_csrf_token" value="([^"]+)" />'
    # NOTE: pattern text (including the mangled umlaut) must match the page
    # bytes exactly; the attribute name's "WALID" typo is kept as-is.
    WALID_UNTIL_PATTERN = r"Der Premium-Zugang ist gÃŒltig bis (.+)\.\s*</td>"
    TRAFFIC_PATTERN = r'Traffic</th>\s*<td>([^<]+)</td>'
    FREE_PATTERN = r'Account Status</th>\s*<td>\s*Free'

    def loadAccountInfo(self, user, req):
        """Parse premium status, expiry and traffic from the profile page."""
        self.html = req.load("https://filer.net/profile")

        # Free user
        if re.search(self.FREE_PATTERN, self.html):
            return {"premium": False, "validuntil": None, "trafficleft": None}

        until = re.search(self.WALID_UNTIL_PATTERN, self.html)
        traffic = re.search(self.TRAFFIC_PATTERN, self.html)
        if not (until and traffic):
            self.logError('Unable to retrieve account information - Plugin may be out of date')
            return {"premium": False, "validuntil": None, "trafficleft": None}

        validuntil = int(time.mktime(time.strptime(until.group(1), "%d.%m.%Y %H:%M:%S")))
        trafficleft = parseFileSize(traffic.group(1)) / 1024  # KiB
        return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}

    def login(self, user, data, req):
        """Fetch the CSRF token from the login page, then post credentials;
        no 'Logout' link afterwards means the login failed."""
        self.html = req.load("https://filer.net/login")
        token = re.search(self.TOKEN_PATTERN, self.html).group(1)
        self.html = req.load("https://filer.net/login_check",
                             post={"_username": user, "_password": data["password"],
                                   "_remember_me": "on", "_csrf_token": token,
                                   "_target_path": "https://filer.net/"})
        if 'Logout' not in self.html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/FilerioCom.py b/pyload/plugins/accounts/FilerioCom.py
new file mode 100644
index 000000000..feacacaf5
--- /dev/null
+++ b/pyload/plugins/accounts/FilerioCom.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class FilerioCom(XFSPAccount):
    # Thin XFileSharingPro-based plugin: behavior comes entirely from
    # XFSPAccount; only the site base URL is specific to filerio.in.
    __name__ = "FilerioCom"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """FileRio.in account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Base URL used by the inherited XFSPAccount requests.
    MAIN_PAGE = "http://filerio.in/"
diff --git a/pyload/plugins/accounts/FilesMailRu.py b/pyload/plugins/accounts/FilesMailRu.py
new file mode 100644
index 000000000..98fe13248
--- /dev/null
+++ b/pyload/plugins/accounts/FilesMailRu.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from module.plugins.Account import Account
+import re
+from time import time
+
class FilesMailRu(Account):
    """Account plugin for files.mail.ru (login via the mail.ru auth gateway)."""

    __name__ = "FilesMailRu"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """filesmail.ru account plugin"""
    __author_name__ = ("RaNaN")
    __author_mail__ = ("RaNaN@pyload.org")

    def loadAccountInfo(self, user, req):
        # The service exposes no quota or expiry information.
        return {"validuntil": None, "trafficleft": None}

    def login(self, user, data, req):
        """Authenticate against the mail.ru gateway; the user name is an
        e-mail address split into login and domain parts."""
        user, domain = user.split("@")

        page = req.load("http://swa.mail.ru/cgi-bin/auth", None,
                        {"Domain": domain, "Login": user,
                         "Password": data['password'],
                         "Page": "http://files.mail.ru/"}, cookies=True)

        if "НеверМПе ОЌя пПльзПвателя ОлО парПль" in page: # @TODO seems not to work
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/FileserveCom.py b/pyload/plugins/accounts/FileserveCom.py
new file mode 100644
index 000000000..5e5068f22
--- /dev/null
+++ b/pyload/plugins/accounts/FileserveCom.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from time import mktime, strptime
+
+from module.plugins.Account import Account
+from module.common.json_layer import json_loads
+
class FileserveCom(Account):
    """Account plugin for fileserve.com (JSON API check plus web-site login)."""

    __name__ = "FileserveCom"
    __version__ = "0.2"
    __type__ = "account"
    __description__ = """fileserve.com account plugin"""
    __author_name__ = ("mkaay")
    __author_mail__ = ("mkaay@mkaay.de")

    def loadAccountInfo(self, user, req):
        """Query the JSON API for account type, traffic and expiry."""
        data = self.getAccountData(user)

        page = req.load("http://app.fileserve.com/api/login/",
                        post={"username": user, "password": data["password"],
                              "submit": "Submit+Query"})
        res = json_loads(page)

        if res["type"] != "premium":
            return {"premium": False, "trafficleft": None, "validuntil": None}

        validuntil = mktime(strptime(res["expireTime"], "%Y-%m-%d %H:%M:%S"))
        return {"trafficleft": res["traffic"], "validuntil": validuntil}

    def login(self, user, data, req):
        """Validate credentials against the API, then also log in on the web
        site so the session cookie is available for downloads."""
        page = req.load("http://app.fileserve.com/api/login/",
                        post={"username": user, "password": data["password"],
                              "submit": "Submit+Query"})
        res = json_loads(page)

        if not res["type"]:
            self.wrongPassword()

        #login at fileserv page
        req.load("http://www.fileserve.com/login.php",
                 post={"loginUserName": user, "loginUserPassword": data["password"],
                       "autoLogin": "checked", "loginFormSubmit": "Login"})
diff --git a/pyload/plugins/accounts/FourSharedCom.py b/pyload/plugins/accounts/FourSharedCom.py
new file mode 100644
index 000000000..bd3820277
--- /dev/null
+++ b/pyload/plugins/accounts/FourSharedCom.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+from module.common.json_layer import json_loads
+
class FourSharedCom(Account):
    """Account plugin for the 4shared.com file hoster."""
    __name__ = "FourSharedCom"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """FourSharedCom account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def loadAccountInfo(self, user, req):
        # FIXME: real account data is not fetched yet; report a plain free account.
        account = {"validuntil": -1, "trafficleft": -1, "premium": False}
        return account

    def login(self, user, data, req):
        # Force the English locale so responses are predictable.
        req.cj.setCookie("www.4shared.com", "4langcookie", "en")
        reply = req.load('http://www.4shared.com/login',
                         post={"login": user,
                               "password": data['password'],
                               "remember": "false",
                               "doNotRedirect": "true"})
        self.logDebug(reply)
        reply = json_loads(reply)

        # The JSON answer carries an "ok" flag; anything else is a failed login.
        logged_in = "ok" in reply and reply['ok'] == True
        if not logged_in:
            if "rejectReason" in reply and reply['rejectReason'] != True:
                self.logError(reply['rejectReason'])
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/FreakshareCom.py b/pyload/plugins/accounts/FreakshareCom.py
new file mode 100644
index 000000000..732f9e203
--- /dev/null
+++ b/pyload/plugins/accounts/FreakshareCom.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+import re
+from time import strptime, mktime
+
+from module.plugins.Account import Account
+
class FreakshareCom(Account):
    """Account plugin for freakshare.com premium accounts."""
    __name__ = "FreakshareCom"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """freakshare.com account plugin"""
    __author_name__ = ("RaNaN")
    __author_mail__ = ("RaNaN@pyload.org")

    def loadAccountInfo(self, user, req):
        # Scrape validity date and remaining traffic from the (German) start page.
        html = req.load("http://freakshare.com/")

        expires = re.search(r"ltig bis:</td>\s*<td><b>([0-9 \-:.]+)</b></td>", html, re.MULTILINE)
        validuntil = mktime(strptime(expires.group(1).strip(), "%d.%m.%Y - %H:%M"))

        remaining = re.search(r"Traffic verbleibend:</td>\s*<td>([^<]+)", html, re.MULTILINE)
        trafficleft = self.parseTraffic(remaining.group(1).strip())

        return {"validuntil": validuntil, "trafficleft": trafficleft}

    def login(self, user, data, req):
        credentials = {"submit": "Login", "user": user, "pass": data['password']}
        page = req.load("http://freakshare.com/login.html", None, credentials, cookies=True)

        # The error message appears in German or English depending on locale.
        if "Falsche Logindaten!" in page or "Wrong Username or Password!" in page:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/FshareVn.py b/pyload/plugins/accounts/FshareVn.py
new file mode 100644
index 000000000..9b22cbafb
--- /dev/null
+++ b/pyload/plugins/accounts/FshareVn.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+from time import mktime, strptime
+from pycurl import REFERER
+import re
+
class FshareVn(Account):
    __name__ = "FshareVn"
    __version__ = "0.04"
    __type__ = "account"
    __description__ = """fshare.vn account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Vietnamese labels on the account page: validity period and total account volume.
    VALID_UNTIL_PATTERN = ur'<dt>Thời hạn dùng:</dt>\s*<dd>([^<]+)</dd>'
    TRAFFIC_LEFT_PATTERN = ur'<dt>Tổng Dung Lượng Tài Khoản</dt>\s*<dd[^>]*>([0-9.]+) ([kKMG])B</dd>'
    DIRECT_DOWNLOAD_PATTERN = ur'<input type="checkbox"\s*([^=>]*)[^>]*/>Kích hoạt download trực tiếp</dt>'

    def loadAccountInfo(self, user, req):
        # Scrape the account page; the presence of an expiry date marks premium.
        html = req.load("http://www.fshare.vn/account_info.php", decode = True)
        found = re.search(self.VALID_UNTIL_PATTERN, html)
        if found:
            premium = True
            validuntil = mktime(strptime(found.group(1), '%I:%M:%S %p %d-%m-%Y'))
            found = re.search(self.TRAFFIC_LEFT_PATTERN, html)
            # Scale "<size> <unit>B" to KiB: k/K stay as-is, M * 1024, G * 1024^2.
            trafficleft = float(found.group(1)) * 1024 ** {'k': 0, 'K': 0, 'M': 1, 'G': 2}[found.group(2)] if found else 0
        else:
            premium = False
            validuntil = None
            trafficleft = None

        return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}

    def login(self, user, data, req):
        # The site validates the Referer header on the login form.
        req.http.c.setopt(REFERER, "https://www.fshare.vn/login.php")

        html = req.load('https://www.fshare.vn/login.php', post = {
            "login_password" : data['password'],
            "login_useremail" : user,
            "url_refe" : "https://www.fshare.vn/login.php"
        }, referer = True, decode = True)

        # A VIP badge in the response marks a successful premium login.
        # NOTE(review): free accounts without the badge would also be treated
        # as wrong password here -- confirm that is intended.
        if not '<img alt="VIP"' in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/Ftp.py b/pyload/plugins/accounts/Ftp.py
new file mode 100644
index 000000000..9c1081662
--- /dev/null
+++ b/pyload/plugins/accounts/Ftp.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.Account import Account
+
class Ftp(Account):
    """Placeholder account type holding plain FTP credentials."""
    __name__ = "Ftp"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """Ftp dummy account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # There is nothing to periodically refresh or re-login for raw FTP
    # credentials, so push both intervals far into the future.
    login_timeout = 1000000
    info_threshold = 1000000
diff --git a/pyload/plugins/accounts/HellshareCz.py b/pyload/plugins/accounts/HellshareCz.py
new file mode 100644
index 000000000..c7a918dec
--- /dev/null
+++ b/pyload/plugins/accounts/HellshareCz.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+import re
+import time
+
class HellshareCz(Account):
    __name__ = "HellshareCz"
    __version__ = "0.14"
    __type__ = "account"
    __description__ = """hellshare.cz account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # group(1) is either plain digits (credit in MB) or "DD.MM." (expiry
    # day/month of a time-based account, year omitted by the site).
    CREDIT_LEFT_PATTERN = r'<div class="credit-link">\s*<table>\s*<tr>\s*<th>(\d+|\d\d\.\d\d\.)</th>'

    def loadAccountInfo(self, user, req):
        # Re-login first so the start page reflects the current session.
        self.relogin(user)
        html = req.load("http://www.hellshare.com/")

        found = re.search(self.CREDIT_LEFT_PATTERN, html)
        if found is None:
            # No credit box: not logged in as premium.
            trafficleft = None
            validuntil = None
            premium = False
        else:
            credit = found.group(1)
            premium = True
            try:
                if "." in credit:
                    #Time-based account
                    vt = [int(x) for x in credit.split('.')[:2]]
                    lt = time.localtime()
                    # The site shows only day and month; if that date already
                    # passed this year, the expiry must be next year.
                    year = lt.tm_year + int(vt[1] < lt.tm_mon or (vt[1] == lt.tm_mon and vt[0] < lt.tm_mday))
                    validuntil = time.mktime(time.strptime("%s%d 23:59:59" % (credit,year), "%d.%m.%Y %H:%M:%S"))
                    trafficleft = -1
                else:
                    #Traffic-based account
                    # Credit is in MB; account info expects KiB.
                    trafficleft = int(credit) * 1024
                    validuntil = -1
            except Exception, e:
                # Best effort: report "unknown" rather than failing the account.
                self.logError('Unable to parse credit info', e)
                validuntil = -1
                trafficleft = -1

        return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}

    def login(self, user, data, req):
        html = req.load('http://www.hellshare.com/')
        if req.lastEffectiveURL != 'http://www.hellshare.com/':
            #Switch to English
            self.logDebug('Switch lang - URL: %s' % req.lastEffectiveURL)
            json = req.load("%s?do=locRouter-show" % req.lastEffectiveURL)
            # The locale router embeds a "--<hex>-" hash used to pick the language.
            hash = re.search(r"(--[0-9a-f]+-)", json).group(1)
            self.logDebug('Switch lang - HASH: %s' % hash)
            html = req.load('http://www.hellshare.com/%s/' % hash)

        # If the credit box is already visible, the session is still valid.
        if re.search(self.CREDIT_LEFT_PATTERN, html):
            self.logDebug('Already logged in')
            return

        html = req.load('http://www.hellshare.com/login?do=loginForm-submit', post={
            "login": "Log in",
            "password": data["password"],
            "username": user,
            "perm_login": "on"
        })

        if "<p>You input a wrong user name or wrong password</p>" in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/HellspyCz.py b/pyload/plugins/accounts/HellspyCz.py
new file mode 100644
index 000000000..5f14a093e
--- /dev/null
+++ b/pyload/plugins/accounts/HellspyCz.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+import re
+import string
+
class HellspyCz(Account):
    __name__ = "HellspyCz"
    __version__ = "0.2"
    __type__ = "account"
    __description__ = """hellspy.cz account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Remaining credit shown on the start page (value in MB).
    CREDIT_LEFT_PATTERN = r'<strong>Credits: </strong>\s*(\d+)'
    WRONG_PASSWORD_PATTERN = r'<p class="block-error-3 marg-tb-050">\s*Wrong user or password was entered<br />'

    # PHP session id captured during login(); reused by loadAccountInfo().
    phpsessid = ''

    def loadAccountInfo(self, user, req):
        # Attach the session id obtained in login() to the account cookie jar.
        cj = self.getAccountCookies(user)
        cj.setCookie(".hellspy.com", "PHPSESSID", self.phpsessid)

        html = req.load("http://www.hellspy.com/")

        found = re.search(self.CREDIT_LEFT_PATTERN, html)
        if found is None:
            credits = 0
        else:
            # Credits are in MB; account info expects KiB.
            credits = int(found.group(1)) * 1024

        return {"validuntil": -1, "trafficleft": credits}

    def login(self, user, data,req):
        # Grab a fresh PHPSESSID from the response headers.
        header = req.load('http://www.hellspy.com/', just_header = True)
        self.phpsessid = re.search(r'PHPSESSID=(\w+)', header).group(1)
        self.logDebug("PHPSESSID:" + self.phpsessid)

        html = req.load("http://www.hellspy.com/--%s-" % self.phpsessid)

        # Log in through the hell-share.com API endpoint bound to this session hash.
        html = req.load("http://www.hell-share.com/user/login/?do=apiLoginForm-submit&api_hash=hellspy_iq&user_hash=%s" % self.phpsessid, post={
            "login": "1",
            "password": data["password"],
            "username": user,
            "redir_url": 'http://www.hellspy.com/?do=loginBox-login',
            "permanent_login": "1"
        })

        cj = self.getAccountCookies(user)
        cj.setCookie(".hellspy.com", "PHPSESSID", self.phpsessid)

        # No credit box in the response means the login failed.
        if not re.search(self.CREDIT_LEFT_PATTERN, html):
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/HotfileCom.py b/pyload/plugins/accounts/HotfileCom.py
new file mode 100644
index 000000000..23e42dacf
--- /dev/null
+++ b/pyload/plugins/accounts/HotfileCom.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay, JoKoT3
+"""
+
+from module.plugins.Account import Account
+from time import strptime, mktime
+import hashlib
+
class HotfileCom(Account):
    __name__ = "HotfileCom"
    __version__ = "0.2"
    __type__ = "account"
    __description__ = """hotfile.com account plugin"""
    __author_name__ = ("mkaay","JoKoT3")
    __author_mail__ = ("mkaay@mkaay.de","jokot3@gmail.com")

    def loadAccountInfo(self, user, req):
        """Fetch user info via the hotfile API and map it to account-info keys.

        Raises Exception when the API signals an error (response starts with ".").
        """
        resp = self.apiCall("getuserinfo", user=user)
        if resp.startswith("."):
            self.core.debug("HotfileCom API Error: %s" % resp)
            raise Exception
        # Response is an urlencoded-style "key=value&key=value" string.
        info = {}
        for p in resp.split("&"):
            key, value = p.split("=")
            info[key] = value

        # Default to a free account; overwritten below for is_premium == '1'.
        # Fix: 'tmp' used to be unbound (UnboundLocalError on return) when the
        # API returned an is_premium value other than '0' or '1'.
        tmp = {"premium": False}

        if info['is_premium'] == '1':
            # "premium_until" looks like "YYYY-MM-DDTHH:MM:SS<tz>"; split off
            # the timezone offset and normalise to an epoch timestamp.
            info["premium_until"] = info["premium_until"].replace("T"," ")
            zone = info["premium_until"][19:]
            info["premium_until"] = info["premium_until"][:19]
            zone = int(zone[:3])

            validuntil = int(mktime(strptime(info["premium_until"], "%Y-%m-%d %H:%M:%S"))) + (zone*3600)
            tmp = {"validuntil":validuntil, "trafficleft":-1, "premium":True}

        return tmp

    def apiCall(self, method, post=None, user=None):
        """Call the hotfile API with digest-authenticated credentials.

        Fix: 'post' used to be a mutable default dict that this method mutates
        via update(), leaking parameters between calls; it is now None-defaulted.
        """
        post = {} if post is None else post
        if user:
            data = self.getAccountData(user)
        else:
            user, data = self.selectAccount()

        req = self.getAccountRequest(user)

        # Digest scheme required by the API: md5(md5(password) + digest).
        digest = req.load("http://api.hotfile.com/", post={"action":"getdigest"})
        h = hashlib.md5()
        h.update(data["password"])
        hp = h.hexdigest()
        h = hashlib.md5()
        h.update(hp)
        h.update(digest)
        pwhash = h.hexdigest()

        post.update({"action": method})
        post.update({"username":user, "passwordmd5dig":pwhash, "digest":digest})
        resp = req.load("http://api.hotfile.com/", post=post)
        req.close()
        return resp

    def login(self, user, data, req):
        """Establish a web session (English locale) and verify the credentials."""
        cj = self.getAccountCookies(user)
        cj.setCookie("hotfile.com", "lang", "en")
        req.load("http://hotfile.com/", cookies=True)
        page = req.load("http://hotfile.com/login.php", post={"returnto": "/", "user": user, "pass": data["password"]}, cookies=True)

        if "Bad username/password" in page:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/Http.py b/pyload/plugins/accounts/Http.py
new file mode 100644
index 000000000..805d19900
--- /dev/null
+++ b/pyload/plugins/accounts/Http.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.Account import Account
+
class Http(Account):
    """Placeholder account type holding plain HTTP credentials."""
    __name__ = "Http"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """Http dummy account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Nothing to refresh or re-login for raw HTTP credentials, so push
    # both intervals far into the future.
    login_timeout = 1000000
    info_threshold = 1000000
diff --git a/pyload/plugins/accounts/MegasharesCom.py b/pyload/plugins/accounts/MegasharesCom.py
new file mode 100644
index 000000000..91601fc95
--- /dev/null
+++ b/pyload/plugins/accounts/MegasharesCom.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.Account import Account
+import re
+from time import mktime, strptime
+
class MegasharesCom(Account):
    __name__ = "MegasharesCom"
    __version__ = "0.02"
    __type__ = "account"
    __description__ = """megashares.com account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Premium period end date, e.g. "Jan 1, 2013".
    VALID_UNTIL_PATTERN = r'<p class="premium_info_box">Period Ends: (\w{3} \d{1,2}, \d{4})</p>'

    def loadAccountInfo(self, user, req):
        #self.relogin(user)
        html = req.load("http://d01.megashares.com/myms.php", decode = True)

        # The upgrade link is only shown to non-premium accounts.
        premium = False if '>Premium Upgrade<' in html else True

        validuntil = trafficleft = -1
        try:
            timestr = re.search(self.VALID_UNTIL_PATTERN, html).group(1)
            self.logDebug(timestr)
            validuntil = mktime(strptime(timestr, "%b %d, %Y"))
        except Exception, e:
            # Best effort: keep validuntil = -1 (unknown) if scraping fails.
            self.logError(e)

        return {"validuntil": validuntil, "trafficleft": -1, "premium": premium}

    def login(self, user, data, req):
        html = req.load('http://d01.megashares.com/myms_login.php', post = {
            "httpref": "",
            "myms_login": "Login",
            "mymslogin_name": user,
            "mymspassword": data['password']
        }, decode = True)

        # After a successful login the page greets the user by name.
        if not '<span class="b ml">%s</span>' % user in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/MultiDebridCom.py b/pyload/plugins/accounts/MultiDebridCom.py
new file mode 100644
index 000000000..904be5ee7
--- /dev/null
+++ b/pyload/plugins/accounts/MultiDebridCom.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from time import time
+
+from module.plugins.Account import Account
+from module.common.json_layer import json_loads
+
+
class MultiDebridCom(Account):
    __name__ = "MultiDebridCom"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """Multi-debrid.com account plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    def loadAccountInfo(self, user, req):
        """Derive premium status/expiry from the API reply cached by login().

        Fix: the error path used to fall through and implicitly return None;
        it now returns an explicit free-account dict. Also guards against
        being called before login() has set self.json_data.
        """
        json_data = getattr(self, 'json_data', None) or {}
        if 'days_left' in json_data:
            # 'days_left' counts from now; convert to an absolute timestamp.
            validuntil = int(time() + json_data['days_left'] * 86400)
            return {"premium": True, "validuntil": validuntil, "trafficleft": -1}

        self.logError('Unable to get account information')
        return {"premium": False, "validuntil": None, "trafficleft": None}

    def login(self, user, data, req):
        # Password to use is the API-Password written in http://multi-debrid.com/myaccount
        self.html = req.load("http://multi-debrid.com/api.php",
                             get={"user": user, "pass": data["password"]})
        self.logDebug('JSON data: ' + self.html)
        # Cache the parsed reply for loadAccountInfo().
        self.json_data = json_loads(self.html)
        if self.json_data['status'] != 'ok':
            self.logError('Invalid login. The password to use is the API-Password you find in your "My Account" page')
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/MultishareCz.py b/pyload/plugins/accounts/MultishareCz.py
new file mode 100644
index 000000000..39439cbbe
--- /dev/null
+++ b/pyload/plugins/accounts/MultishareCz.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+#from time import mktime, strptime
+#from pycurl import REFERER
+import re
+from module.utils import parseFileSize
+
class MultishareCz(Account):
    """Account plugin for multishare.cz (credit based)."""
    __name__ = "MultishareCz"
    __version__ = "0.02"
    __type__ = "account"
    __description__ = """multishare.cz account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    TRAFFIC_LEFT_PATTERN = r'<span class="profil-zvyrazneni">Kredit:</span>\s*<strong>(?P<S>[0-9,]+)&nbsp;(?P<U>\w+)</strong>'
    ACCOUNT_INFO_PATTERN = r'<input type="hidden" id="(u_ID|u_hash)" name="[^"]*" value="([^"]+)">'

    def loadAccountInfo(self, user, req):
        # Remaining credit is shown on the profile page.
        profile = req.load("http://www.multishare.cz/profil/", decode = True)

        match = re.search(self.TRAFFIC_LEFT_PATTERN, profile)
        if match:
            trafficleft = parseFileSize(match.group('S'), match.group('U')) / 1024
        else:
            trafficleft = 0
        self.premium = True if trafficleft else False

        # The homepage embeds the hidden u_ID/u_hash form fields we must carry.
        homepage = req.load("http://www.multishare.cz/", decode = True)
        info = dict(re.findall(self.ACCOUNT_INFO_PATTERN, homepage))
        info.update({"validuntil": -1, "trafficleft": trafficleft})
        return info

    def login(self, user, data, req):
        html = req.load('http://www.multishare.cz/html/prihlaseni_process.php', post = {
            "akce": "Přihlásit",
            "heslo": data['password'],
            "jmeno": user
        }, decode = True)

        # An error box in the reply means the credentials were rejected.
        if '<div class="akce-chyba akce">' in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/NetloadIn.py b/pyload/plugins/accounts/NetloadIn.py
new file mode 100755
index 000000000..cef3e298b
--- /dev/null
+++ b/pyload/plugins/accounts/NetloadIn.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from module.plugins.Account import Account
+import re
+from time import time
+
class NetloadIn(Account):
    """Account plugin for netload.in premium accounts."""
    __name__ = "NetloadIn"
    __version__ = "0.22"
    __type__ = "account"
    __description__ = """netload.in account plugin"""
    __author_name__ = ("RaNaN", "CryNickSystems")
    __author_mail__ = ("RaNaN@pyload.org", "webmaster@pcProfil.de")

    def loadAccountInfo(self, user, req):
        # The German account page shows remaining time as "<d> Tag(e), <h> Stunden".
        page = req.load("http://netload.in/index.php?id=2&lang=de")
        match = re.search(r">(\d+) (Tag|Tage), (\d+) Stunden<", page)
        if match is None:
            return {"validuntil": None, "trafficleft": None, "premium": False}
        days = int(match.group(1))
        hours = int(match.group(3))
        expiry = time() + days * 24 * 60 * 60 + hours * 60 * 60
        return {"validuntil": expiry, "trafficleft": -1, "premium": True}

    def login(self, user, data,req):
        credentials = {"txtuser": user, "txtpass": data['password'], "txtcheck": "login", "txtlogin": "Login"}
        page = req.load("http://netload.in/index.php", None, credentials, cookies=True)
        if "password or it might be invalid!" in page:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/Premium4Me.py b/pyload/plugins/accounts/Premium4Me.py
new file mode 100644
index 000000000..3b0d24488
--- /dev/null
+++ b/pyload/plugins/accounts/Premium4Me.py
@@ -0,0 +1,28 @@
+
+from pyload.plugins.MultiHoster import MultiHoster
+
+class Premium4Me(MultiHoster):
+ __name__ = "Premium4Me"
+ __version__ = "0.03"
+ __type__ = "account"
+ __description__ = """Premium.to account plugin"""
+ __author_name__ = ("RaNaN", "zoidberg", "stickell")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ def loadAccountInfo(self, user, req):
+ traffic = req.load("http://premium.to/api/traffic.php?authcode=%s" % self.authcode)
+
+ account_info = {"trafficleft": int(traffic) / 1024,
+ "validuntil": -1}
+
+ return account_info
+
+ def login(self, user, data, req):
+ self.authcode = req.load("http://premium.to/api/getauthcode.php?username=%s&password=%s" % (user, data["password"])).strip()
+
+ if "wrong username" in self.authcode:
+ self.wrongPassword()
+
+ def loadHosterList(self, req):
+ page = req.load("http://premium.to/api/hosters.php?authcode=%s" % self.authcode)
+ return [x.strip() for x in page.replace("\"", "").split(";")] \ No newline at end of file
diff --git a/pyload/plugins/accounts/PremiumizeMe.py b/pyload/plugins/accounts/PremiumizeMe.py
new file mode 100644
index 000000000..e8c2d277f
--- /dev/null
+++ b/pyload/plugins/accounts/PremiumizeMe.py
@@ -0,0 +1,61 @@
+from pyload.plugins.MultiHoster import MultiHoster
+from pyload.utils import json_loads
+
+
class PremiumizeMe(MultiHoster):
    """Account plugin for the premiumize.me multi-hoster (JSON API v1)."""
    __name__ = "PremiumizeMe"
    __version__ = "0.11"
    __type__ = "account"
    __description__ = """Premiumize.Me account plugin"""

    __author_name__ = ("Florian Franzen")
    __author_mail__ = ("FlorianFranzen@gmail.com")

    def loadAccountInfo(self, user, req):
        # Fetch the account status and map it to the expected info keys.
        status = self.getAccountStatus(user, req)
        self.logDebug(status)

        result = status['result']
        info = {"validuntil": float(result['expires']),
                "trafficleft": max(0, result['trafficleft_bytes'] / 1024)}

        if result['type'] == 'free':
            info['premium'] = False

        return info

    def login(self, user, data, req):
        # A non-200 API status means the credentials were rejected.
        status = self.getAccountStatus(user, req)
        if status['status'] != 200:
            self.wrongPassword()

    def getAccountStatus(self, user, req):
        # Use premiumize.me API v1 (see https://secure.premiumize.me/?show=api)
        # to retrieve account info; returns the parsed JSON answer.
        answer = req.load(
            "https://api.premiumize.me/pm-api/v1.php?method=accountstatus&params[login]=%s&params[pass]=%s" % (
                user, self.password))
        return json_loads(answer)

    def loadHosterList(self, req):
        # Query the supported-hosters list via the same API.
        answer = req.load(
            "https://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s" % (
                self.loginname, self.password))
        data = json_loads(answer)

        # If the account is not valid there are no hosters available.
        if data['status'] != 200:
            return []

        return data['result']['hosterlist']
diff --git a/pyload/plugins/accounts/QuickshareCz.py b/pyload/plugins/accounts/QuickshareCz.py
new file mode 100644
index 000000000..94649cc43
--- /dev/null
+++ b/pyload/plugins/accounts/QuickshareCz.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.Account import Account
+from module.utils import parseFileSize
+
class QuickshareCz(Account):
    __name__ = "QuickshareCz"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """quickshare.cz account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def loadAccountInfo(self, user, req):
        # Scrape the remaining credit ("Stav kreditu") from the premium page.
        html = req.load("http://www.quickshare.cz/premium", decode = True)

        found = re.search(r'Stav kreditu: <strong>(.+?)</strong>', html)
        if found:
            # parseFileSize returns bytes; account info expects KiB.
            trafficleft = parseFileSize(found.group(1)) / 1024
            premium = True if trafficleft else False
        else:
            trafficleft = None
            premium = False

        return {"validuntil": -1, "trafficleft": trafficleft, "premium": premium}

    def login(self, user, data, req):
        html = req.load('http://www.quickshare.cz/html/prihlaseni_process.php', post = {
            "akce": u'Přihlásit',
            "heslo": data['password'],
            "jmeno": user
        }, decode = True)

        # Czech error messages: "no such user" / "wrong password".
        # NOTE(review): the first literal looks mojibake-encoded in the source;
        # kept byte-identical on purpose since it must match the live page.
        if u'>TakovÜ uşivatel neexistuje.<' in html or u'>Špatné heslo.<' in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/RapidgatorNet.py b/pyload/plugins/accounts/RapidgatorNet.py
new file mode 100644
index 000000000..85adc71a3
--- /dev/null
+++ b/pyload/plugins/accounts/RapidgatorNet.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.Account import Account
+from module.utils import parseFileSize
+from module.common.json_layer import json_loads
+
class RapidgatorNet(Account):
    __name__ = "RapidgatorNet"
    __version__ = "0.04"
    __type__ = "account"
    __description__ = """rapidgator.net account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Base URL of the JSON user API.
    API_URL = 'http://rapidgator.net/api/user'

    def loadAccountInfo(self, user, req):
        try:
            # The session id (SID) is stored in the account data by login().
            sid = self.getAccountData(user).get('SID')
            assert sid

            json = req.load("%s/info?sid=%s" % (self.API_URL, sid))
            self.logDebug("API:USERINFO", json)
            json = json_loads(json)

            if json['response_status'] == 200:
                if "reset_in" in json['response']:
                    # Re-check the account when the traffic counter resets.
                    self.scheduleRefresh(user, json['response']['reset_in'])

                # traffic_left is in bytes; account info expects KiB.
                return {"validuntil": json['response']['expire_date'],
                        "trafficleft": int(json['response']['traffic_left']) / 1024,
                        "premium": True}
            else:
                self.logError(json['response_details'])
        except Exception, e:
            self.logError(e)

        # Fallback: report a free/unknown account on any failure.
        return {"validuntil": None, "trafficleft": None, "premium": False}

    def login(self, user, data, req):
        try:
            json = req.load('%s/login' % self.API_URL,
                post = {"username": user,
                        "password": data['password']})
            self.logDebug("API:LOGIN", json)
            json = json_loads(json)

            if json['response_status'] == 200:
                # Persist the session id for loadAccountInfo().
                data['SID'] = str(json['response']['session_id'])
                return
            else:
                self.logError(json['response_details'])
        except Exception, e:
            self.logError(e)

        self.wrongPassword()
diff --git a/pyload/plugins/accounts/RapidshareCom.py b/pyload/plugins/accounts/RapidshareCom.py
new file mode 100644
index 000000000..15722e099
--- /dev/null
+++ b/pyload/plugins/accounts/RapidshareCom.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from module.plugins.Account import Account
+
class RapidshareCom(Account):
    __name__ = "RapidshareCom"
    __version__ = "0.22"
    __type__ = "account"
    __description__ = """Rapidshare.com account plugin"""
    __author_name__ = ("mkaay")
    __author_mail__ = ("mkaay@mkaay.de")

    # single API endpoint used by both loadAccountInfo() and login()
    API_URL = "http://api.rapidshare.com/cgi-bin/rsapi.cgi"

    def _queryDetails(self, user, password, req):
        """Call the RS API 'getaccountdetails' sub and return the raw body."""
        params = {"sub": "getaccountdetails", "type": "prem", "login": user,
                  "password": password, "withcookie": 1}
        return req.load(self.API_URL, cookies=False, get=params)

    @staticmethod
    def _parseFields(src):
        """Parse the API's 'key=value' lines into a dict.

        Splits on the FIRST '=' only: values (notably the login cookie) may
        themselves contain '=' characters, which made the previous
        `k, v = t.split("=")` raise ValueError.
        """
        info = {}
        for line in src.split("\n"):
            if not line.strip():
                continue
            key, _, value = line.partition("=")
            info[key] = value
        return info

    def loadAccountInfo(self, user, req):
        """Fetch premium state and billing end from the RS API."""
        data = self.getAccountData(user)
        src = self._queryDetails(user, data["password"], req)
        if src.startswith("ERROR"):
            raise Exception(src)

        info = self._parseFields(src)
        validuntil = int(info["billeduntil"])
        premium = bool(validuntil)

        return {"premium": premium, "validuntil": validuntil,
                "trafficleft": -1, "maxtraffic": -1}

    def login(self, user, data, req):
        """Log in through the API and store the 'enc' session cookie."""
        src = self._queryDetails(user, data["password"], req)
        if src.startswith("ERROR"):
            raise Exception(src + "### Note you have to use your account number for login, instead of name.")

        info = self._parseFields(src)
        cj = self.getAccountCookies(user)
        cj.setCookie("rapidshare.com", "enc", info["cookie"])
+
+
diff --git a/pyload/plugins/accounts/RarefileNet.py b/pyload/plugins/accounts/RarefileNet.py
new file mode 100644
index 000000000..90ad02d43
--- /dev/null
+++ b/pyload/plugins/accounts/RarefileNet.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class RarefileNet(XFSPAccount):
    # Thin XFileSharingPro-based account plugin: all login and account-info
    # logic is inherited from XFSPAccount; only plugin metadata and the
    # site's base URL are defined here.
    __name__ = "RarefileNet"
    __version__ = "0.02"
    __type__ = "account"
    __description__ = """RareFile.net account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Base URL used by XFSPAccount to reach the login/account pages
    MAIN_PAGE = "http://rarefile.net/"
diff --git a/pyload/plugins/accounts/RealdebridCom.py b/pyload/plugins/accounts/RealdebridCom.py
new file mode 100644
index 000000000..9460fc815
--- /dev/null
+++ b/pyload/plugins/accounts/RealdebridCom.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from module.plugins.MultiHoster import MultiHoster
+import xml.dom.minidom as dom
+
class RealdebridCom(MultiHoster):
    __name__ = "RealdebridCom"
    __version__ = "0.5"
    __type__ = "account"
    __description__ = """Real-Debrid.com account plugin"""
    __author_name__ = ("Devirex, Hazzard")
    __author_mail__ = ("naibaf_11@yahoo.de")

    def loadAccountInfo(self, req):
        """Read the expiration timestamp from the XML account API.

        Traffic is reported as unlimited (-1) - real-debrid is flat-rate.
        """
        page = req.load("http://real-debrid.com/api/account.php")
        xml = dom.parseString(page)
        expiration = int(xml.getElementsByTagName("expiration")[0].childNodes[0].nodeValue)
        return {"validuntil": expiration, "trafficleft": -1}

    def login(self, req):
        """Log in through the ajax endpoint.

        Credentials are passed via `get=` so they are URL-encoded; the
        previous raw string interpolation broke for passwords containing
        '&', '#' or '%'.
        """
        page = req.load("https://real-debrid.com/ajax/login.php",
                        get={"user": self.loginname, "pass": self.password})

        if "Your login informations are incorrect" in page:
            self.wrongPassword()

    def loadHosterList(self, req):
        """Return the supported hoster domains (comma-separated API reply)."""
        scheme = "https" if self.getConfig("https") else "http"
        page = req.load(scheme + "://real-debrid.com/api/hosters.php").replace('"', "").strip()

        return [x.strip() for x in page.split(",") if x.strip()]
diff --git a/pyload/plugins/accounts/RehostTo.py b/pyload/plugins/accounts/RehostTo.py
new file mode 100644
index 000000000..e1cb2668f
--- /dev/null
+++ b/pyload/plugins/accounts/RehostTo.py
@@ -0,0 +1,37 @@
+from module.plugins.Account import Account
+
+
class RehostTo(Account):
    __name__ = "RehostTo"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """Rehost.to account plugin"""
    __author_name__ = ("RaNaN")
    __author_mail__ = ("RaNaN@pyload.org")

    def loadAccountInfo(self, user, req):
        """Log in to the API, then fetch the premium credits and session tokens."""
        account = self.getAccountData(user)
        login_page = req.load("http://rehost.to/api.php?cmd=login&user=%s&pass=%s"
                              % (user, account["password"]))

        # reply is "ses=<short>,long_ses=<long>"
        pairs = [entry.split("=") for entry in login_page.split(",")]
        ses = pairs[0][1]
        long_ses = pairs[1][1]

        credits_page = req.load("http://rehost.to/api.php?cmd=get_premium_credits&long_ses=%s"
                                % long_ses)
        traffic, valid = credits_page.split(",")

        # traffic is reported in MiB; pyLoad expects KiB
        return {"trafficleft": int(traffic) * 1024,
                "validuntil": int(valid),
                "long_ses": long_ses,
                "ses": ses}

    def login(self, user, data, req):
        """Validate the credentials against the login API call."""
        response = req.load("http://rehost.to/api.php?cmd=login&user=%s&pass=%s"
                            % (user, data["password"]))

        if "Login failed." in response:
            self.wrongPassword()
+
+
diff --git a/pyload/plugins/accounts/ReloadCc.py b/pyload/plugins/accounts/ReloadCc.py
new file mode 100644
index 000000000..e4cb32c42
--- /dev/null
+++ b/pyload/plugins/accounts/ReloadCc.py
@@ -0,0 +1,73 @@
+from module.plugins.Account import Account
+
+from module.common.json_layer import json_loads
+
+from module.network.HTTPRequest import BadHeader
+
class ReloadCc(Account):
    __name__ = "ReloadCc"
    __version__ = "0.3"
    __type__ = "account"
    __description__ = """Reload.Cc account plugin"""

    __author_name__ = ("Reload Team")
    __author_mail__ = ("hello@reload.cc")

    def loadAccountInfo(self, user, req):
        """Build the account-info dict from the reload.cc API answer."""
        status = self.getAccountStatus(user, req)

        # 'expires' is a unix timestamp; traffic is flat-rate (-1 = unlimited)
        return {"validuntil": float(status['msg']['expires']),
                "pwdhash": status['msg']['hash'],
                "trafficleft": -1}

    def login(self, user, data, req):
        """Validate the credentials through the API status call."""
        status = self.getAccountStatus(user, req)

        if not status:
            raise Exception("There was an error upon logging in to Reload.cc!")

        if status['status'] != "ok":
            self.wrongPassword()

    def getAccountStatus(self, user, req):
        """Query reload.cc API v1 and return the parsed JSON answer (or None)."""
        query = {"via": "pyload", "v": 1, "get_traffic": "true", "user": user}

        # Prefer the cached password hash; fall back to the clear password.
        try:
            query["hash"] = self.infos[user]['pwdhash']
        except Exception:
            query["pwd"] = self.accounts[user]['password']

        try:
            answer = req.load("http://api.reload.cc/login", get=query)
        except BadHeader as e:
            # Map the API's HTTP status codes onto plugin actions.
            if e.code == 400:
                raise Exception("There was an unknown error within the Reload.cc plugin.")
            elif e.code == 401:
                self.wrongPassword()
            elif e.code == 402:
                self.expired(user)
            elif e.code == 403:
                raise Exception("Your account is disabled. Please contact the Reload.cc support!")
            elif e.code == 409:
                self.empty(user)
            elif e.code == 503:
                self.logInfo("Reload.cc is currently in maintenance mode! Please check again later.")
                self.wrongPassword()
            return None

        return json_loads(answer)
diff --git a/pyload/plugins/accounts/RyushareCom.py b/pyload/plugins/accounts/RyushareCom.py
new file mode 100644
index 000000000..f734eb11b
--- /dev/null
+++ b/pyload/plugins/accounts/RyushareCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class RyushareCom(XFSPAccount):
    __name__ = "RyushareCom"
    __version__ = "0.03"
    __type__ = "account"
    __description__ = """ryushare.com account plugin"""
    __author_name__ = ("zoidberg", "trance4us")
    __author_mail__ = ("zoidberg@mujmail.cz", "")

    MAIN_PAGE = "http://ryushare.com/"

    def login(self, user, data, req):
        """Override the generic XFSP login with ryushare's own endpoint."""
        login_url = "http://ryushare.com/login.python"
        req.lastURL = login_url
        response = req.load(login_url,
                            post={"op": "login",
                                  "login": user,
                                  "password": data["password"]})
        if 'Incorrect Login or Password' in response or '>Error<' in response:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/Share76Com.py b/pyload/plugins/accounts/Share76Com.py
new file mode 100644
index 000000000..9c946ae50
--- /dev/null
+++ b/pyload/plugins/accounts/Share76Com.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class Share76Com(XFSPAccount):
    # XFileSharingPro-based account plugin; everything except the plugin
    # metadata and the site base URL is inherited from XFSPAccount.
    __name__ = "Share76Com"
    __version__ = "0.02"
    __type__ = "account"
    __description__ = """Share76.com account plugin"""
    __author_name__ = ("me")

    # Base URL used by XFSPAccount for the login/account requests
    MAIN_PAGE = "http://Share76.com/"
diff --git a/pyload/plugins/accounts/ShareFilesCo.py b/pyload/plugins/accounts/ShareFilesCo.py
new file mode 100644
index 000000000..0d8ea6635
--- /dev/null
+++ b/pyload/plugins/accounts/ShareFilesCo.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class ShareFilesCo(XFSPAccount):
    # XFileSharingPro-based account plugin; everything except the plugin
    # metadata and the site base URL is inherited from XFSPAccount.
    __name__ = "ShareFilesCo"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """ShareFilesCo account plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    # Base URL used by XFSPAccount for the login/account requests
    MAIN_PAGE = "http://sharefiles.co/"
diff --git a/pyload/plugins/accounts/ShareRapidCom.py b/pyload/plugins/accounts/ShareRapidCom.py
new file mode 100644
index 000000000..f8043449c
--- /dev/null
+++ b/pyload/plugins/accounts/ShareRapidCom.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import mktime, strptime
+from module.plugins.Account import Account
+
+class ShareRapidCom(Account):
+ __name__ = "ShareRapidCom"
+ __version__ = "0.32"
+ __type__ = "account"
+ __description__ = """ShareRapid account plugin"""
+ __author_name__ = ("MikyWoW", "zoidberg")
+
+ login_timeout = 60
+
+ def loadAccountInfo(self, user, req):
+ src = req.load("http://share-rapid.com/mujucet/", decode=True)
+
+ found = re.search(ur'<td>Max. počet paralelních stahování: </td><td>(\d+)', src)
+ if found:
+ data = self.getAccountData(user)
+ data["options"]["limitDL"] = [int(found.group(1))]
+
+ found = re.search(ur'<td>Paušální stahování aktivní. Vyprší </td><td><strong>(.*?)</strong>', src)
+ if found:
+ validuntil = mktime(strptime(found.group(1), "%d.%m.%Y - %H:%M"))
+ return {"premium": True, "trafficleft": -1, "validuntil": validuntil}
+
+ found = re.search(r'<tr><td>GB:</td><td>(.*?) GB', src)
+ if found:
+ trafficleft = float(found.group(1)) * (1 << 20)
+ return {"premium": True, "trafficleft": trafficleft, "validuntil": -1}
+
+ return {"premium": False, "trafficleft": None, "validuntil": None}
+
+ def login(self, user, data, req):
+ htm = req.load("http://share-rapid.com/prihlaseni/", cookies=True)
+ if "Heslo:" in htm:
+ start = htm.index('id="inp_hash" name="hash" value="')
+ htm = htm[start+33:]
+ hashes = htm[0:32]
+ htm = req.load("http://share-rapid.com/prihlaseni/",
+ post={"hash": hashes,
+ "login": user,
+ "pass1": data["password"],
+ "remember": 0,
+ "sbmt": u"Přihlásit"}, cookies=True) \ No newline at end of file
diff --git a/pyload/plugins/accounts/ShareonlineBiz.py b/pyload/plugins/accounts/ShareonlineBiz.py
new file mode 100644
index 000000000..fe2b412db
--- /dev/null
+++ b/pyload/plugins/accounts/ShareonlineBiz.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from module.plugins.Account import Account
+
class ShareonlineBiz(Account):
    __name__ = "ShareonlineBiz"
    __version__ = "0.24"
    __type__ = "account"
    __description__ = """share-online.biz account plugin"""
    __author_name__ = ("mkaay", "zoidberg")
    __author_mail__ = ("mkaay@mkaay.de", "zoidberg@mujmail.cz")

    def getUserAPI(self, user, req):
        """Fetch the raw 'key=value' account details from the API."""
        return req.load("http://api.share-online.biz/account.php",
                        {"username": user, "password": self.accounts[user]["password"], "act": "userDetails"})

    def loadAccountInfo(self, user, req):
        """Parse the API details, install session cookies, report premium state."""
        src = self.getUserAPI(user, req)

        info = {}
        for line in src.splitlines():
            if "=" in line:
                # split on the FIRST '=' only - session token values ('dl',
                # 'a') may themselves contain '=' characters, which made the
                # plain split("=") raise ValueError
                key, value = line.split("=", 1)
                info[key] = value
        self.logDebug(info)

        if "dl" in info and info["dl"].lower() != "not_available":
            req.cj.setCookie("share-online.biz", "dl", info["dl"])
        if "a" in info and info["a"].lower() != "not_available":
            req.cj.setCookie("share-online.biz", "a", info["a"])

        # 'Sammler' (collector) accounts are registered but not premium; use
        # .get() so a reply without a 'group' field cannot raise KeyError
        premium = bool(("dl" in info or "a" in info)
                       and info.get("group") != "Sammler")

        return {"validuntil": int(info["expire_date"]) if "expire_date" in info else -1,
                "trafficleft": -1,
                "premium": premium}

    def login(self, user, data, req):
        """Verify credentials; the API replies with 'EXCEPTION' on failure."""
        src = self.getUserAPI(user, req)
        if "EXCEPTION" in src:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/SpeedLoadOrg.py b/pyload/plugins/accounts/SpeedLoadOrg.py
new file mode 100644
index 000000000..4eb2b52de
--- /dev/null
+++ b/pyload/plugins/accounts/SpeedLoadOrg.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class SpeedLoadOrg(XFSPAccount):
    # XFileSharingPro-based account plugin; everything except the plugin
    # metadata and the site base URL is inherited from XFSPAccount.
    __name__ = "SpeedLoadOrg"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """SpeedLoadOrg account plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    # Base URL used by XFSPAccount for the login/account requests
    MAIN_PAGE = "http://speedload.org/"
diff --git a/pyload/plugins/accounts/StahnuTo.py b/pyload/plugins/accounts/StahnuTo.py
new file mode 100644
index 000000000..8a4523bc5
--- /dev/null
+++ b/pyload/plugins/accounts/StahnuTo.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+from module.utils import parseFileSize
+import re
+
class StahnuTo(Account):
    __name__ = "StahnuTo"
    __version__ = "0.02"
    __type__ = "account"
    __description__ = """StahnuTo account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    #login_timeout = 60

    def loadAccountInfo(self, user, req):
        """Scrape the landing page for the remaining VIP credit."""
        html = req.load("http://www.stahnu.to/")

        match = re.search(r'>VIP: (\d+.*)<', html)
        if match:
            trafficleft = parseFileSize(match.group(1)) * 1024
        else:
            trafficleft = 0

        # accounts with more than 512 MB of credit count as premium
        return {"premium": trafficleft > (512 * 1024),
                "trafficleft": trafficleft,
                "validuntil": -1}

    def login(self, user, data, req):
        """Submit the login form; the logout link confirms success."""
        html = req.load("http://www.stahnu.to/login.php", post={
            "username": user,
            "password": data["password"],
            "submit": "Login"})

        if '<a href="logout.php">' not in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/TurbobitNet.py b/pyload/plugins/accounts/TurbobitNet.py
new file mode 100644
index 000000000..c4b819131
--- /dev/null
+++ b/pyload/plugins/accounts/TurbobitNet.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+import re
+from time import mktime, strptime
+
class TurbobitNet(Account):
    __name__ = "TurbobitNet"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """TurbobitNet account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    #login_timeout = 60

    def loadAccountInfo(self, user, req):
        """Scrape the homepage for the 'Turbo Access' expiry date."""
        html = req.load("http://turbobit.net")

        match = re.search(r'<u>Turbo Access</u> to ([0-9.]+)', html)
        if match:
            # the date is rendered as dd.mm.yyyy
            return {"premium": True,
                    "trafficleft": -1,
                    "validuntil": mktime(strptime(match.group(1), "%d.%m.%Y"))}

        return {"premium": False, "trafficleft": -1, "validuntil": -1}

    def login(self, user, data, req):
        """Force the English locale, then submit the login form."""
        req.cj.setCookie("turbobit.net", "user_lang", "en")

        html = req.load("http://turbobit.net/user/login", post={
            "user[login]": user,
            "user[pass]": data["password"],
            "user[submit]": "Login"})

        if '<div class="menu-item user-name">' not in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/UlozTo.py b/pyload/plugins/accounts/UlozTo.py
new file mode 100644
index 000000000..6652c8b7c
--- /dev/null
+++ b/pyload/plugins/accounts/UlozTo.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.Account import Account
+import re
+
class UlozTo(Account):
    __name__ = "UlozTo"
    __version__ = "0.04"
    __type__ = "account"
    __description__ = """uloz.to account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    TRAFFIC_LEFT_PATTERN = r'<li class="menu-kredit"><a href="http://www.ulozto.net/kredit" title="[^"]*?GB = ([0-9.]+) MB"'

    def loadAccountInfo(self, user, req):
        """Scrape the homepage for the remaining credit (reported in MB)."""
        # the session cookie gets lost somehow after each request,
        # so remember it and put it back afterwards
        self.phpsessid = req.cj.getCookie("ULOSESSID")
        html = req.load("http://www.ulozto.net/", decode=True)
        req.cj.setCookie("www.ulozto.net", "ULOSESSID", self.phpsessid)

        match = re.search(self.TRAFFIC_LEFT_PATTERN, html)
        if match:
            megabytes = float(match.group(1).replace(' ', '').replace(',', '.'))
            trafficleft = int(megabytes * 1000 / 1.024)
        else:
            trafficleft = 0
        self.premium = bool(trafficleft)

        return {"validuntil": -1, "trafficleft": trafficleft}

    def login(self, user, data, req):
        """Post the login form; an error list in the reply means bad credentials."""
        html = req.load('http://www.ulozto.net/login?do=loginForm-submit', post={
            "login": "Submit",
            "password": data['password'],
            "username": user
        }, decode=True)

        if '<ul class="error">' in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/UnrestrictLi.py b/pyload/plugins/accounts/UnrestrictLi.py
new file mode 100644
index 000000000..9ec2ea996
--- /dev/null
+++ b/pyload/plugins/accounts/UnrestrictLi.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.Account import Account
+from module.common.json_layer import json_loads
+
+
class UnrestrictLi(Account):
    __name__ = "UnrestrictLi"
    __version__ = "0.02"
    __type__ = "account"
    __description__ = """Unrestrict.li account plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    def loadAccountInfo(self, user, req):
        """Query the JDownloader-compatible JSON endpoint for account data."""
        raw = req.load('http://unrestrict.li/api/jdownloader/user.php?format=json')
        self.logDebug("JSON data: " + raw)
        result = json_loads(raw)['result']

        if 'vip' in result and result['vip'] == 0:
            return {"premium": False}

        # traffic is reported in bytes; pyLoad expects KiB
        return {"premium": True,
                "validuntil": result['expires'],
                "trafficleft": int(result['traffic'] / 1024)}

    def login(self, user, data, req):
        """Sign in; bail out early if a captcha is currently required."""
        html = req.load("https://unrestrict.li/sign_in")

        if 'solvemedia' in html:
            self.logError("A Captcha is required. Go to http://unrestrict.li/sign_in and login, then retry")
            return

        self.html = req.load("https://unrestrict.li/sign_in",
                             post={"username": user, "password": data["password"], "signin": "Sign in"})

        if 'sign_out' not in self.html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/UploadedTo.py b/pyload/plugins/accounts/UploadedTo.py
new file mode 100644
index 000000000..d202018f2
--- /dev/null
+++ b/pyload/plugins/accounts/UploadedTo.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from module.plugins.Account import Account
+import re
+from time import time
+
class UploadedTo(Account):
    __name__ = "UploadedTo"
    __version__ = "0.26"
    __type__ = "account"
    __description__ = """ul.net account plugin"""
    __author_name__ = ("mkaay")
    __author_mail__ = ("mkaay@mkaay.de")

    # hours represented by each duration unit shown on the account page
    _UNIT_HOURS = {"Week": 168, "weeks": 168, "days": 24, "day": 24,
                   "hours": 1, "hour": 1}

    def loadAccountInfo(self, user, req):
        """Scrape uploaded.net/me for premium status, traffic and expiry."""
        req.load("http://uploaded.net/language/en")
        html = req.load("http://uploaded.net/me")

        is_premium = ('<a href="register"><em>Premium</em>' in html
                      or '<em>Premium</em></th>' in html)
        if not is_premium:
            return {"premium": False, "validuntil": -1}

        raw_traffic = re.search(r'<th colspan="2"><b class="cB">([^<]+)', html).group(1).replace('.', '')
        raw_valid = re.search(r"<td>Duration:</td>\s*<th>([^<]+)", html, re.MULTILINE).group(1).strip()

        traffic = int(self.parseTraffic(raw_traffic))

        if raw_valid == "unlimited":
            validuntil = -1
        else:
            # duration is e.g. "2 Weeks 3 days"; accumulate it onto 'now'
            validuntil = time()
            for amount, unit in re.findall(r"(\d+) (Week|weeks|days|day|hours|hour)", raw_valid):
                validuntil += 3600 * int(amount) * self._UNIT_HOURS[unit]

        return {"validuntil": validuntil, "trafficleft": traffic,
                "maxtraffic": 50 * 1024 * 1024}

    def login(self, user, data, req):
        """Switch to English, set the language cookie and post the login form."""
        req.load("http://uploaded.net/language/en")
        req.cj.setCookie("uploaded.net", "lang", "en")

        page = req.load("http://uploaded.net/io/login",
                        post={"id": user, "pw": data["password"], "_": ""})

        if "User and password do not match!" in page:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/UploadheroCom.py b/pyload/plugins/accounts/UploadheroCom.py
new file mode 100644
index 000000000..18ed69ae6
--- /dev/null
+++ b/pyload/plugins/accounts/UploadheroCom.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from module.plugins.Account import Account
+import re,datetime,time
+
class UploadheroCom(Account):
    __name__ = "UploadheroCom"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """Uploadhero.com account plugin"""
    __author_name__ = ("mcmyst")
    __author_mail__ = ("mcmyst@hotmail.fr")

    # "You have N premium days left" (French); compiled once at class level
    # instead of on every call
    PREMIUM_PATTERN = re.compile('Il vous reste <span class="bleu">([0-9]+)</span> jours premium.')

    def loadAccountInfo(self, user, req):
        """Read the remaining premium days and convert to a unix timestamp."""
        page = req.load("http://uploadhero.com/my-account")

        found = self.PREMIUM_PATTERN.search(page)
        if found:
            end_date = datetime.date.today() + datetime.timedelta(days=int(found.group(1)))
            # BUGFIX: was `time.mktime(future.timetuple())` - `future` was
            # never defined, so every premium account raised NameError here.
            validuntil = time.mktime(end_date.timetuple())
            return {"validuntil": validuntil, "trafficleft": -1, "premium": True}

        return {"validuntil": -1, "trafficleft": -1, "premium": False}

    def login(self, user, data, req):
        """Post the login form; a French error message signals bad credentials."""
        page = req.load("http://uploadhero.com/lib/connexion.php",
                        post={"pseudo_login": user, "password_login": data["password"]})

        if "mot de passe invalide" in page:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/UploadingCom.py b/pyload/plugins/accounts/UploadingCom.py
new file mode 100644
index 000000000..507e4ab18
--- /dev/null
+++ b/pyload/plugins/accounts/UploadingCom.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from module.plugins.Account import Account
+from time import time, strptime, mktime
+import re
+
class UploadingCom(Account):
    __name__ = "UploadingCom"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """uploading.com account plugin"""
    __author_name__ = ("mkaay")
    __author_mail__ = ("mkaay@mkaay.de")

    def loadAccountInfo(self, user, req):
        """Scrape the homepage for premium state and the 'Valid Until' date.

        (The previous version set an unused `premium = True` local before the
        free-account check; it has been removed.)
        """
        src = req.load("http://uploading.com/")
        # free accounts are shown an upgrade banner
        if "UPGRADE TO PREMIUM" in src:
            return {"validuntil": -1, "trafficleft": -1, "premium": False}

        m = re.search("Valid Until:(.*?)<", src)
        if m:
            validuntil = int(mktime(strptime(m.group(1).strip(), "%b %d, %Y")))
        else:
            validuntil = -1

        return {"validuntil": validuntil, "trafficleft": -1, "premium": True}

    def login(self, user, data, req):
        """Force English locale cookies, prime the session, post the JS login form."""
        req.cj.setCookie("uploading.com", "lang", "1")
        req.cj.setCookie("uploading.com", "language", "1")
        req.cj.setCookie("uploading.com", "setlang", "en")
        req.cj.setCookie("uploading.com", "_lang", "en")
        req.load("http://uploading.com/")
        req.load("http://uploading.com/general/login_form/?JsHttpRequest=%s-xml" % long(time() * 1000),
                 post={"email": user, "password": data["password"], "remember": "on"})
diff --git a/pyload/plugins/accounts/UploadstationCom.py b/pyload/plugins/accounts/UploadstationCom.py
new file mode 100644
index 000000000..e86cec7ce
--- /dev/null
+++ b/pyload/plugins/accounts/UploadstationCom.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.accounts.FilejungleCom import FilejungleCom
+
class UploadstationCom(FilejungleCom):
    # uploadstation.com shares its site engine with filejungle.com, so all
    # logic is inherited from FilejungleCom; only the metadata and base URL
    # differ.
    __name__ = "UploadstationCom"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """uploadstation.com account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Base URL used by the inherited FilejungleCom logic
    URL = "http://uploadstation.com/"
diff --git a/pyload/plugins/accounts/UptoboxCom.py b/pyload/plugins/accounts/UptoboxCom.py
new file mode 100644
index 000000000..b07991817
--- /dev/null
+++ b/pyload/plugins/accounts/UptoboxCom.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.XFSPAccount import XFSPAccount
+
class UptoboxCom(XFSPAccount):
    # XFileSharingPro-based account plugin; all logic inherited from
    # XFSPAccount.
    __name__ = "UptoboxCom"
    __version__ = "0.01"
    __type__ = "account"
    # BUGFIX: the description previously read "DDLStorage.com account plugin"
    # - a copy-paste leftover from another XFSP plugin.
    __description__ = """Uptobox.com account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Base URL used by XFSPAccount for the login/account requests
    MAIN_PAGE = "http://uptobox.com/"
diff --git a/pyload/plugins/accounts/WarserverCz.py b/pyload/plugins/accounts/WarserverCz.py
new file mode 100644
index 000000000..21961956b
--- /dev/null
+++ b/pyload/plugins/accounts/WarserverCz.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+import re
+from module.utils import parseFileSize
+from time import mktime, strptime
+
class WarserverCz(Account):
    __name__ = "WarserverCz"
    __version__ = "0.02"
    __type__ = "account"
    __description__ = """Warserver.cz account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # "Unlimited downloading until: <date>" (Czech)
    VALID_UNTIL_PATTERN = u'<li>Neomezené stahování do: <strong>(.+?)<'
    # "Credit: <amount>" (Czech)
    TRAFFIC_LEFT_PATTERN = u'<li>Kredit: <strong>(.+?)<'

    DOMAIN = "http://www.warserver.cz"

    def loadAccountInfo(self, user, req):
        """Scrape the user's overview page for flat-rate expiry and credit."""
        html = req.load("%s/uzivatele/prehled" % self.DOMAIN, decode=True)

        validuntil = trafficleft = None
        premium = False

        match = re.search(self.VALID_UNTIL_PATTERN, html)
        if match:
            self.logDebug("VALID_UNTIL", match.group(1))
            try:
                #validuntil = mktime(strptime(match.group(1), "%d %B %Y"))
                premium = True
                trafficleft = -1
            except Exception as e:
                self.logError(e)

        match = re.search(self.TRAFFIC_LEFT_PATTERN, html)
        if match:
            self.logDebug("TRAFFIC_LEFT", match.group(1))
            # credit uses thin-space separators; convert bytes -> KiB
            trafficleft = parseFileSize(match.group(1).replace("&thinsp;", "")) // 1024
            premium = trafficleft > 1 << 18

        return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}

    def login(self, user, data, req):
        """Submit the Czech login form; an error paragraph means bad credentials."""
        html = req.load('%s/uzivatele/prihlaseni?do=prihlaseni-submit' % self.DOMAIN,
                        post={"username": user,
                              "password": data['password'],
                              "send": u"Přihlásit"},
                        decode=True)

        if '<p class="chyba">' in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/WuploadCom.py b/pyload/plugins/accounts/WuploadCom.py
new file mode 100644
index 000000000..3d9ddfffa
--- /dev/null
+++ b/pyload/plugins/accounts/WuploadCom.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from types import MethodType
+
+from module.plugins.Account import Account
+from module.common.json_layer import json_loads
+
class WuploadCom(Account):
    """Account plugin for wupload.com.

    Borrows its actual implementation from the FilesonicCom account
    plugin by binding that class's functions onto this instance.
    """

    __name__ = "WuploadCom"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """wupload.com account plugin"""
    __author_name__ = ("RaNaN", "Paul King")
    __author_mail__ = ("RaNaN@pyload.org", "")

    API_URL = "http://api.wupload.com"

    def init(self):
        # load the FilesonicCom account class through the plugin manager
        fs = self.core.pluginManager.loadClass("accounts", "FilesonicCom")

        methods = ["loadAccountInfo", "login"]
        #methods to bind from fs

        for m in methods:
            # NOTE: the three-argument MethodType form is Python 2 only;
            # it binds fs's plain function as a method of this instance
            setattr(self, m, MethodType(fs.__dict__[m], self, WuploadCom))

    def getDomain(self, req):
        """Ask the wupload API which domain serves the current IP."""
        xml = req.load(self.API_URL + "/utility?method=getWuploadDomainForCurrentIp&format=json",
                       decode=True)
        return json_loads(xml)["FSApi_Utility"]["getWuploadDomainForCurrentIp"]["response"]
diff --git a/pyload/plugins/accounts/X7To.py b/pyload/plugins/accounts/X7To.py
new file mode 100644
index 000000000..90f65d55e
--- /dev/null
+++ b/pyload/plugins/accounts/X7To.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: ernieb
+"""
+
+import re
+from time import strptime, mktime
+
+from module.plugins.Account import Account
+
class X7To(Account):
    """X7.to account plugin: scrapes the member page for premium expiry
    and remaining traffic."""

    __name__ = "X7To"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """X7.To account plugin"""
    __author_name__ = ("ernieb")
    __author_mail__ = ("ernieb")

    # the member page is served in German or English depending on session
    VALID_UNTIL_PATTERNS = ("Premium-Mitglied bis ([0-9]*-[0-9]*-[0-9]*)",
                            "Premium member until ([0-9]*-[0-9]*-[0-9]*)")
    TRAFFIC_LEFT_PATTERN = r'<em style="white-space:nowrap">([\d]*[,]?[\d]?[\d]?) (KB|MB|GB)</em>'

    def loadAccountInfo(self, user, req):
        """Return {"trafficleft": <KB as int, -1 if unknown>,
        "validuntil": <unix timestamp, 0 if not premium>}."""
        page = req.load("http://www.x7.to/my")

        # expiry date: the same line appears in two localized variants;
        # try both instead of duplicating the parse logic
        valid = 0
        for pattern in self.VALID_UNTIL_PATTERNS:
            found = re.search(pattern, page, re.IGNORECASE)
            if found:
                valid = int(mktime(strptime(found.group(1), "%Y-%m-%d")))
                break

        found = re.search(self.TRAFFIC_LEFT_PATTERN, page, re.IGNORECASE)
        if found:
            # page uses a decimal comma (e.g. "1,5 GB")
            amount = float(found.group(1).replace(",", "."))
            # renamed from `pow`, which shadowed the builtin
            exponent = {'KB': 0, 'MB': 1, 'GB': 2}[found.group(2)]
            trafficleft = int(amount * 1024 ** exponent)
        else:
            trafficleft = -1

        return {"trafficleft": trafficleft, "validuntil": valid}


    def login(self, user, data, req):
        """Log in via the james/login endpoint; the language is forced to
        English so the error-message check below matches."""
        #req.cj.setCookie("share.cx", "lang", "english")
        page = req.load("http://x7.to/lang/en", None, {})
        page = req.load("http://x7.to/james/login", None,
                        {"redirect": "http://www.x7.to/", "id": user, "pw": data['password'], "submit": "submit"})

        if "Username and password are not matching." in page:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/YibaishiwuCom.py b/pyload/plugins/accounts/YibaishiwuCom.py
new file mode 100644
index 000000000..e2aa6f11d
--- /dev/null
+++ b/pyload/plugins/accounts/YibaishiwuCom.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.Account import Account
+import re
+
class YibaishiwuCom(Account):
    """115.com account plugin: infers account state from the USER_PERMISSION
    javascript blob embedded in the homepage."""

    __name__ = "YibaishiwuCom"
    __version__ = "0.01"
    __type__ = "account"
    __description__ = """115.com account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    ACCOUNT_INFO_PATTERN = r'var USER_PERMISSION = {(.*?)}'

    def loadAccountInfo(self, user, req):
        """Return account info parsed from the homepage.

        validuntil/trafficleft are -1 (unknown/unlimited) when the
        permission blob is present, 0 otherwise.
        """
        #self.relogin(user)
        html = req.load("http://115.com/", decode = True)

        found = re.search(self.ACCOUNT_INFO_PATTERN, html, re.S)
        # bool() replaces the redundant "True if ... else False"
        premium = bool(found and 'is_vip: 1' in found.group(1))
        validuntil = trafficleft = (-1 if found else 0)
        # plain dict literal; wrapping it in dict() was redundant
        return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}

    def login(self, user, data, req):
        """Post the login form; absence of the permission blob means the
        credentials were rejected."""
        html = req.load('http://passport.115.com/?ac=login', post = {
            "back": "http://www.115.com/",
            "goto": "http://115.com/",
            "login[account]": user,
            "login[passwd]": data['password']
        }, decode = True)

        # "not in" instead of "not ... in"
        if 'var USER_PERMISSION = {' not in html:
            self.wrongPassword()
diff --git a/pyload/plugins/accounts/ZeveraCom.py b/pyload/plugins/accounts/ZeveraCom.py
new file mode 100644
index 000000000..61a66cd89
--- /dev/null
+++ b/pyload/plugins/accounts/ZeveraCom.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+from module.plugins.Account import Account
+
+import re
+from time import mktime, strptime
+
class ZeveraCom(Account):
    """Account plugin for the zevera.com multi-hoster (jDownloader API)."""

    __name__ = "ZeveraCom"
    __version__ = "0.21"
    __type__ = "account"
    __description__ = """Zevera.com account plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def loadAccountInfo(self, user, req):
        """Build the account-info dict from the API 'accountinfo' response.

        NOTE: getAPIData() reads self.loginname/self.password, which are
        set in login() — login() must have run first.
        """
        data = self.getAPIData(req)
        if data == "No traffic":
            # the API answers with this literal string for exhausted accounts
            account_info = {"trafficleft": 0, "validuntil": 0, "premium": False}
        else:
            account_info = {
                # availabletodaytraffic * 1024 — presumably KiB converted to
                # the next-smaller unit; TODO confirm against the API docs
                "trafficleft": int(data['availabletodaytraffic']) * 1024,
                "validuntil": mktime(strptime(data['endsubscriptiondate'],"%Y/%m/%d %H:%M:%S")),
                "premium": True
            }
        return account_info

    def login(self, user, data, req):
        # keep credentials for subsequent API calls
        self.loginname = user
        self.password = data["password"]
        # an account that reports "No traffic" is treated as a bad login
        if self.getAPIData(req) == "No traffic":
            self.wrongPassword()

    def getAPIData(self, req, just_header = False, **kwargs):
        """Call the jDownloader API.

        Returns a dict of lowercased "key: value" pairs when the response
        contains any, otherwise the raw response string. kwargs override
        the default GET parameters (e.g. cmd).
        """
        get_data = {
            'cmd': 'accountinfo',
            'login': self.loginname,
            'pass': self.password
        }
        get_data.update(kwargs)

        response = req.load("http://www.zevera.com/jDownloader.ashx", get = get_data, decode = True, just_header = just_header)
        self.logDebug(response)

        if ':' in response:
            if not just_header:
                # body fields are comma-separated; one per line eases parsing
                response = response.replace(',','\n')
            # "Key: value" lines -> {lowercased key: stripped value}
            return dict((y.strip().lower(), z.strip()) for (y,z) in [x.split(':',1) for x in response.splitlines() if ':' in x])
        else:
            return response
diff --git a/pyload/plugins/accounts/__init__.py b/pyload/plugins/accounts/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/accounts/__init__.py
diff --git a/pyload/plugins/addons/AlldebridCom.py b/pyload/plugins/addons/AlldebridCom.py
new file mode 100644
index 000000000..6818b8c43
--- /dev/null
+++ b/pyload/plugins/addons/AlldebridCom.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+# should be working
+
+from module.network.RequestFactory import getURL
+from module.plugins.internal.MultiHoster import MultiHoster
+
class AlldebridCom(MultiHoster):
    """Alldebrid.com multi-hoster hook: fetches the list of supported
    hoster domains from the alldebrid API."""

    __name__ = "AlldebridCom"
    __version__ = "0.13"
    __type__ = "hook"

    __config__ = [("activated", "bool", "Activated", "False"),
                  ("https", "bool", "Enable HTTPS", "False"),
                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
                  ("hosterList", "str", "Hoster list (comma separated)", ""),
                  # typo fixed: "stanard" -> "standard"
                  ("unloadFailing", "bool", "Revert to standard download if download fails", "False"),
                  ("interval", "int", "Reload interval in hours (0 to disable)", "24")]

    # fixed copy-paste error: description previously said "Real-Debrid.com"
    __description__ = """Alldebrid.com hook plugin"""
    __author_name__ = ("Andy, Voigt")
    __author_mail__ = ("spamsales@online.de")

    def getHoster(self):
        """Return the list of hoster domains supported by alldebrid.

        Queries the API over http or https depending on the plugin config;
        the response is a comma-separated, quoted list of domains.
        """
        https = "https" if self.getConfig("https") else "http"
        page = getURL(https + "://www.alldebrid.com/api.php?action=get_host").replace("\"", "").strip()

        return [x.strip() for x in page.split(",") if x.strip()]
diff --git a/pyload/plugins/addons/BypassCaptcha.py b/pyload/plugins/addons/BypassCaptcha.py
new file mode 100644
index 000000000..e24a439af
--- /dev/null
+++ b/pyload/plugins/addons/BypassCaptcha.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN, Godofdream, zoidberg
+"""
+
+from thread import start_new_thread
+from pycurl import FORM_FILE, LOW_SPEED_TIME
+
+from module.network.RequestFactory import getURL, getRequest
+from module.network.HTTPRequest import BadHeader
+
+from module.plugins.Hook import Hook
+
+PYLOAD_KEY = "4f771155b640970d5607f919a615bdefc67e7d32"
+
class BypassCaptchaException(Exception):
    """Error reported by the BypassCaptcha.com service."""

    def __init__(self, err):
        # store the raw error code/message exactly as received
        self.err = err

    def getCode(self):
        """Return the raw error value."""
        return self.err

    def __str__(self):
        return "<BypassCaptchaException %s>" % self.err

    # repr intentionally mirrors str
    __repr__ = __str__
+
class BypassCaptcha(Hook):
    """Hook that forwards unsolved captchas to the BypassCaptcha.com
    paid solving service."""

    __name__ = "BypassCaptcha"
    __version__ = "0.04"
    __description__ = """send captchas to BypassCaptcha.com"""
    __config__ = [("activated", "bool", "Activated", False),
                  ("force", "bool", "Force BC even if client is connected", False),
                  ("passkey", "password", "Passkey", "")]
    __author_name__ = ("RaNaN", "Godofdream", "zoidberg")
    __author_mail__ = ("RaNaN@pyload.org", "soilfcition@gmail.com", "zoidberg@mujmail.cz")

    SUBMIT_URL = "http://bypasscaptcha.com/upload.php"
    RESPOND_URL = "http://bypasscaptcha.com/check_value.php"
    GETCREDITS_URL = "http://bypasscaptcha.com/ex_left.php"

    def setup(self):
        # plugin-local info store (e.g. cached credit count)
        self.info = {}

    def getCredits(self):
        """Query remaining credits for the configured passkey.

        The response consists of "<Key> <value>" lines; the "Left"
        counter is returned as an int.
        """
        response = getURL(self.GETCREDITS_URL,
                          post = {"key": self.getConfig("passkey")}
                          )

        data = dict([x.split(' ',1) for x in response.splitlines()])
        return int(data['Left'])


    def submit(self, captcha, captchaType="file", match=None):
        """Upload a captcha image file.

        Returns (ticket, solution text); raises BypassCaptchaException
        with the raw response when no "Value" field comes back.
        """
        req = getRequest()

        #raise timeout threshold
        req.c.setopt(LOW_SPEED_TIME, 80)

        try:
            response = req.load(self.SUBMIT_URL,
                                post={"vendor_key": PYLOAD_KEY,
                                      "key": self.getConfig("passkey"),
                                      "gen_task_id": "1",
                                      "file": (FORM_FILE, captcha)},
                                multipart=True)
        finally:
            # always release the request, even when the upload raised
            req.close()

        # response is "<Key> <value>" lines, like getCredits()
        data = dict([x.split(' ',1) for x in response.splitlines()])
        if not data or "Value" not in data:
            raise BypassCaptchaException(response)

        result = data['Value']
        ticket = data['TaskId']
        self.logDebug("result %s : %s" % (ticket,result))

        return ticket, result

    def respond(self, ticket, success):
        """Report back whether the delivered solution was correct."""
        try:
            response = getURL(self.RESPOND_URL,
                              post={"task_id": ticket,
                                    "key": self.getConfig("passkey"),
                                    "cv": 1 if success else 0}
                              )
        except BadHeader, e:
            # best effort only; a failed report is logged, not raised
            self.logError("Could not send response.", str(e))

    def newCaptchaTask(self, task):
        """Claim the captcha task when this service is configured, no other
        service claimed it, and credits remain."""
        if "service" in task.data:
            return False  # already handled by another service

        if not task.isTextual():
            return False  # only plain-text captchas are supported

        if not self.getConfig("passkey"):
            return False

        if self.core.isClientConnected() and not self.getConfig("force"):
            return False  # prefer a connected GUI client unless forced

        if self.getCredits() > 0:
            task.handler.append(self)
            task.data['service'] = self.__name__
            task.setWaiting(100)
            # solve asynchronously; submit() blocks on the upload
            start_new_thread(self.processCaptcha, (task,))

        else:
            self.logInfo("Your %s account has not enough credits" % self.__name__)

    def captchaCorrect(self, task):
        # confirm a good solution so the solver gets credited
        if task.data['service'] == self.__name__ and "ticket" in task.data:
            self.respond(task.data["ticket"], True)

    def captchaInvalid(self, task):
        # complain about a wrong solution
        if task.data['service'] == self.__name__ and "ticket" in task.data:
            self.respond(task.data["ticket"], False)

    def processCaptcha(self, task):
        """Worker thread: submit the captcha file and store the result on
        the task; submit errors are recorded as task.error."""
        c = task.captchaFile
        try:
            ticket, result = self.submit(c)
        except BypassCaptchaException, e:
            task.error = e.getCode()
            return

        task.data["ticket"] = ticket
        task.setResult(result)
diff --git a/pyload/plugins/addons/CaptchaBrotherhood.py b/pyload/plugins/addons/CaptchaBrotherhood.py
new file mode 100644
index 000000000..bdf547827
--- /dev/null
+++ b/pyload/plugins/addons/CaptchaBrotherhood.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay, RaNaN, zoidberg
+"""
+from __future__ import with_statement
+
+from thread import start_new_thread
+
+import pycurl
+import StringIO
+from urllib import urlencode
+from time import sleep
+import Image
+
+from module.network.RequestFactory import getURL, getRequest
+from module.network.HTTPRequest import BadHeader
+from module.plugins.Hook import Hook
+
class CaptchaBrotherhoodException(Exception):
    """Error reported by the CaptchaBrotherhood.com service."""

    def __init__(self, err):
        # store the raw error code/message exactly as received
        self.err = err

    def getCode(self):
        """Return the raw error value."""
        return self.err

    def __str__(self):
        return "<CaptchaBrotherhoodException %s>" % self.err

    # repr intentionally mirrors str
    __repr__ = __str__
+
class CaptchaBrotherhood(Hook):
    """Hook that forwards unsolved captchas to the CaptchaBrotherhood.com
    solving service."""

    __name__ = "CaptchaBrotherhood"
    __version__ = "0.04"
    __description__ = """send captchas to CaptchaBrotherhood.com"""
    __config__ = [("activated", "bool", "Activated", False),
                  ("username", "str", "Username", ""),
                  ("force", "bool", "Force CT even if client is connected", False),
                  ("passkey", "password", "Password", ""),]
    __author_name__ = ("RaNaN", "zoidberg")
    __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")

    API_URL = "http://www.captchabrotherhood.com/"

    def setup(self):
        # plugin-local info store (e.g. cached credit count)
        self.info = {}

    def getCredits(self):
        """Query remaining credits.

        Successful responses look like "OK-<credits>"; anything else is
        raised as CaptchaBrotherhoodException.
        """
        response = getURL(self.API_URL + "askCredits.aspx",
                          get = {"username": self.getConfig("username"),
                                 "password": self.getConfig("passkey")})
        if not response.startswith("OK"):
            raise CaptchaBrotherhoodException(response)
        else:
            credits = int(response[3:])
            self.logInfo(_("%d credits left") % credits)
            self.info["credits"] = credits
            return credits

    def submit(self, captcha, captchaType="file", match=None):
        """Re-encode the captcha if needed, upload it, and poll for the
        solution.

        Returns (ticket, solution); raises CaptchaBrotherhoodException on
        conversion, upload or timeout failure.
        """
        try:
            img = Image.open(captcha)
            output = StringIO.StringIO()
            self.logDebug("CAPTCHA IMAGE", img, img.format, img.mode)
            if img.format in ("GIF", "JPEG"):
                img.save(output, img.format)
            else:
                # service accepts GIF/JPEG only; re-encode everything else
                if img.mode != "RGB":
                    img = img.convert("RGB")
                img.save(output, "JPEG")
            data = output.getvalue()
            output.close()
        except Exception, e:
            raise CaptchaBrotherhoodException("Reading or converting captcha image failed: %s" % e)

        req = getRequest()

        url = "%ssendNewCaptcha.aspx?%s" % (self.API_URL,
                                            urlencode({"username": self.getConfig("username"),
                                                       "password": self.getConfig("passkey"),
                                                       "captchaSource": "pyLoad",
                                                       "timeout": "80"})
                                            )

        # raw POST of the image bytes (not a multipart form)
        req.c.setopt(pycurl.URL, url)
        req.c.setopt(pycurl.POST, 1)
        req.c.setopt(pycurl.POSTFIELDS, data)
        req.c.setopt(pycurl.HTTPHEADER, [ "Content-Type: text/html" ])

        try:
            req.c.perform()
            response = req.getResponse()
        except Exception, e:
            raise CaptchaBrotherhoodException("Submit captcha image failed")

        req.close()

        if not response.startswith("OK"):
            # NOTE(review): response[1] is a single character — this looks
            # like it was meant to pass the whole response; confirm before
            # changing
            raise CaptchaBrotherhoodException(response[1])

        ticket = response[3:]

        # poll up to 15 times (~75 s) for the answer
        for i in range(15):
            sleep(5)
            response = self.get_api("askCaptchaResult", ticket)
            if response.startswith("OK-answered"):
                return ticket, response[12:]

        raise CaptchaBrotherhoodException("No solution received in time")

    def get_api(self, api, ticket):
        """GET helper for ticket-based API endpoints; returns the raw
        "OK..." response or raises on anything else."""
        response = getURL("%s%s.aspx" % (self.API_URL, api),
                          get={"username": self.getConfig("username"),
                               "password": self.getConfig("passkey"),
                               "captchaID": ticket}
                          )
        if not response.startswith("OK"):
            raise CaptchaBrotherhoodException("Unknown response: %s" % response)

        return response

    def newCaptchaTask(self, task):
        """Claim the captcha task when configured, unclaimed, and more than
        10 credits remain."""
        if "service" in task.data:
            return False  # already handled by another service

        if not task.isTextual():
            return False  # only plain-text captchas are supported

        if not self.getConfig("username") or not self.getConfig("passkey"):
            return False

        if self.core.isClientConnected() and not self.getConfig("force"):
            return False  # prefer a connected GUI client unless forced

        if self.getCredits() > 10:
            task.handler.append(self)
            task.data['service'] = self.__name__
            task.setWaiting(100)
            # solve asynchronously; submit() blocks while polling
            start_new_thread(self.processCaptcha, (task,))
        else:
            self.logInfo("Your CaptchaBrotherhood Account has not enough credits")

    def captchaInvalid(self, task):
        # complain about a wrong solution so it is not billed
        if task.data['service'] == self.__name__ and "ticket" in task.data:
            response = self.get_api("complainCaptcha", task.data['ticket'])

    def processCaptcha(self, task):
        """Worker thread: submit the captcha file and store the result on
        the task; submit errors are recorded as task.error."""
        c = task.captchaFile
        try:
            ticket, result = self.submit(c)
        except CaptchaBrotherhoodException, e:
            task.error = e.getCode()
            return

        task.data["ticket"] = ticket
        task.setResult(result)
diff --git a/pyload/plugins/addons/CaptchaTrader.py b/pyload/plugins/addons/CaptchaTrader.py
new file mode 100644
index 000000000..82dd2383a
--- /dev/null
+++ b/pyload/plugins/addons/CaptchaTrader.py
@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay, RaNaN
+"""
+
+try:
+ from json import loads
+except ImportError:
+ from simplejson import loads
+
+from thread import start_new_thread
+from pycurl import FORM_FILE, LOW_SPEED_TIME
+
+from module.network.RequestFactory import getURL, getRequest
+from module.network.HTTPRequest import BadHeader
+
+from module.plugins.Addon import Addon
+
+PYLOAD_KEY = "9f65e7f381c3af2b076ea680ae96b0b7"
+
class CaptchaTraderException(Exception):
    """Error reported by the captchatrader.com service."""

    def __init__(self, err):
        # store the raw error code/message exactly as received
        self.err = err

    def getCode(self):
        """Return the raw error value."""
        return self.err

    def __str__(self):
        return "<CaptchaTraderException %s>" % self.err

    # repr intentionally mirrors str
    __repr__ = __str__
+
class CaptchaTrader(Addon):
    """Addon that forwards unsolved captchas to the captchatrader.com
    solving service."""

    __name__ = "CaptchaTrader"
    __version__ = "0.15"
    __description__ = """send captchas to captchatrader.com"""
    __config__ = [("activated", "bool", "Activated", False),
                  ("username", "str", "Username", ""),
                  ("force", "bool", "Force CT even if client is connected", False),
                  ("passkey", "password", "Password", ""),]
    __author_name__ = ("RaNaN")
    __author_mail__ = ("RaNaN@pyload.org")

    SUBMIT_URL = "http://api.captchatrader.com/submit"
    RESPOND_URL = "http://api.captchatrader.com/respond"
    GETCREDITS_URL = "http://api.captchatrader.com/get_credits/username:%(user)s/password:%(password)s/"

    def setup(self):
        # plugin-local info store (e.g. cached credit count)
        self.info = {}

    def getCredits(self):
        """Query remaining credits.

        The API returns a JSON pair [code, value]: a negative code is an
        error (value is the message), otherwise value is the credit count.
        """
        json = getURL(CaptchaTrader.GETCREDITS_URL % {"user": self.getConfig("username"),
                                                      "password": self.getConfig("passkey")})
        response = loads(json)
        if response[0] < 0:
            raise CaptchaTraderException(response[1])
        else:
            self.logInfo(_("%s credits left") % response[1])
            self.info["credits"] = response[1]
            return response[1]

    def submit(self, captcha, captchaType="file", match=None):
        """Upload a captcha as a multipart form.

        Returns (ticket, solution) from the JSON pair response; raises
        CaptchaTraderException on API errors or a missing API key.
        """
        if not PYLOAD_KEY:
            raise CaptchaTraderException("No API Key Specified!")

        #if type(captcha) == str and captchaType == "file":
        #    raise CaptchaTraderException("Invalid Type")
        assert captchaType in ("file", "url-jpg", "url-jpeg", "url-png", "url-bmp")

        req = getRequest()

        #raise timeout threshold
        req.c.setopt(LOW_SPEED_TIME, 80)

        try:
            json = req.load(CaptchaTrader.SUBMIT_URL, post={"api_key": PYLOAD_KEY,
                                                            "username": self.getConfig("username"),
                                                            "password": self.getConfig("passkey"),
                                                            "value": (FORM_FILE, captcha),
                                                            "type": captchaType}, multipart=True)
        finally:
            # always release the request, even when the upload raised
            req.close()

        response = loads(json)
        if response[0] < 0:
            raise CaptchaTraderException(response[1])

        # on success the pair is [ticket, solution]
        ticket = response[0]
        result = response[1]
        self.logDebug("result %s : %s" % (ticket,result))

        return ticket, result

    def respond(self, ticket, success):
        """Report whether the delivered solution was correct (best effort)."""
        try:
            json = getURL(CaptchaTrader.RESPOND_URL, post={"is_correct": 1 if success else 0,
                                                           "username": self.getConfig("username"),
                                                           "password": self.getConfig("passkey"),
                                                           "ticket": ticket})

            response = loads(json)
            if response[0] < 0:
                raise CaptchaTraderException(response[1])

        except BadHeader, e:
            self.logError(_("Could not send response."), str(e))

    def newCaptchaTask(self, task):
        """Claim the captcha task when configured and more than 10 credits
        remain."""
        if not task.isTextual():
            return False  # only plain-text captchas are supported

        if not self.getConfig("username") or not self.getConfig("passkey"):
            return False

        if self.core.isClientConnected() and not self.getConfig("force"):
            return False  # prefer a connected GUI client unless forced

        if self.getCredits() > 10:
            task.handler.append(self)
            task.setWaiting(100)
            # solve asynchronously; submit() blocks on the upload
            start_new_thread(self.processCaptcha, (task,))

        else:
            self.logInfo(_("Your CaptchaTrader Account has not enough credits"))

    def captchaCorrect(self, task):
        # confirm a good solution so the solver gets credited
        if "ticket" in task.data:
            ticket = task.data["ticket"]
            self.respond(ticket, True)

    def captchaInvalid(self, task):
        # complain about a wrong solution
        if "ticket" in task.data:
            ticket = task.data["ticket"]
            self.respond(ticket, False)

    def processCaptcha(self, task):
        """Worker thread: submit the captcha file and store the result on
        the task; submit errors are recorded as task.error."""
        c = task.captchaFile
        try:
            ticket, result = self.submit(c)
        except CaptchaTraderException, e:
            task.error = e.getCode()
            return

        task.data["ticket"] = ticket
        task.setResult(result)
diff --git a/pyload/plugins/addons/Checksum.py b/pyload/plugins/addons/Checksum.py
new file mode 100644
index 000000000..b290838bb
--- /dev/null
+++ b/pyload/plugins/addons/Checksum.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+from __future__ import with_statement
+import hashlib, zlib
+from os import remove
+from os.path import getsize, isfile, splitext
+import re
+
+from module.utils import save_join, fs_encode
+from module.plugins.Hook import Hook
+
+def computeChecksum(local_file, algorithm):
+ if algorithm in getattr(hashlib, "algorithms", ("md5", "sha1", "sha224", "sha256", "sha384", "sha512")):
+ h = getattr(hashlib, algorithm)()
+ chunk_size = 128 * h.block_size
+
+ with open(local_file, 'rb') as f:
+ for chunk in iter(lambda: f.read(chunk_size), ''):
+ h.update(chunk)
+
+ return h.hexdigest()
+
+ elif algorithm in ("adler32", "crc32"):
+ hf = getattr(zlib, algorithm)
+ last = 0
+
+ with open(local_file, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), ''):
+ last = hf(chunk, last)
+
+ return "%x" % last
+
+ else:
+ return None
+
+class Checksum(Hook):
+ __name__ = "Checksum"
+ __version__ = "0.07"
+ __description__ = "Verify downloaded file size and checksum (enable in general preferences)"
+ __config__ = [("activated", "bool", "Activated", True),
+ ("action", "fail;retry;nothing", "What to do if check fails?", "retry"),
+ ("max_tries", "int", "Number of retries", 2)]
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ methods = { 'sfv':'crc32', 'crc': 'crc32', 'hash': 'md5'}
+ regexps = { 'sfv': r'^(?P<name>[^;].+)\s+(?P<hash>[0-9A-Fa-f]{8})$',
+ 'md5': r'^(?P<name>[0-9A-Fa-f]{32}) (?P<file>.+)$',
+ 'crc': r'filename=(?P<name>.+)\nsize=(?P<size>\d+)\ncrc32=(?P<hash>[0-9A-Fa-f]{8})$',
+ 'default': r'^(?P<hash>[0-9A-Fa-f]+)\s+\*?(?P<name>.+)$' }
+
+ def setup(self):
+ if not self.config['general']['checksum']:
+ self.logInfo("Checksum validation is disabled in general configuration")
+
+ self.algorithms = sorted(getattr(hashlib, "algorithms", ("md5", "sha1", "sha224", "sha256", "sha384", "sha512")), reverse = True)
+ self.algorithms.extend(["crc32", "adler32"])
+ self.formats = self.algorithms + ['sfv', 'crc', 'hash']
+
+ def downloadFinished(self, pyfile):
+ """
+ Compute checksum for the downloaded file and compare it with the hash provided by the hoster.
+ pyfile.plugin.check_data should be a dictionary which can contain:
+ a) if known, the exact filesize in bytes (e.g. "size": 123456789)
+ b) hexadecimal hash string with algorithm name as key (e.g. "md5": "d76505d0869f9f928a17d42d66326307")
+ """
+ if hasattr(pyfile.plugin, "check_data") and (isinstance(pyfile.plugin.check_data, dict)):
+ data = pyfile.plugin.check_data.copy()
+ elif hasattr(pyfile.plugin, "api_data") and (isinstance(pyfile.plugin.api_data, dict)):
+ data = pyfile.plugin.api_data.copy()
+ else:
+ return
+
+ self.logDebug(data)
+
+ if not pyfile.plugin.lastDownload:
+ self.checkFailed(pyfile, None, "No file downloaded")
+
+ local_file = fs_encode(pyfile.plugin.lastDownload)
+ #download_folder = self.config['general']['download_folder']
+ #local_file = fs_encode(save_join(download_folder, pyfile.package().folder, pyfile.name))
+
+ if not isfile(local_file):
+ self.checkFailed(pyfile, None, "File does not exist")
+
+ # validate file size
+ if "size" in data:
+ api_size = int(data['size'])
+ file_size = getsize(local_file)
+ if api_size != file_size:
+ self.logWarning("File %s has incorrect size: %d B (%d expected)" % (pyfile.name, file_size, api_size))
+ self.checkFailed(pyfile, local_file, "Incorrect file size")
+ del data['size']
+
+ # validate checksum
+ if data and self.config['general']['checksum']:
+ if "checksum" in data:
+ data['md5'] = data['checksum']
+
+ for key in self.algorithms:
+ if key in data:
+ checksum = computeChecksum(local_file, key.replace("-","").lower())
+ if checksum:
+ if checksum == data[key]:
+ self.logInfo('File integrity of "%s" verified by %s checksum (%s).' % (pyfile.name, key.upper(), checksum))
+ return
+ else:
+ self.logWarning("%s checksum for file %s does not match (%s != %s)" % (key.upper(), pyfile.name, checksum, data[key]))
+ self.checkFailed(pyfile, local_file, "Checksums do not match")
+ else:
+ self.logWarning("Unsupported hashing algorithm: %s" % key.upper())
+ else:
+ self.logWarning("Unable to validate checksum for file %s" % (pyfile.name))
+
+ def checkFailed(self, pyfile, local_file, msg):
+ action = self.getConfig("action")
+ if action == "fail":
+ pyfile.plugin.fail(reason = msg)
+ elif action == "retry":
+ if local_file:
+ remove(local_file)
+ pyfile.plugin.retry(reason = msg, max_tries = self.getConfig("max_tries"))
+
+
+ def packageFinished(self, pypack):
+ download_folder = save_join(self.config['general']['download_folder'], pypack.folder, "")
+
+ for link in pypack.getChildren().itervalues():
+ file_type = splitext(link["name"])[1][1:].lower()
+ #self.logDebug(link, file_type)
+
+ if file_type not in self.formats:
+ continue
+
+ hash_file = fs_encode(save_join(download_folder, link["name"]))
+ if not isfile(hash_file):
+ self.logWarning("File not found: %s" % link["name"])
+ continue
+
+ with open(hash_file) as f:
+ text = f.read()
+
+ for m in re.finditer(self.regexps.get(file_type, self.regexps['default']), text):
+ data = m.groupdict()
+ self.logDebug(link["name"], data)
+
+ local_file = fs_encode(save_join(download_folder, data["name"]))
+ algorithm = self.methods.get(file_type, file_type)
+ checksum = computeChecksum(local_file, algorithm)
+ if checksum == data["hash"]:
+ self.logInfo('File integrity of "%s" verified by %s checksum (%s).' % (data["name"], algorithm, checksum))
+ else:
+ self.logWarning("%s checksum for file %s does not match (%s != %s)" % (algorithm, data["name"], checksum, data["hash"])) \ No newline at end of file
diff --git a/pyload/plugins/addons/ClickAndLoad.py b/pyload/plugins/addons/ClickAndLoad.py
new file mode 100644
index 000000000..fcc2cf24e
--- /dev/null
+++ b/pyload/plugins/addons/ClickAndLoad.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+ @interface-version: 0.2
+"""
+
+import socket
+import thread
+
+from module.plugins.Addon import Addon
+
class ClickAndLoad(Addon):
    """Implements JDownloader's Click'N'Load protocol by listening on port
    9666 and forwarding every connection to the pyLoad webinterface."""

    __name__ = "ClickAndLoad"
    __version__ = "0.21"
    __description__ = """Gives abillity to use jd's click and load. depends on webinterface"""
    __config__ = [("activated", "bool", "Activated", "True"),
                  ("extern", "bool", "Allow external link adding", "False")]
    __author_name__ = ("RaNaN", "mkaay")
    __author_mail__ = ("RaNaN@pyload.de", "mkaay@mkaay.de")

    def activate(self):
        # only useful when the webinterface itself is running
        self.port = int(self.core.config['webinterface']['port'])
        if self.core.config['webinterface']['activated']:
            try:
                if self.getConfig("extern"):
                    ip = "0.0.0.0"  # listen on all interfaces
                else:
                    ip = "127.0.0.1"  # local connections only

                # proxy's settings tuple is (listen_ip, webinterface_port,
                # listen_port); see proxy()/server() below
                thread.start_new_thread(proxy, (self, ip, self.port, 9666))
            except:
                self.logError("ClickAndLoad port already in use.")
+
+
def proxy(self, *settings):
    """Start the C'n'L server thread and then block this thread forever.

    `self` is the ClickAndLoad addon instance (used for logging);
    settings = (listen_ip, webinterface_port, listen_port).
    """
    thread.start_new_thread(server, (self,) + settings)
    lock = thread.allocate_lock()
    lock.acquire()
    # acquiring the already-held lock never returns: this deliberately
    # parks the thread so the server thread keeps running
    lock.acquire()
+
+
def server(self, *settings):
    """Accept connections on (settings[0], settings[2]) and pipe each one
    bidirectionally to the webinterface at 127.0.0.1:settings[1]."""
    try:
        dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        dock_socket.bind((settings[0], settings[2]))
        dock_socket.listen(5)
        while True:
            client_socket = dock_socket.accept()[0]
            server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_socket.connect(("127.0.0.1", settings[1]))
            # pump data in both directions, one thread per direction
            thread.start_new_thread(forward, (client_socket, server_socket))
            thread.start_new_thread(forward, (server_socket, client_socket))
    except socket.error, e:
        # socket.error may or may not carry .errno depending on how it
        # was raised (Python 2); fall back to args[0]
        if hasattr(e, "errno"):
            errno = e.errno
        else:
            errno = e.args[0]

        if errno == 98:  # EADDRINUSE
            self.logWarning(_("Click'N'Load: Port 9666 already in use"))
            return
        # NOTE(review): restarting without delay risks a busy restart loop
        thread.start_new_thread(server, (self,) + settings)
    except:
        # any other failure: restart the server in a fresh thread
        thread.start_new_thread(server, (self,) + settings)
+
+
def forward(source, destination):
    """Pump data from `source` to `destination` until the source side
    reaches EOF, then half-close the destination's write side."""
    while True:
        data = source.recv(1024)
        if not data:
            # EOF on source: signal the peer that no more data will come
            #source.shutdown(socket.SHUT_RD)
            destination.shutdown(socket.SHUT_WR)
            break
        destination.sendall(data)
diff --git a/pyload/plugins/addons/DeathByCaptcha.py b/pyload/plugins/addons/DeathByCaptcha.py
new file mode 100644
index 000000000..59ff40ded
--- /dev/null
+++ b/pyload/plugins/addons/DeathByCaptcha.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay, RaNaN, zoidberg
+"""
+from __future__ import with_statement
+
+from thread import start_new_thread
+from pycurl import FORM_FILE, HTTPHEADER, RESPONSE_CODE
+from time import sleep
+from base64 import b64encode
+import re
+
+from module.network.RequestFactory import getURL, getRequest
+from module.network.HTTPRequest import BadHeader
+from module.plugins.Hook import Hook
+from module.common.json_layer import json_loads
+
class DeathByCaptchaException(Exception):
    """Exception carrying a DeathByCaptcha API error code."""

    # known API error codes mapped to human-readable descriptions
    DBC_ERRORS = {'not-logged-in': 'Access denied, check your credentials',
                  'invalid-credentials': 'Access denied, check your credentials',
                  'banned': 'Access denied, account is suspended',
                  'insufficient-funds': 'Insufficient account balance to decrypt CAPTCHA',
                  'invalid-captcha': 'CAPTCHA is not a valid image',
                  'service-overload': 'CAPTCHA was rejected due to service overload, try again later',
                  'invalid-request': 'Invalid request',
                  'timed-out': 'No CAPTCHA solution received in time'}

    def __init__(self, err):
        self.err = err

    def getCode(self):
        """Return the raw API error code."""
        return self.err

    def getDesc(self):
        """Return a readable description, or the raw code when unknown."""
        return self.DBC_ERRORS.get(self.err, self.err)

    def __str__(self):
        return "<DeathByCaptchaException %s>" % self.err

    # repr is intentionally identical to str
    __repr__ = __str__
+
+class DeathByCaptcha(Hook):
+ __name__ = "DeathByCaptcha"
+ __version__ = "0.03"
+ __description__ = """send captchas to DeathByCaptcha.com"""
+ __config__ = [("activated", "bool", "Activated", False),
+ ("username", "str", "Username", ""),
+ ("passkey", "password", "Password", ""),
+ ("force", "bool", "Force DBC even if client is connected", False)]
+ __author_name__ = ("RaNaN", "zoidberg")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")
+
+ API_URL = "http://api.dbcapi.me/api/"
+
+ def setup(self):
+ self.info = {}
+
+ def call_api(self, api="captcha", post=False, multipart=False):
+ req = getRequest()
+ req.c.setopt(HTTPHEADER, ["Accept: application/json",
+ "User-Agent: pyLoad %s" % self.core.version])
+
+ if post:
+ if not isinstance(post, dict):
+ post = {}
+ post.update({"username": self.getConfig("username"),
+ "password": self.getConfig("passkey")})
+
+ response = None
+ try:
+ json = req.load("%s%s" % (self.API_URL, api),
+ post = post,
+ multipart=multipart)
+ self.logDebug(json)
+ response = json_loads(json)
+
+ if "error" in response:
+ raise DeathByCaptchaException(response['error'])
+ elif "status" not in response:
+ raise DeathByCaptchaException(str(response))
+
+ except BadHeader, e:
+ if 403 == e.code:
+ raise DeathByCaptchaException('not-logged-in')
+ elif 413 == e.code:
+ raise DeathByCaptchaException('invalid-captcha')
+ elif 503 == e.code:
+ raise DeathByCaptchaException('service-overload')
+ elif e.code in (400, 405):
+ raise DeathByCaptchaException('invalid-request')
+ else:
+ raise
+
+ finally:
+ req.close()
+
+ return response
+
+ def getCredits(self):
+ response = self.call_api("user", True)
+
+ if 'is_banned' in response and response['is_banned']:
+ raise DeathByCaptchaException('banned')
+ elif 'balance' in response and 'rate' in response:
+ self.info.update(response)
+ else:
+ raise DeathByCaptchaException(response)
+
+ def getStatus(self):
+ response = self.call_api("status", False)
+
+ if 'is_service_overloaded' in response and response['is_service_overloaded']:
+ raise DeathByCaptchaException('service-overload')
+
+ def submit(self, captcha, captchaType="file", match=None):
+ #workaround multipart-post bug in HTTPRequest.py
+ if re.match("^[A-Za-z0-9]*$", self.getConfig("passkey")):
+ multipart = True
+ data = (FORM_FILE, captcha)
+ else:
+ multipart = False
+ with open(captcha, 'rb') as f:
+ data = f.read()
+ data = "base64:" + b64encode(data)
+
+ response = self.call_api("captcha", {"captchafile": data}, multipart)
+
+ if "captcha" not in response:
+ raise DeathByCaptchaException(response)
+ ticket = response['captcha']
+
+ for i in range(24):
+ sleep(5)
+ response = self.call_api("captcha/%d" % ticket, False)
+ if response['text'] and response['is_correct']:
+ break
+ else:
+ raise DeathByCaptchaException('timed-out')
+
+ result = response['text']
+ self.logDebug("result %s : %s" % (ticket,result))
+
+ return ticket, result
+
+ def newCaptchaTask(self, task):
+ if "service" in task.data:
+ return False
+
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("username") or not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ try:
+ self.getStatus()
+ self.getCredits()
+ except DeathByCaptchaException, e:
+ self.logError(e.getDesc())
+ return False
+
+ balance, rate = self.info["balance"], self.info["rate"]
+ self.logInfo("Account balance: US$%.3f (%d captchas left at %.2f cents each)" % (balance / 100, balance // rate, rate))
+
+ if balance > rate:
+ task.handler.append(self)
+ task.data['service'] = self.__name__
+ task.setWaiting(180)
+ start_new_thread(self.processCaptcha, (task,))
+
+ def captchaInvalid(self, task):
+ if task.data['service'] == self.__name__ and "ticket" in task.data:
+ try:
+ response = self.call_api("captcha/%d/report" % task.data["ticket"], True)
+ except DeathByCaptchaException, e:
+ self.logError(e.getDesc())
+ except Exception, e:
+ self.logError(e)
+
+ def processCaptcha(self, task):
+ c = task.captchaFile
+ try:
+ ticket, result = self.submit(c)
+ except DeathByCaptchaException, e:
+ task.error = e.getCode()
+ self.logError(e.getDesc())
+ return
+
+ task.data["ticket"] = ticket
+ task.setResult(result) \ No newline at end of file
diff --git a/pyload/plugins/addons/DebridItaliaCom.py b/pyload/plugins/addons/DebridItaliaCom.py
new file mode 100644
index 000000000..99b2dd626
--- /dev/null
+++ b/pyload/plugins/addons/DebridItaliaCom.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.internal.MultiHoster import MultiHoster
+
+
class DebridItaliaCom(MultiHoster):
    """Debriditalia.com multi-hoster hook with a static hoster list."""
    __name__ = "DebridItaliaCom"
    __version__ = "0.06"
    __type__ = "hook"
    __config__ = [("activated", "bool", "Activated", "False"),
                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
                  ("hosterList", "str", "Hoster list (comma separated)", ""),
                  ("unloadFailing", "bool", "Revert to standard download if download fails", "False"),
                  ("interval", "int", "Reload interval in hours (0 to disable)", "24")]

    __description__ = """Debriditalia.com hook plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    # hosters supported by the service; maintained by hand, no remote query
    HOSTERS = ["netload.in", "hotfile.com", "rapidshare.com", "multiupload.com",
               "uploading.com", "megashares.com", "crocko.com", "filepost.com",
               "bitshare.com", "share-links.biz", "putlocker.com", "uploaded.to",
               "speedload.org", "rapidgator.net", "likeupload.net", "cyberlocker.ch",
               "depositfiles.com", "extabit.com", "filefactory.com", "sharefiles.co",
               "ryushare.com", "tusfiles.net", "nowvideo.co", "cloudzer.net", "letitbit.net",
               "easybytez.com"]

    def getHoster(self):
        """Return the list of hoster domains handled by debriditalia.com."""
        return self.HOSTERS
diff --git a/pyload/plugins/addons/DeleteFinished.py b/pyload/plugins/addons/DeleteFinished.py
new file mode 100644
index 000000000..155158701
--- /dev/null
+++ b/pyload/plugins/addons/DeleteFinished.py
@@ -0,0 +1,84 @@
+ # -*- coding: utf-8 -*-
+
+'''
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: Walter Purcaro
+'''
+
+from module.database import style
+from module.plugins.Hook import Hook
+
+
class DeleteFinished(Hook):
    """Periodically removes fully finished packages from the queue database."""
    __name__ = 'DeleteFinished'
    __version__ = '1.09'
    __description__ = 'Automatically delete all finished packages from queue'
    __config__ = [
        ('activated', 'bool', 'Activated', 'False'),
        ('interval', 'int', 'Delete every (hours)', '72'),
        ('deloffline', 'bool', 'Delete packages with offline links', 'False')
    ]
    __author_name__ = ('Walter Purcaro')
    __author_mail__ = ('vuolter@gmail.com')

    ## overwritten methods ##
    def periodical(self):
        '''Scheduler callback: purge finished packages unless sleeping.'''
        if self.info['sleep']:
            return
        deloffline = self.getConfig('deloffline')
        # link status codes considered "done": 0,4 (+1 = offline if enabled)
        mode = '0,1,4' if deloffline else '0,4'
        msg = 'delete all finished packages in queue list (%s packages with offline links)'
        self.logInfo(msg % ('including' if deloffline else 'excluding'))
        self.deleteFinished(mode)
        # sleep again until the next package finishes
        self.info['sleep'] = True
        self.addEvent('packageFinished', self.wakeup)

    def pluginConfigChanged(self, plugin, name, value):
        '''Re-arm the periodical job when the interval setting changes.'''
        if name == 'interval' and value != self.interval:
            self.interval = value * 3600
            self.initPeriodical()

    def unload(self):
        self.removeEvent('packageFinished', self.wakeup)

    def coreReady(self):
        self.info = {'sleep': True}
        self.pluginConfigChanged('DeleteFinished', 'interval', self.getConfig('interval'))
        self.addEvent('packageFinished', self.wakeup)

    ## own methods ##
    @style.queue
    def deleteFinished(self, mode):
        '''Delete packages whose links are all in one of the states in mode,
        then orphaned links.'''
        self.c.execute('DELETE FROM packages WHERE NOT EXISTS(SELECT 1 FROM links WHERE package=packages.id AND status NOT IN (%s))' % mode)
        self.c.execute('DELETE FROM links WHERE NOT EXISTS(SELECT 1 FROM packages WHERE id=links.package)')

    def wakeup(self, pypack):
        '''A package finished: let the next periodical run clean up.'''
        self.removeEvent('packageFinished', self.wakeup)
        self.info['sleep'] = False

    ## event managing ##
    def addEvent(self, event, func):
        '''Adds an event listener for event name'''
        listeners = self.m.events.setdefault(event, [])
        if func in listeners:
            self.logDebug('Function already registered %s' % func)
        else:
            listeners.append(func)

    def setup(self):
        self.m = self.manager
        self.removeEvent = self.m.removeEvent
diff --git a/pyload/plugins/addons/DownloadScheduler.py b/pyload/plugins/addons/DownloadScheduler.py
new file mode 100644
index 000000000..4049d71c5
--- /dev/null
+++ b/pyload/plugins/addons/DownloadScheduler.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+ Original idea by new.cze
+"""
+
+import re
+from time import localtime
+
+from module.plugins.Hook import Hook
+
+
class DownloadScheduler(Hook):
    """Adjusts the global download speed according to a user timetable."""
    __name__ = "DownloadScheduler"
    __version__ = "0.21"
    __description__ = """Download Scheduler"""
    __config__ = [("activated", "bool", "Activated", "False"),
                  ("timetable", "str", "List time periods as hh:mm full or number(kB/s)",
                   "0:00 full, 7:00 250, 10:00 0, 17:00 150"),
                  ("abort", "bool", "Abort active downloads when start period with speed 0", "False")]
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    def setup(self):
        # handle of the pending scheduler job; the hook manager removes it
        # when the hook is unloaded
        self.cb = None

    def coreReady(self):
        self.updateSchedule()

    def updateSchedule(self, schedule=None):
        """Parse the timetable, apply the current period's speed, and re-arm
        the scheduler to fire at the next period boundary."""
        if schedule is None:
            schedule = self.getConfig("timetable")

        entries = re.findall("(\d{1,2}):(\d{2})[\s]*(-?\d+)",
                             schedule.lower().replace("full", "-1").replace("none", "0"))
        if not entries:
            self.logError("Invalid schedule")
            return

        t0 = localtime()
        # the "X" sentinel marks the current time inside the sorted timetable
        now = (t0.tm_hour, t0.tm_min, t0.tm_sec, "X")
        timetable = sorted([(int(h), int(m), 0, int(v)) for h, m, v in entries] + [now])

        self.logDebug("Schedule", timetable)

        for pos, entry in enumerate(timetable):
            if entry[3] == "X":
                prev = timetable[pos - 1]
                nxt = timetable[(pos + 1) % len(timetable)]
                self.logDebug("Now/Last/Next", now, prev, nxt)

                # the period we are currently inside started at prev
                self.setDownloadSpeed(prev[3])

                seconds = (((24 + nxt[0] - now[0]) * 60 + nxt[1] - now[1]) * 60 + nxt[2] - now[2]) % 86400
                self.core.scheduler.removeJob(self.cb)
                self.cb = self.core.scheduler.addJob(seconds, self.updateSchedule, threaded=False)

    def setDownloadSpeed(self, speed):
        """Pause/unpause the server and apply the speed limit.

        speed == 0 pauses, speed > 0 limits to that many kB/s, and a
        negative speed means unlimited ("full").
        """
        if speed == 0:
            abort = self.getConfig("abort")
            self.logInfo("Stopping download server. (Running downloads will %sbe aborted.)" % ('' if abort else 'not '))
            self.core.api.pauseServer()
            if abort:
                self.core.api.stopAllDownloads()
        else:
            self.core.api.unpauseServer()

        if speed > 0:
            self.logInfo("Setting download speed to %d kB/s" % speed)
            self.core.api.setConfigValue("download", "limit_speed", 1)
            self.core.api.setConfigValue("download", "max_speed", speed)
        else:
            self.logInfo("Setting download speed to FULL")
            self.core.api.setConfigValue("download", "limit_speed", 0)
            self.core.api.setConfigValue("download", "max_speed", -1)
diff --git a/pyload/plugins/addons/EasybytezCom.py b/pyload/plugins/addons/EasybytezCom.py
new file mode 100644
index 000000000..6a4ded85b
--- /dev/null
+++ b/pyload/plugins/addons/EasybytezCom.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+from module.network.RequestFactory import getURL
+from module.plugins.internal.MultiHoster import MultiHoster
+import re
+
+class EasybytezCom(MultiHoster):
+ __name__ = "EasybytezCom"
+ __version__ = "0.03"
+ __type__ = "hook"
+ __config__ = [("activated", "bool", "Activated", "False"),
+ ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+ ("hosterList", "str", "Hoster list (comma separated)", "")]
+ __description__ = """EasyBytez.com hook plugin"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ def getHoster(self):
+ self.account = self.core.accountManager.getAccountPlugin(self.__name__)
+ user = self.account.selectAccount()[0]
+
+ try:
+ req = self.account.getAccountRequest(user)
+ page = req.load("http://www.easybytez.com")
+
+ found = re.search(r'</textarea>\s*Supported sites:(.*)', page)
+ return found.group(1).split(',')
+ except Exception, e:
+ self.logDebug(e)
+ self.logWarning("Unable to load supported hoster list, using last known")
+ return ['bitshare.com', 'crocko.com', 'ddlstorage.com', 'depositfiles.com', 'extabit.com', 'hotfile.com', 'mediafire.com', 'netload.in', 'rapidgator.net', 'rapidshare.com', 'uploading.com', 'uload.to', 'uploaded.to'] \ No newline at end of file
diff --git a/pyload/plugins/addons/Ev0InFetcher.py b/pyload/plugins/addons/Ev0InFetcher.py
new file mode 100644
index 000000000..970c70cd5
--- /dev/null
+++ b/pyload/plugins/addons/Ev0InFetcher.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+from module.lib import feedparser
+from time import mktime, time
+
+from module.plugins.Addon import Addon
+
class Ev0InFetcher(Addon):
    """Periodically polls the ev0.in RSS feed and queues new episodes of
    the shows the user watches."""
    __name__ = "Ev0InFetcher"
    __version__ = "0.21"
    __description__ = """checks rss feeds for ev0.in"""
    __config__ = [("activated", "bool", "Activated", "False"),
                  ("interval", "int", "Check interval in minutes", "10"),
                  ("queue", "bool", "Move new shows directly to Queue", False),
                  ("shows", "str", "Shows to check for (comma separated)", ""),
                  ("quality", "xvid;x264;rmvb", "Video Format", "xvid"),
                  ("hoster", "str", "Hoster to use (comma separated)", "NetloadIn,RapidshareCom,MegauploadCom,HotfileCom")]
    __author_name__ = ("mkaay")
    __author_mail__ = ("mkaay@mkaay.de")

    def setup(self):
        # periodical interval in seconds (config value is minutes)
        self.interval = self.getConfig("interval") * 60

    def filterLinks(self, links):
        """Return the URLs of the first configured hoster present in links,
        or an empty list when none of them matched."""
        results = self.core.pluginManager.parseUrls(links)
        sortedLinks = {}

        for url, hoster in results[0]:
            sortedLinks.setdefault(hoster, []).append(url)

        for h in self.getConfig("hoster").split(","):
            try:
                return sortedLinks[h.strip()]
            # was a bare `except:` — only a missing hoster key is expected here
            except KeyError:
                continue
        return []

    def periodical(self):
        """Check the feed for episodes newer than the last seen timestamp
        per show, queue them, and clean up stale storage records."""
        def normalizefiletitle(filename):
            # "Some.Show_S01E01" -> "some show s01e01" for substring matching
            filename = filename.replace('.', ' ')
            filename = filename.replace('_', ' ')
            filename = filename.lower()
            return filename

        shows = [s.strip() for s in self.getConfig("shows").split(",")]

        feed = feedparser.parse("http://feeds.feedburner.com/ev0in/%s?format=xml" % self.getConfig("quality"))

        showStorage = {}
        for show in shows:
            showStorage[show] = int(self.getStorage("show_%s_lastfound" % show, 0))

        for item in feed['items']:
            for show, lastfound in showStorage.iteritems():
                if show.lower() in normalizefiletitle(item['title']) and lastfound < int(mktime(item.date_parsed)):
                    links = self.filterLinks(item['description'].split("<br />"))
                    packagename = item['title'].encode("utf-8")
                    self.logInfo("Ev0InFetcher: new episode '%s' (matched '%s')" % (packagename, show))
                    self.core.api.addPackage(packagename, links, 1 if self.getConfig("queue") else 0)
                    self.setStorage("show_%s_lastfound" % show, int(mktime(item.date_parsed)))

        # forget shows that have not produced an episode for over a month
        for show, lastfound in self.getStorage().iteritems():
            if int(lastfound) > 0 and int(lastfound) + (3600 * 24 * 30) < int(time()):
                self.delStorage("show_%s_lastfound" % show)
                self.logDebug("Ev0InFetcher: cleaned '%s' record" % show)
diff --git a/pyload/plugins/addons/ExpertDecoders.py b/pyload/plugins/addons/ExpertDecoders.py
new file mode 100644
index 000000000..2e66e49ca
--- /dev/null
+++ b/pyload/plugins/addons/ExpertDecoders.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay, RaNaN, zoidberg
+"""
+from __future__ import with_statement
+
+from thread import start_new_thread
+from pycurl import FORM_FILE, LOW_SPEED_TIME
+from uuid import uuid4
+from base64 import b64encode
+
+from module.network.RequestFactory import getURL, getRequest
+from module.network.HTTPRequest import BadHeader
+
+from module.plugins.Hook import Hook
+
+class ExpertDecoders(Hook):
+ __name__ = "ExpertDecoders"
+ __version__ = "0.01"
+ __description__ = """send captchas to expertdecoders.com"""
+ __config__ = [("activated", "bool", "Activated", False),
+ ("force", "bool", "Force CT even if client is connected", False),
+ ("passkey", "password", "Access key", ""),]
+ __author_name__ = ("RaNaN", "zoidberg")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")
+
+ API_URL = "http://www.fasttypers.org/imagepost.ashx"
+
+ def setup(self):
+ self.info = {}
+
+ def getCredits(self):
+ response = getURL(self.API_URL, post = { "key": self.getConfig("passkey"), "action": "balance" })
+
+ if response.isdigit():
+ self.logInfo(_("%s credits left") % response)
+ self.info["credits"] = credits = int(response)
+ return credits
+ else:
+ self.logError(response)
+ return 0
+
+ def processCaptcha(self, task):
+ task.data["ticket"] = ticket = uuid4()
+ result = None
+
+ with open(task.captchaFile, 'rb') as f:
+ data = f.read()
+ data = b64encode(data)
+ #self.logDebug("%s: %s : %s" % (ticket, task.captchaFile, data))
+
+ req = getRequest()
+ #raise timeout threshold
+ req.c.setopt(LOW_SPEED_TIME, 80)
+
+ try:
+ result = req.load(self.API_URL,
+ post={ "action": "upload",
+ "key": self.getConfig("passkey"),
+ "file": data,
+ "gen_task_id": ticket }
+ )
+ finally:
+ req.close()
+
+ self.logDebug("result %s : %s" % (ticket, result))
+ task.setResult(result)
+
+ def newCaptchaTask(self, task):
+ if not task.isTextual():
+ return False
+
+ if not self.getConfig("passkey"):
+ return False
+
+ if self.core.isClientConnected() and not self.getConfig("force"):
+ return False
+
+ if self.getCredits() > 0:
+ task.handler.append(self)
+ task.setWaiting(100)
+ start_new_thread(self.processCaptcha, (task,))
+
+ else:
+ self.logInfo(_("Your ExpertDecoders Account has not enough credits"))
+
+ def captchaInvalid(self, task):
+ if "ticket" in task.data:
+
+ try:
+ response = getURL(self.API_URL,
+ post={ "action": "refund",
+ "key": self.getConfig("passkey"),
+ "gen_task_id": task.data["ticket"] }
+ )
+ self.logInfo("Request refund: %s" % response)
+
+ except BadHeader, e:
+ self.logError("Could not send refund request.", str(e)) \ No newline at end of file
diff --git a/pyload/plugins/addons/ExternalScripts.py b/pyload/plugins/addons/ExternalScripts.py
new file mode 100644
index 000000000..8f5a5841e
--- /dev/null
+++ b/pyload/plugins/addons/ExternalScripts.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+import subprocess
+from os import access, X_OK, makedirs
+from os.path import basename
+
+from module.plugins.Addon import Addon
+from module.utils.fs import save_join, exists, join, listdir
+
+class ExternalScripts(Addon):
+ __name__ = "ExternalScripts"
+ __version__ = "0.22"
+ __description__ = """Run external scripts"""
+ __config__ = [("activated", "bool", "Activated", "True")]
+ __author_name__ = ("mkaay", "RaNaN", "spoob")
+ __author_mail__ = ("mkaay@mkaay.de", "ranan@pyload.org", "spoob@pyload.org")
+
+ event_list = ["unrarFinished", "allDownloadsFinished", "allDownloadsProcessed"]
+
+ def setup(self):
+ self.scripts = {}
+
+ folders = ['download_preparing', 'download_finished', 'package_finished',
+ 'before_reconnect', 'after_reconnect', 'unrar_finished',
+ 'all_dls_finished', 'all_dls_processed']
+
+ for folder in folders:
+
+ self.scripts[folder] = []
+
+ self.initPluginType(folder, join(pypath, 'scripts', folder))
+ self.initPluginType(folder, join('scripts', folder))
+
+ for script_type, names in self.scripts.iteritems():
+ if names:
+ self.logInfo((_("Installed scripts for %s: ") % script_type ) + ", ".join([basename(x) for x in names]))
+
+
+ def initPluginType(self, folder, path):
+ if not exists(path):
+ try:
+ makedirs(path)
+ except :
+ self.logDebug("Script folder %s not created" % folder)
+ return
+
+ for f in listdir(path):
+ if f.startswith("#") or f.startswith(".") or f.startswith("_") or f.endswith("~") or f.endswith(".swp"):
+ continue
+
+ if not access(join(path,f), X_OK):
+ self.logWarning(_("Script not executable:") + " %s/%s" % (folder, f))
+
+ self.scripts[folder].append(join(path, f))
+
+ def callScript(self, script, *args):
+ try:
+ cmd = [script] + [str(x) if not isinstance(x, basestring) else x for x in args]
+ #output goes to pyload
+ subprocess.Popen(cmd, bufsize=-1)
+ except Exception, e:
+ self.logError(_("Error in %(script)s: %(error)s") % { "script" :basename(script), "error": str(e)})
+
+ def downloadPreparing(self, pyfile):
+ for script in self.scripts['download_preparing']:
+ self.callScript(script, pyfile.pluginname, pyfile.url, pyfile.id)
+
+ def downloadFinished(self, pyfile):
+ for script in self.scripts['download_finished']:
+ self.callScript(script, pyfile.pluginname, pyfile.url, pyfile.name,
+ save_join(self.core.config['general']['download_folder'], pyfile.package().folder, pyfile.name),
+ pyfile.id)
+
+
+ def packageFinished(self, pypack):
+ for script in self.scripts['package_finished']:
+ folder = self.core.config['general']['download_folder']
+ folder = save_join(folder, pypack.folder)
+
+ self.callScript(script, pypack.name, folder, pypack.password, pypack.id)
+
+ def beforeReconnecting(self, ip):
+ for script in self.scripts['before_reconnect']:
+ self.callScript(script, ip)
+
+ def afterReconnecting(self, ip):
+ for script in self.scripts['after_reconnect']:
+ self.callScript(script, ip)
+
+ def unrarFinished(self, folder, fname):
+ for script in self.scripts["unrar_finished"]:
+ self.callScript(script, folder, fname)
+
+ def allDownloadsFinished(self):
+ for script in self.scripts["all_dls_finished"]:
+ self.callScript(script)
+
+ def allDownloadsProcessed(self):
+ for script in self.scripts["all_dls_processed"]:
+ self.callScript(script)
+
diff --git a/pyload/plugins/addons/ExtractArchive.py b/pyload/plugins/addons/ExtractArchive.py
new file mode 100644
index 000000000..d5609863e
--- /dev/null
+++ b/pyload/plugins/addons/ExtractArchive.py
@@ -0,0 +1,312 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from os.path import basename, isfile, isdir, join
+from traceback import print_exc
+from copy import copy
+
+# monkey patch bug in python 2.6 and lower
+# see http://bugs.python.org/issue6122
+# http://bugs.python.org/issue1236
+# http://bugs.python.org/issue1731717
+if sys.version_info < (2, 7) and os.name != "nt":
+ from subprocess import Popen
+ import errno
+
+    def _eintr_retry_call(func, *args):
+        # Retry *func* until it stops failing with EINTR (interrupted syscall).
+        while True:
+            try:
+                return func(*args)
+            except OSError, e:
+                if e.errno == errno.EINTR:
+                    continue
+                raise
+
+    # unused timeout option for older python version
+    def wait(self, timeout=0):
+        """Wait for child process to terminate. Returns returncode
+        attribute."""
+        if self.returncode is None:
+            try:
+                pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
+            except OSError, e:
+                if e.errno != errno.ECHILD:
+                    raise
+                # This happens if SIGCLD is set to be ignored or waiting
+                # for child processes has otherwise been disabled for our
+                # process. This child is dead, we can't get the status.
+                sts = 0
+            self._handle_exitstatus(sts)
+        return self.returncode
+
+    Popen.wait = wait
+
+if os.name != "nt":
+ from os import chown
+ from pwd import getpwnam
+ from grp import getgrnam
+
+from module.utils.fs import save_join, fs_encode, exists, remove, chmod, makedirs
+from module.plugins.Addon import Addon, threaded, Expose
+from module.plugins.internal.AbstractExtractor import ArchiveError, CRCError, WrongPassword
+
+class ExtractArchive(Addon):
+    """Extract downloaded archives (rar/zip) once packages finish.
+
+    Provides: unrarFinished (folder, filename)
+    """
+    __name__ = "ExtractArchive"
+    __version__ = "0.14"
+    __description__ = "Extract different kind of archives"
+    # User-visible settings. "queue" defers extraction until all downloads are
+    # processed; "destination" may be relative to the package folder or absolute.
+    __config__ = [("activated", "bool", "Activated", True),
+                  ("fullpath", "bool", "Extract full path", True),
+                  ("overwrite", "bool", "Overwrite files", True),
+                  ("passwordfile", "file", "password file", "unrar_passwords.txt"),
+                  ("deletearchive", "bool", "Delete archives when done", False),
+                  ("subfolder", "bool", "Create subfolder for each package", False),
+                  ("destination", "folder", "Extract files to", ""),
+                  ("recursive", "bool", "Extract archives in archvies", True),
+                  ("queue", "bool", "Wait for all downloads to be finished", True),
+                  ("renice", "int", "CPU Priority", 0), ]
+    __author_name__ = ("pyload Team")
+    __author_mail__ = ("admin<at>pyload.org")
+
+    # extra events this addon subscribes to besides the standard hooks
+    event_list = ["allDownloadsProcessed"]
+
+    def setup(self):
+        """Probe available extractor plugins (UnRar, UnZip) and init state."""
+        self.plugins = []
+        self.passwords = []
+        names = []
+
+        for p in ("UnRar", "UnZip"):
+            try:
+                module = self.core.pluginManager.loadModule("internal", p)
+                klass = getattr(module, p)
+                if klass.checkDeps():
+                    names.append(p)
+                    self.plugins.append(klass)
+
+            except OSError, e:
+                # errno 2 (ENOENT): extractor binary not found -> just unavailable
+                if e.errno == 2:
+                    self.logInfo(_("No %s installed") % p)
+                else:
+                    self.logWarning(_("Could not activate %s") % p, str(e))
+                    if self.core.debug:
+                        print_exc()
+
+            except Exception, e:
+                self.logWarning(_("Could not activate %s") % p, str(e))
+                if self.core.debug:
+                    print_exc()
+
+        if names:
+            self.logInfo(_("Activated") + " " + " ".join(names))
+        else:
+            self.logInfo(_("No Extract plugins activated"))
+
+        # queue with package ids
+        self.queue = []
+
+    @Expose
+    def extractPackage(self, id):
+        """ Extract package with given id"""
+        # run in a worker thread so the core loop is not blocked
+        self.manager.startThread(self.extract, [id])
+
+    def packageFinished(self, pypack):
+        # Either defer extraction until all downloads are processed ("queue")
+        # or extract this package right away in its own thread.
+        if self.getConfig("queue"):
+            self.logInfo(_("Package %s queued for later extracting") % pypack.name)
+            self.queue.append(pypack.id)
+        else:
+            self.manager.startThread(self.extract, [pypack.id])
+
+
+    @threaded
+    def allDownloadsProcessed(self, thread):
+        # Drain the deferred queue and extract everything in one pass.
+        local = copy(self.queue)
+        del self.queue[:]
+        self.extract(local, thread)
+
+
+ def extract(self, ids, thread=None):
+ # reload from txt file
+ self.reloadPasswords()
+
+ # dl folder
+ dl = self.config['general']['download_folder']
+
+ extracted = []
+
+ #iterate packages -> plugins -> targets
+ for pid in ids:
+ p = self.core.files.getPackage(pid)
+ self.logInfo(_("Check package %s") % p.name)
+ if not p: continue
+
+ # determine output folder
+ out = save_join(dl, p.folder, "")
+ # force trailing slash
+
+ if self.getConfig("destination") and self.getConfig("destination").lower() != "none":
+
+ out = save_join(dl, p.folder, self.getConfig("destination"), "")
+ #relative to package folder if destination is relative, otherwise absolute path overwrites them
+
+ if self.getConfig("subfolder"):
+ out = join(out, fs_encode(p.folder))
+
+ if not exists(out):
+ makedirs(out)
+
+ files_ids = [(save_join(dl, p.folder, x["name"]), x["id"]) for x in p.getChildren().itervalues()]
+ matched = False
+
+ # check as long there are unseen files
+ while files_ids:
+ new_files_ids = []
+
+ for plugin in self.plugins:
+ targets = plugin.getTargets(files_ids)
+ if targets:
+ self.logDebug("Targets for %s: %s" % (plugin.__name__, targets))
+ matched = True
+ for target, fid in targets:
+ if target in extracted:
+ self.logDebug(basename(target), "skipped")
+ continue
+ extracted.append(target) #prevent extracting same file twice
+
+ klass = plugin(self, target, out, self.getConfig("fullpath"), self.getConfig("overwrite"),
+ self.getConfig("renice"))
+ klass.init()
+
+ self.logInfo(basename(target), _("Extract to %s") % out)
+ new_files = self.startExtracting(klass, fid, p.password.strip().splitlines(), thread)
+ self.logDebug("Extracted: %s" % new_files)
+ self.setPermissions(new_files)
+
+ for file in new_files:
+ if not exists(file):
+ self.logDebug("new file %s does not exists" % file)
+ continue
+ if self.getConfig("recursive") and isfile(file):
+ new_files_ids.append((file, fid)) #append as new target
+
+ files_ids = new_files_ids # also check extracted files
+
+ if not matched: self.logInfo(_("No files found to extract"))
+
+    def startExtracting(self, plugin, fid, passwords, thread):
+        """Run one extractor instance; return extracted file list ([] on failure).
+
+        *passwords* are package-supplied candidates, tried before the saved list.
+        """
+        pyfile = self.core.files.getFile(fid)
+        if not pyfile: return []
+
+        pyfile.setCustomStatus(_("extracting"))
+        thread.addActive(pyfile) #keep this file until everything is done
+
+        try:
+            progress = lambda x: pyfile.setProgress(x)
+            success = False
+
+            if not plugin.checkArchive():
+                # unprotected archive: extract directly
+                plugin.extract(progress)
+                success = True
+            else:
+                self.logInfo(basename(plugin.file), _("Password protected"))
+                self.logDebug("Passwords: %s" % str(passwords))
+
+                pwlist = copy(self.getPasswords())
+                #remove already supplied pws from list (only local)
+                for pw in passwords:
+                    if pw in pwlist: pwlist.remove(pw)
+
+                for pw in passwords + pwlist:
+                    try:
+                        self.logDebug("Try password: %s" % pw)
+                        if plugin.checkPassword(pw):
+                            plugin.extract(progress, pw)
+                            # remember working password for future archives
+                            self.addPassword(pw)
+                            success = True
+                            break
+                    except WrongPassword:
+                        self.logDebug("Password was wrong")
+
+                if not success:
+                    self.logError(basename(plugin.file), _("Wrong password"))
+                    return []
+
+            if self.core.debug:
+                self.logDebug("Would delete: %s" % ", ".join(plugin.getDeleteFiles()))
+
+            if self.getConfig("deletearchive"):
+                files = plugin.getDeleteFiles()
+                self.logInfo(_("Deleting %s files") % len(files))
+                for f in files:
+                    if exists(f): remove(f)
+                    else: self.logDebug("%s does not exists" % f)
+
+            self.logInfo(basename(plugin.file), _("Extracting finished"))
+            # notify listeners (e.g. ExternalScripts' unrarFinished hook)
+            self.manager.dispatchEvent("unrarFinished", plugin.out, plugin.file)
+
+            return plugin.getExtractedFiles()
+
+
+        except ArchiveError, e:
+            self.logError(basename(plugin.file), _("Archive Error"), str(e))
+        except CRCError:
+            self.logError(basename(plugin.file), _("CRC Mismatch"))
+        except Exception, e:
+            if self.core.debug:
+                print_exc()
+            self.logError(basename(plugin.file), _("Unknown Error"), str(e))
+
+        return []
+
+    @Expose
+    def getPasswords(self):
+        """ List of saved passwords """
+        return self.passwords
+
+
+    def reloadPasswords(self):
+        # Re-read the password file into self.passwords; create it if missing.
+        pwfile = self.getConfig("passwordfile")
+        if not exists(pwfile):
+            open(pwfile, "wb").close()
+
+        passwords = []
+        f = open(pwfile, "rb")
+        for pw in f.read().splitlines():
+            passwords.append(pw)
+        f.close()
+
+        self.passwords = passwords
+
+
+    @Expose
+    def addPassword(self, pw):
+        """ Adds a password to saved list"""
+        # Most-recently-successful password goes first; the whole file is
+        # rewritten. NOTE(review): rewrite is not atomic -- a crash mid-write
+        # can lose the password file.
+        pwfile = self.getConfig("passwordfile")
+
+        if pw in self.passwords: self.passwords.remove(pw)
+        self.passwords.insert(0, pw)
+
+        f = open(pwfile, "wb")
+        for pw in self.passwords:
+            f.write(pw + "\n")
+        f.close()
+
+ def setPermissions(self, files):
+ for f in files:
+ if not exists(f): continue
+ try:
+ if self.core.config["permission"]["change_file"]:
+ if isfile(f):
+ chmod(f, int(self.core.config["permission"]["file"], 8))
+ elif isdir(f):
+ chmod(f, int(self.core.config["permission"]["folder"], 8))
+
+ if self.core.config["permission"]["change_dl"] and os.name != "nt":
+ uid = getpwnam(self.config["permission"]["user"])[2]
+ gid = getgrnam(self.config["permission"]["group"])[2]
+ chown(f, uid, gid)
+ except Exception, e:
+ self.logWarning(_("Setting User and Group failed"), e)
diff --git a/pyload/plugins/addons/HotFolder.py b/pyload/plugins/addons/HotFolder.py
new file mode 100644
index 000000000..c3c285369
--- /dev/null
+++ b/pyload/plugins/addons/HotFolder.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+ @interface-version: 0.2
+"""
+
+from os import makedirs
+from os import listdir
+from os.path import exists
+from os.path import join
+from os.path import isfile
+from shutil import move
+import time
+
+from module.plugins.Addon import Addon
+
+class HotFolder(Addon):
+    """Watch a folder (and optionally a link file) and feed new entries to pyload."""
+    __name__ = "HotFolder"
+    __version__ = "0.11"
+    __description__ = """observe folder and file for changes and add container and links"""
+    __config__ = [ ("activated", "bool", "Activated" , "False"),
+                   ("folder", "str", "Folder to observe", "container"),
+                   ("watch_file", "bool", "Observe link file", "False"),
+                   ("keep", "bool", "Keep added containers", "True"),
+                   ("file", "str", "Link file", "links.txt")]
+    __threaded__ = []
+    __author_name__ = ("RaNaN")
+    __author_mail__ = ("RaNaN@pyload.de")
+
+    def setup(self):
+        # polling interval in seconds for periodical()
+        self.interval = 10
+
+    def periodical(self):
+        # Called every self.interval seconds by the addon manager.
+
+        if not exists(join(self.getConfig("folder"), "finished")):
+            makedirs(join(self.getConfig("folder"), "finished"))
+
+        if self.getConfig("watch_file"):
+
+            if not exists(self.getConfig("file")):
+                f = open(self.getConfig("file"), "wb")
+                f.close()
+
+
+            # read the link file, then truncate it.
+            # NOTE(review): links appended between read and truncate are lost;
+            # confirm this best-effort behaviour is acceptable.
+            f = open(self.getConfig("file"), "rb")
+            content = f.read().strip()
+            f.close()
+            f = open(self.getConfig("file"), "wb")
+            f.close()
+            if content:
+                # archive the read links under finished/ and add them as a package
+                name = "%s_%s.txt" % (self.getConfig("file"), time.strftime("%H-%M-%S_%d%b%Y") )
+
+                f = open(join(self.getConfig("folder"), "finished", name), "wb")
+                f.write(content)
+                f.close()
+
+                self.core.api.addPackage(f.name, [f.name], 1)
+
+        for f in listdir(self.getConfig("folder")):
+            path = join(self.getConfig("folder"), f)
+
+            # skip directories and backup/hidden/temp files
+            if not isfile(path) or f.endswith("~") or f.startswith("#") or f.startswith("."):
+                continue
+
+            newpath = join(self.getConfig("folder"), "finished", f if self.getConfig("keep") else "tmp_"+f)
+            move(path, newpath)
+
+            self.logInfo(_("Added %s from HotFolder") % f)
+            self.core.api.addPackage(f, [newpath], 1)
+
+ \ No newline at end of file
diff --git a/pyload/plugins/addons/IRCInterface.py b/pyload/plugins/addons/IRCInterface.py
new file mode 100644
index 000000000..821f80b6c
--- /dev/null
+++ b/pyload/plugins/addons/IRCInterface.py
@@ -0,0 +1,431 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+ @author: jeix
+ @interface-version: 0.2
+"""
+
+from select import select
+import socket
+from threading import Thread
+import time
+from time import sleep
+from traceback import print_exc
+import re
+
+from module.plugins.Addon import Addon
+from module.network.RequestFactory import getURL
+from module.utils import formatSize
+from module.Api import PackageDoesNotExists, FileDoesNotExists
+
+from pycurl import FORM_FILE
+
+class IRCInterface(Thread, Addon):
+    """IRC bot addon: connects to a server and lets the owner control pyload."""
+    __name__ = "IRCInterface"
+    __version__ = "0.11"
+    __description__ = """connect to irc and let owner perform different tasks"""
+    __config__ = [("activated", "bool", "Activated", "False"),
+                  ("host", "str", "IRC-Server Address", "Enter your server here!"),
+                  ("port", "int", "IRC-Server Port", "6667"),
+                  ("ident", "str", "Clients ident", "pyload-irc"),
+                  ("realname", "str", "Realname", "pyload-irc"),
+                  ("nick", "str", "Nickname the Client will take", "pyLoad-IRC"),
+                  ("owner", "str", "Nickname the Client will accept commands from", "Enter your nick here!"),
+                  ("info_file", "bool", "Inform about every file finished", "False"),
+                  ("info_pack", "bool", "Inform about every package finished", "True"),
+                  ("captcha", "bool", "Send captcha requests", "True")]
+    __author_name__ = ("Jeix")
+    __author_mail__ = ("Jeix@hasnomail.com")
+
+    def __init__(self, core, manager):
+        # Daemon thread so the bot never blocks core shutdown.
+        Thread.__init__(self)
+        Addon.__init__(self, core, manager)
+        self.setDaemon(True)
+        # self.sm = core.server_methods
+        self.api = core.api #todo, only use api
+
+    def coreReady(self):
+        # Initialise connection state and start the IRC thread once the core
+        # finished booting.
+        self.new_package = {}
+
+        self.abort = False
+
+        self.links_added = 0
+        self.more = []
+
+        self.start()
+
+
+    def packageFinished(self, pypack):
+        # Announce finished packages when "info_pack" is set; never raise.
+        try:
+            if self.getConfig("info_pack"):
+                self.response(_("Package finished: %s") % pypack.name)
+        except:
+            pass
+
+    def downloadFinished(self, pyfile):
+        # Announce finished downloads when "info_file" is set; never raise.
+        try:
+            if self.getConfig("info_file"):
+                self.response(_("Download finished: %(name)s @ %(plugin)s ") % { "name" : pyfile.name, "plugin": pyfile.pluginname} )
+        except:
+            pass
+
+    def newCaptchaTask(self, task):
+        # Upload textual captchas to a third-party image host and ask the owner
+        # to solve them via the "c <id> <text>" command.
+        if self.getConfig("captcha") and task.isTextual():
+            task.handler.append(self)
+            task.setWaiting(60)
+
+            page = getURL("http://www.freeimagehosting.net/upload.php", post={"attached" : (FORM_FILE, task.captchaFile)}, multipart=True)
+
+            url = re.search(r"\[img\]([^\[]+)\[/img\]\[/url\]", page).group(1)
+            self.response(_("New Captcha Request: %s") % url)
+            self.response(_("Answer with 'c %s text on the captcha'") % task.id)
+
+    def run(self):
+        # connect to IRC etc.
+        self.sock = socket.socket()
+        host = self.getConfig("host")
+        self.sock.connect((host, self.getConfig("port")))
+        nick = self.getConfig("nick")
+        self.sock.send("NICK %s\r\n" % nick)
+        self.sock.send("USER %s %s bla :%s\r\n" % (nick, host, nick))
+        # join any channel listed in the owner setting (entries starting with #)
+        for t in self.getConfig("owner").split():
+            if t.strip().startswith("#"):
+                self.sock.send("JOIN %s\r\n" % t.strip())
+        self.logInfo("pyLoad IRC: Connected to %s!" % host)
+        self.logInfo("pyLoad IRC: Switching to listening mode!")
+        try:
+            self.main_loop()
+
+        except IRCError, ex:
+            # clean disconnect on quit/server error
+            self.sock.send("QUIT :byebye\r\n")
+            print_exc()
+            self.sock.close()
+
+
+    def main_loop(self):
+        """Blocking receive loop; parses lines and dispatches to handle_events."""
+        readbuffer = ""
+        while True:
+            sleep(1)
+            fdset = select([self.sock], [], [], 0)
+            if self.sock not in fdset[0]:
+                continue
+
+            if self.abort:
+                raise IRCError("quit")
+
+            readbuffer += self.sock.recv(1024)
+            temp = readbuffer.split("\n")
+            readbuffer = temp.pop()  # keep trailing partial line for next recv
+
+            for line in temp:
+                line = line.rstrip()
+                first = line.split()
+
+                if first[0] == "PING":
+                    self.sock.send("PONG %s\r\n" % first[1])
+
+                if first[0] == "ERROR":
+                    raise IRCError(line)
+
+                msg = line.split(None, 3)
+                if len(msg) < 4:
+                    continue
+
+                # ":origin ACTION target :text"
+                msg = {
+                    "origin":msg[0][1:],
+                    "action":msg[1],
+                    "target":msg[2],
+                    "text":msg[3][1:]
+                }
+
+                self.handle_events(msg)
+
+
+    def handle_events(self, msg):
+        # Only accept private messages addressed to us from a configured owner.
+        if not msg["origin"].split("!", 1)[0] in self.getConfig("owner").split():
+            return
+
+        if msg["target"].split("!", 1)[0] != self.getConfig("nick"):
+            return
+
+        if msg["action"] != "PRIVMSG":
+            return
+
+        # HANDLE CTCP ANTI FLOOD/BOT PROTECTION
+        if msg["text"] == "\x01VERSION\x01":
+            self.logDebug("Sending CTCP VERSION.")
+            self.sock.send("NOTICE %s :%s\r\n" % (msg['origin'], "pyLoad! IRC Interface"))
+            return
+        elif msg["text"] == "\x01TIME\x01":
+            self.logDebug("Sending CTCP TIME.")
+            self.sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
+            return
+        elif msg["text"] == "\x01LAG\x01":
+            self.logDebug("Received CTCP LAG.") # don't know how to answer
+            return
+
+        trigger = "pass"
+        args = None
+
+        try:
+            temp = msg["text"].split()
+            trigger = temp[0]
+            if len(temp) > 1:
+                args = temp[1:]
+        except:
+            pass
+
+        # dispatch to event_<trigger>; unknown commands fall back to event_pass
+        handler = getattr(self, "event_%s" % trigger, self.event_pass)
+        try:
+            res = handler(args)
+            for line in res:
+                self.response(line, msg["origin"])
+        except Exception, e:
+            self.logError("pyLoad IRC: "+ repr(e))
+
+
+    def response(self, msg, origin=""):
+        # Send *msg* to *origin*, or to every configured owner when unset.
+        if origin == "":
+            for t in self.getConfig("owner").split():
+                self.sock.send("PRIVMSG %s :%s\r\n" % (t.strip(), msg))
+        else:
+            self.sock.send("PRIVMSG %s :%s\r\n" % (origin.split("!", 1)[0], msg))
+
+
+#### Events
+    def event_pass(self, args):
+        # fallback for unknown commands: reply with nothing
+        return []
+
+    def event_status(self, args):
+        # "status": list all active downloads with progress/speed/eta
+        downloads = self.api.statusDownloads()
+        if not downloads:
+            return ["INFO: There are no active downloads currently."]
+
+        temp_progress = ""
+        lines = ["ID - Name - Status - Speed - ETA - Progress"]
+        for data in downloads:
+
+            if data.status == 5:
+                # status 5: waiting -- show wait time instead of percent
+                temp_progress = data.format_wait
+            else:
+                temp_progress = "%d%% (%s)" % (data.percent, data.format_size)
+
+            lines.append("#%d - %s - %s - %s - %s - %s" %
+                (
+                    data.fid,
+                    data.name,
+                    data.statusmsg,
+                    "%s/s" % formatSize(data.speed),
+                    "%s" % data.format_eta,
+                    temp_progress
+                )
+            )
+        return lines
+
+    def event_queue(self, args):
+        # "queue": list packages currently in the download queue
+        ps = self.api.getQueueData()
+
+        if not ps:
+            return ["INFO: There are no packages in queue."]
+
+        lines = []
+        for pack in ps:
+            lines.append('PACKAGE #%s: "%s" with %d links.' % (pack.pid, pack.name, len(pack.links) ))
+
+        return lines
+
+    def event_collector(self, args):
+        # "collector": list packages waiting in the collector
+        ps = self.api.getCollectorData()
+        if not ps:
+            return ["INFO: No packages in collector!"]
+
+        lines = []
+        for pack in ps:
+            lines.append('PACKAGE #%s: "%s" with %d links.' % (pack.pid, pack.name, len(pack.links) ))
+
+        return lines
+
+    def event_info(self, args):
+        # "info <id>": show details of a single link
+        if not args:
+            return ['ERROR: Use info like this: info <id>']
+
+        info = None
+        try:
+            info = self.api.getFileData(int(args[0]))
+
+        except FileDoesNotExists:
+            return ["ERROR: Link doesn't exists."]
+
+        return ['LINK #%s: %s (%s) [%s][%s]' % (info.fid, info.name, info.format_size, info.statusmsg,
+                                                info.plugin)]
+
+ def event_packinfo(self, args):
+ if not args:
+ return ['ERROR: Use packinfo like this: packinfo <id>']
+
+ lines = []
+ pack = None
+ try:
+ pack = self.api.getPackageData(int(args[0]))
+
+ except PackageDoesNotExists:
+ return ["ERROR: Package doesn't exists."]
+
+ id = args[0]
+
+ self.more = []
+
+ lines.append('PACKAGE #%s: "%s" with %d links' % (id, pack.name, len(pack.links)) )
+ for pyfile in pack.links:
+ self.more.append('LINK #%s: %s (%s) [%s][%s]' % (pyfile.fid, pyfile.name, pyfile.format_size,
+ pyfile.statusmsg, pyfile.plugin))
+
+ if len(self.more) < 6:
+ lines.extend(self.more)
+ self.more = []
+ else:
+ lines.extend(self.more[:6])
+ self.more = self.more[6:]
+ lines.append("%d more links do display." % len(self.more))
+
+
+ return lines
+
+ def event_more(self, args):
+ if not self.more:
+ return ["No more information to display."]
+
+ lines = self.more[:6]
+ self.more = self.more[6:]
+ lines.append("%d more links do display." % len(self.more))
+
+ return lines
+
+    def event_start(self, args):
+        # "start": resume downloading
+
+        self.api.unpauseServer()
+        return ["INFO: Starting downloads."]
+
+    def event_stop(self, args):
+        # "stop": pause the server (active downloads keep running)
+
+        self.api.pauseServer()
+        return ["INFO: No new downloads will be started."]
+
+
+    def event_add(self, args):
+        # "add <packagename|id> <links...>": add links to an existing package
+        # (numeric id) or create a new package with the given name.
+        if len(args) < 2:
+            return ['ERROR: Add links like this: "add <packagename|id> links". ',
+                    'This will add the link <link> to to the package <package> / the package with id <id>!']
+
+
+
+        pack = args[0].strip()
+        links = [x.strip() for x in args[1:]]
+
+        count_added = 0
+        count_failed = 0
+        try:
+            # numeric first arg -> existing package
+            id = int(pack)
+            pack = self.api.getPackageData(id)
+            if not pack:
+                return ["ERROR: Package doesn't exists."]
+
+            #TODO add links
+
+            return ["INFO: Added %d links to Package %s [#%d]" % (len(links), pack["name"], id)]
+
+        except:
+            # create new package
+            id = self.api.addPackage(pack, links, 1)
+            return ["INFO: Created new Package %s [#%d] with %d links." % (pack, id, len(links))]
+
+
+    def event_del(self, args):
+        # "del -p|-l <ids...>": delete packages (-p) or links (-l)
+        if len(args) < 2:
+            return ["ERROR: Use del command like this: del -p|-l <id> [...] (-p indicates that the ids are from packages, -l indicates that the ids are from links)"]
+
+        if args[0] == "-p":
+            ret = self.api.deletePackages(map(int, args[1:]))
+            return ["INFO: Deleted %d packages!" % len(args[1:])]
+
+        elif args[0] == "-l":
+            ret = self.api.delLinks(map(int, args[1:]))
+            return ["INFO: Deleted %d links!" % len(args[1:])]
+
+        else:
+            return ["ERROR: Use del command like this: del <-p|-l> <id> [...] (-p indicates that the ids are from packages, -l indicates that the ids are from links)"]
+
+    def event_push(self, args):
+        # "push <id>": move a collector package into the download queue
+        if not args:
+            return ["ERROR: Push package to queue like this: push <package id>"]
+
+        id = int(args[0])
+        try:
+            info = self.api.getPackageInfo(id)
+        except PackageDoesNotExists:
+            return ["ERROR: Package #%d does not exist." % id]
+
+        self.api.pushToQueue(id)
+        return ["INFO: Pushed package #%d to queue." % id]
+
+    def event_pull(self, args):
+        # "pull <id>": move a queued package back to the collector
+        if not args:
+            return ["ERROR: Pull package from queue like this: pull <package id>."]
+
+        id = int(args[0])
+        if not self.api.getPackageData(id):
+            return ["ERROR: Package #%d does not exist." % id]
+
+        self.api.pullFromQueue(id)
+        return ["INFO: Pulled package #%d from queue to collector." % id]
+
+    def event_c(self, args):
+        """ captcha answer """
+        if not args:
+            return ["ERROR: Captcha ID missing."]
+
+        task = self.core.captchaManager.getTaskByID(args[0])
+        if not task:
+            return ["ERROR: Captcha Task with ID %s does not exists." % args[0]]
+
+        task.setResult(" ".join(args[1:]))
+        return ["INFO: Result %s saved." % " ".join(args[1:])]
+
+
+    def event_help(self, args):
+        # "help": list every available bot command
+        lines = ["The following commands are available:",
+                 "add <package|packid> <links> [...] Adds link to package. (creates new package if it does not exist)",
+                 "queue                       Shows all packages in the queue",
+                 "collector                   Shows all packages in collector",
+                 "del -p|-l <id> [...]        Deletes all packages|links with the ids specified",
+                 "info <id>                   Shows info of the link with id <id>",
+                 "packinfo <id>               Shows info of the package with id <id>",
+                 "more                        Shows more info when the result was truncated",
+                 "start                       Starts all downloads",
+                 "stop                        Stops the download (but not abort active downloads)",
+                 "push <id>                   Push package to queue",
+                 "pull <id>                   Pull package from queue",
+                 "status                      Show general download status",
+                 "help                        Shows this help message"]
+        return lines
+
+
+class IRCError(Exception):
+    """Raised to terminate the IRC main loop (user quit or server ERROR line)."""
+    def __init__(self, value):
+        self.value = value
+    def __str__(self):
+        return repr(self.value)
diff --git a/pyload/plugins/addons/ImageTyperz.py b/pyload/plugins/addons/ImageTyperz.py
new file mode 100644
index 000000000..f8f515113
--- /dev/null
+++ b/pyload/plugins/addons/ImageTyperz.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay, RaNaN, zoidberg
+"""
+from __future__ import with_statement
+
+from thread import start_new_thread
+from pycurl import FORM_FILE, LOW_SPEED_TIME
+
+from module.network.RequestFactory import getURL, getRequest
+from module.network.HTTPRequest import BadHeader
+
+from module.plugins.Hook import Hook
+import re
+from base64 import b64encode
+
+class ImageTyperzException(Exception):
+    """Error reported by the ImageTyperz API; *err* carries the raw response."""
+    def __init__(self, err):
+        self.err = err
+
+    def getCode(self):
+        # raw error string returned by the service
+        return self.err
+
+    def __str__(self):
+        return "<ImageTyperzException %s>" % self.err
+
+    def __repr__(self):
+        return "<ImageTyperzException %s>" % self.err
+
+class ImageTyperz(Hook):
+ __name__ = "ImageTyperz"
+ __version__ = "0.04"
+ __description__ = """send captchas to ImageTyperz.com"""
+ __config__ = [("activated", "bool", "Activated", False),
+ ("username", "str", "Username", ""),
+ ("passkey", "password", "Password", ""),
+ ("force", "bool", "Force IT even if client is connected", False)]
+ __author_name__ = ("RaNaN", "zoidberg")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")
+
+ SUBMIT_URL = "http://captchatypers.com/Forms/UploadFileAndGetTextNEW.ashx"
+ RESPOND_URL = "http://captchatypers.com/Forms/SetBadImage.ashx"
+ GETCREDITS_URL = "http://captchatypers.com/Forms/RequestBalance.ashx"
+
+ def setup(self):
+ self.info = {}
+
+ def getCredits(self):
+ response = getURL(self.GETCREDITS_URL,
+ post = {"action": "REQUESTBALANCE",
+ "username": self.getConfig("username"),
+ "password": self.getConfig("passkey")}
+ )
+
+ if response.startswith('ERROR'):
+ raise ImageTyperzException(response)
+
+ try:
+ balance = float(response)
+ except:
+ raise ImageTyperzException("invalid response")
+
+ self.logInfo("Account balance: $%s left" % response)
+ return balance
+
+    def submit(self, captcha, captchaType="file", match=None):
+        """Upload a captcha image; return (ticket, result) from the service.
+
+        Raises ImageTyperzException on an API error or malformed response.
+        """
+        req = getRequest()
+        #raise timeout threshold
+        req.c.setopt(LOW_SPEED_TIME, 80)
+
+        try:
+            #workaround multipart-post bug in HTTPRequest.py
+            # alphanumeric passkey -> safe for multipart; otherwise send the
+            # image base64-encoded in a regular post
+            if re.match("^[A-Za-z0-9]*$", self.getConfig("passkey")):
+                multipart = True
+                data = (FORM_FILE, captcha)
+            else:
+                multipart = False
+                with open(captcha, 'rb') as f:
+                    data = f.read()
+                data = b64encode(data)
+
+            response = req.load(self.SUBMIT_URL,
+                                post={ "action": "UPLOADCAPTCHA",
+                                       "username": self.getConfig("username"),
+                                       "password": self.getConfig("passkey"),
+                                       "file": data},
+                                multipart = multipart)
+        finally:
+            req.close()
+
+        if response.startswith("ERROR"):
+            raise ImageTyperzException(response)
+        else:
+            # expected format: "<ticket>|<solution>"
+            data = response.split('|')
+            if len(data) == 2:
+                ticket, result = data
+            else:
+                raise ImageTyperzException("Unknown response %s" % response)
+
+        return ticket, result
+
+    def newCaptchaTask(self, task):
+        # Claim a captcha task only if: no other service claimed it, it is
+        # textual, credentials are configured, no client is connected (unless
+        # "force"), and the account still has credits.
+        if "service" in task.data:
+            return False
+
+        if not task.isTextual():
+            return False
+
+        if not self.getConfig("username") or not self.getConfig("passkey"):
+            return False
+
+        if self.core.isClientConnected() and not self.getConfig("force"):
+            return False
+
+        if self.getCredits() > 0:
+            task.handler.append(self)
+            task.data['service'] = self.__name__
+            task.setWaiting(100)
+            # solve asynchronously; the result is attached to the task later
+            start_new_thread(self.processCaptcha, (task,))
+
+        else:
+            self.logInfo("Your %s account has not enough credits" % self.__name__)
+
+    def captchaInvalid(self, task):
+        # Report a wrong solution back to the service to request a refund.
+        if task.data['service'] == self.__name__ and "ticket" in task.data:
+            response = getURL(self.RESPOND_URL,
+                              post={"action": "SETBADIMAGE",
+                                    "username": self.getConfig("username"),
+                                    "password": self.getConfig("passkey"),
+                                    "imageid": task.data["ticket"]}
+            )
+
+            if response == "SUCCESS":
+                self.logInfo("Bad captcha solution received, requested refund")
+            else:
+                self.logError("Bad captcha solution received, refund request failed", response)
+
+    def processCaptcha(self, task):
+        # Worker thread: upload the captcha file, store ticket and solution.
+        c = task.captchaFile
+        try:
+            ticket, result = self.submit(c)
+        except ImageTyperzException, e:
+            task.error = e.getCode()
+            return
+
+        task.data["ticket"] = ticket
+        task.setResult(result)
diff --git a/pyload/plugins/addons/LinkdecrypterCom.py b/pyload/plugins/addons/LinkdecrypterCom.py
new file mode 100644
index 000000000..c117cafb9
--- /dev/null
+++ b/pyload/plugins/addons/LinkdecrypterCom.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+
+from module.plugins.Hook import Hook
+from module.network.RequestFactory import getURL
+from module.utils import remove_chars
+
+class LinkdecrypterCom(Hook):
+ __name__ = "LinkdecrypterCom"
+ __version__ = "0.18"
+ __description__ = """linkdecrypter.com - regexp loader"""
+ __config__ = [ ("activated", "bool", "Activated" , "False") ]
+ __author_name__ = ("zoidberg")
+
+ def coreReady(self):
+ page = getURL("http://linkdecrypter.com/")
+ m = re.search(r'<b>Supported\(\d+\)</b>: <i>([^+<]*)', page)
+ if not m:
+ self.logError(_("Crypter list not found"))
+ return
+
+ builtin = [ name.lower() for name in self.core.pluginManager.crypterPlugins.keys() ]
+ builtin.extend([ "downloadserienjunkiesorg" ])
+
+ crypter_pattern = re.compile("(\w[\w.-]+)")
+ online = []
+ for crypter in m.group(1).split(', '):
+ m = re.match(crypter_pattern, crypter)
+ if m and remove_chars(m.group(1), "-.") not in builtin:
+ online.append(m.group(1).replace(".", "\\."))
+
+ if not online:
+ self.logError(_("Crypter list is empty"))
+ return
+
+ regexp = r"https?://([^.]+\.)*?(%s)/.*" % "|".join(online)
+
+ dict = self.core.pluginManager.crypterPlugins[self.__name__]
+ dict["pattern"] = regexp
+ dict["re"] = re.compile(regexp)
+
+ self.logDebug("REGEXP: " + regexp)
diff --git a/pyload/plugins/addons/MergeFiles.py b/pyload/plugins/addons/MergeFiles.py
new file mode 100644
index 000000000..8d7f8aef1
--- /dev/null
+++ b/pyload/plugins/addons/MergeFiles.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: and9000
+"""
+
+import os
+import re
+import sys
+import traceback
+
+from os.path import join
+from module.utils import save_join, fs_encode
+from module.plugins.Addon import Addon
+
+BUFFER_SIZE = 4096
+
+class MergeFiles(Addon):
+ __name__ = "MergeFiles"
+ __version__ = "0.11"
+ __description__ = "Merges parts splitted with hjsplit"
+ __config__ = [
+ ("activated" , "bool" , "Activated" , "False"),
+ ]
+ __threaded__ = ["packageFinished"]
+ __author_name__ = ("and9000")
+ __author_mail__ = ("me@has-no-mail.com")
+
+ def setup(self):
+ # nothing to do
+ pass
+
+ def packageFinished(self, pack):
+ files = {}
+ fid_dict = {}
+ for fid, data in pack.getChildren().iteritems():
+ if re.search("\.[0-9]{3}$", data["name"]):
+ if data["name"][:-4] not in files:
+ files[data["name"][:-4]] = []
+ files[data["name"][:-4]].append(data["name"])
+ files[data["name"][:-4]].sort()
+ fid_dict[data["name"]] = fid
+
+ download_folder = self.core.config['general']['download_folder']
+
+ if self.core.config['general']['folder_per_package']:
+ download_folder = save_join(download_folder, pack.folder)
+
+ for name, file_list in files.iteritems():
+ self.logInfo("Starting merging of %s" % name)
+ final_file = open(join(download_folder, fs_encode(name)), "wb")
+
+ for splitted_file in file_list:
+ self.logDebug("Merging part %s" % splitted_file)
+ pyfile = self.core.files.getFile(fid_dict[splitted_file])
+ pyfile.setStatus("processing")
+ try:
+ s_file = open(os.path.join(download_folder, splitted_file), "rb")
+ size_written = 0
+ s_file_size = int(os.path.getsize(os.path.join(download_folder, splitted_file)))
+ while True:
+ f_buffer = s_file.read(BUFFER_SIZE)
+ if f_buffer:
+ final_file.write(f_buffer)
+ size_written += BUFFER_SIZE
+ pyfile.setProgress((size_written*100)/s_file_size)
+ else:
+ break
+ s_file.close()
+ self.logDebug("Finished merging part %s" % splitted_file)
+ except Exception, e:
+ print traceback.print_exc()
+ finally:
+ pyfile.setProgress(100)
+ pyfile.setStatus("finished")
+ pyfile.release()
+
+ final_file.close()
+ self.logInfo("Finished merging of %s" % name)
+
+
diff --git a/pyload/plugins/addons/MultiDebridCom.py b/pyload/plugins/addons/MultiDebridCom.py
new file mode 100644
index 000000000..c95138648
--- /dev/null
+++ b/pyload/plugins/addons/MultiDebridCom.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.internal.MultiHoster import MultiHoster
+from module.network.RequestFactory import getURL
+from module.common.json_layer import json_loads
+
+
+class MultiDebridCom(MultiHoster):
+    __name__ = "MultiDebridCom"
+    __version__ = "0.01"
+    __type__ = "hook"
+    __config__ = [("activated", "bool", "Activated", "False"),
+                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+                  ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", "False"),
+                  ("interval", "int", "Reload interval in hours (0 to disable)", "24")]
+
+    __description__ = """Multi-debrid.com hook plugin"""
+    __author_name__ = ("stickell")
+    __author_mail__ = ("l.stickell@yahoo.it")
+
+    def getHoster(self):
+        """Return the list of hoster domains supported by multi-debrid.com.
+
+        Queries the public JSON API; the response is expected to carry the
+        list under the 'hosts' key.
+        """
+        json_data = getURL('http://multi-debrid.com/api.php?hosts', decode=True)
+        self.logDebug('JSON data: ' + json_data)
+        json_data = json_loads(json_data)
+
+        return json_data['hosts']
diff --git a/pyload/plugins/addons/MultiHome.py b/pyload/plugins/addons/MultiHome.py
new file mode 100644
index 000000000..e38ce047a
--- /dev/null
+++ b/pyload/plugins/addons/MultiHome.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from module.plugins.Addon import Addon
+from time import time
+
+class MultiHome(Addon):
+    # Rotates outgoing requests across multiple local interface addresses,
+    # always picking the least recently used one per plugin/account pair.
+    __name__ = "MultiHome"
+    __version__ = "0.11"
+    __description__ = """ip address changer"""
+    __config__ = [ ("activated", "bool", "Activated" , "False"),
+                   ("interfaces", "str", "Interfaces" , "None") ]
+    __author_name__ = ("mkaay")
+    __author_mail__ = ("mkaay@mkaay.de")
+
+    def setup(self):
+        """Parse the configured ";"-separated interface list; fall back to
+        the global download interface when none is configured."""
+        self.register = {}
+        self.interfaces = []
+        self.parseInterfaces(self.getConfig("interfaces").split(";"))
+        if not self.interfaces:
+            self.parseInterfaces([self.config["download"]["interface"]])
+            self.setConfig("interfaces", self.toConfig())
+
+    def toConfig(self):
+        """Serialize the interface list back to the ";"-separated form."""
+        return ";".join([i.adress for i in self.interfaces])
+
+    def parseInterfaces(self, interfaces):
+        """Create Interface objects, skipping empty entries and "none"."""
+        for interface in interfaces:
+            if not interface or str(interface).lower() == "none":
+                continue
+            self.interfaces.append(Interface(interface))
+
+    def coreReady(self):
+        """Monkey-patch the request factory so every new request is bound to
+        the least recently used interface for that plugin/account pair."""
+        requestFactory = self.core.requestFactory
+        oldGetRequest = requestFactory.getRequest
+        def getRequest(pluginName, account=None):
+            iface = self.bestInterface(pluginName, account)
+            if iface:
+                iface.useFor(pluginName, account)
+                requestFactory.iface = lambda: iface.adress
+                self.logDebug("Multihome: using address: "+iface.adress)
+            return oldGetRequest(pluginName, account)
+        requestFactory.getRequest = getRequest
+
+    def bestInterface(self, pluginName, account):
+        """Return the interface with the oldest last use for this pair,
+        or None when no interfaces are configured."""
+        best = None
+        for interface in self.interfaces:
+            if not best or interface.lastPluginAccess(pluginName, account) < best.lastPluginAccess(pluginName, account):
+                best = interface
+        return best
+
+class Interface(object):
+    """One local interface/address with a per-(plugin, account) last-use
+    history, used for least-recently-used selection by MultiHome."""
+
+    def __init__(self, adress):
+        self.adress = adress
+        self.history = {}  # (pluginName, account) -> timestamp of last use
+
+    def lastPluginAccess(self, pluginName, account):
+        # 0 means "never used" and therefore sorts as oldest
+        if (pluginName, account) in self.history:
+            return self.history[(pluginName, account)]
+        return 0
+
+    def useFor(self, pluginName, account):
+        # record the access time for LRU selection
+        self.history[(pluginName, account)] = time()
+
+    def __repr__(self):
+        return "<Interface - %s>" % self.adress
diff --git a/pyload/plugins/addons/MultiHoster.py b/pyload/plugins/addons/MultiHoster.py
new file mode 100644
index 000000000..329a87e4a
--- /dev/null
+++ b/pyload/plugins/addons/MultiHoster.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from types import MethodType
+
+from pyload.plugins.MultiHoster import MultiHoster as MultiHosterAccount, normalize
+from pyload.plugins.Addon import Addon, AddEventListener
+from pyload.PluginManager import PluginTuple
+
+
+class MultiHoster(Addon):
+ __version__ = "0.1"
+ __internal__ = True
+ __description__ = "Gives ability to use MultiHoster services."
+ __config__ = []
+ __author_mail__ = ("pyLoad Team",)
+ __author_mail__ = ("support@pyload.org",)
+
+ #TODO: multiple accounts - multihoster / config options
+
+ def init(self):
+
+ # overwritten plugins
+ self.plugins = {}
+
+ def addHoster(self, account):
+
+ self.logInfo(_("Activated %s") % account.__name__)
+
+ pluginMap = {}
+ for name in self.core.pluginManager.getPlugins("hoster").keys():
+ pluginMap[name.lower()] = name
+
+ supported = []
+ new_supported = []
+
+ for hoster in account.getHosterList():
+ name = normalize(hoster)
+
+ if name in pluginMap:
+ supported.append(pluginMap[name])
+ else:
+ new_supported.append(hoster)
+
+ if not supported and not new_supported:
+ account.logError(_("No Hoster loaded"))
+ return
+
+ klass = self.core.pluginManager.getPluginClass(account.__name__)
+
+ # inject plugin plugin
+ account.logDebug("Overwritten Hosters: %s" % ", ".join(sorted(supported)))
+ for hoster in supported:
+ self.plugins[hoster] = klass
+
+ account.logDebug("New Hosters: %s" % ", ".join(sorted(new_supported)))
+
+ # create new regexp
+ patterns = [x.replace(".", "\\.") for x in new_supported]
+
+ if klass.__pattern__:
+ patterns.append(klass.__pattern__)
+
+ regexp = r".*(%s).*" % "|".join(patterns)
+
+ # recreate plugin tuple for new regexp
+ hoster = self.core.pluginManager.getPlugins("hoster")
+ p = hoster[account.__name__]
+ new = PluginTuple(p.version, re.compile(regexp), p.deps, p.category, p.user, p.path)
+ hoster[account.__name__] = new
+
+
+ @AddEventListener("account:deleted")
+ def refreshAccounts(self, plugin=None, user=None):
+ self.logDebug("Re-checking accounts")
+
+ self.plugins = {}
+ for name, account in self.core.accountManager.iterAccounts():
+ if isinstance(account, MultiHosterAccount) and account.isUsable():
+ self.addHoster(account)
+
+ @AddEventListener("account:updated")
+ def refreshAccount(self, plugin, user):
+
+ account = self.core.accountManager.getAccount(plugin, user)
+ if isinstance(account, MultiHosterAccount) and account.isUsable():
+ self.addHoster(account)
+
+ def activate(self):
+ self.refreshAccounts()
+
+ # new method for plugin manager
+ def getPlugin(self2, name):
+ if name in self.plugins:
+ return self.plugins[name]
+ return self2.getPluginClass(name)
+
+ pm = self.core.pluginManager
+ pm.getPlugin = MethodType(getPlugin, pm, object)
+
+ def deactivate(self):
+ #restore state
+ pm = self.core.pluginManager
+ pm.getPlugin = pm.getPluginClass
+
diff --git a/pyload/plugins/addons/MultishareCz.py b/pyload/plugins/addons/MultishareCz.py
new file mode 100644
index 000000000..7e5a3e007
--- /dev/null
+++ b/pyload/plugins/addons/MultishareCz.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+from module.network.RequestFactory import getURL
+from module.plugins.internal.MultiHoster import MultiHoster
+import re
+
+class MultishareCz(MultiHoster):
+    __name__ = "MultishareCz"
+    __version__ = "0.04"
+    __type__ = "hook"
+    __config__ = [("activated", "bool", "Activated", "False"),
+                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+                  ("hosterList", "str", "Hoster list (comma separated)", "uloz.to")]
+    __description__ = """MultiShare.cz hook plugin"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    # matches hoster names flagged "OK" on the monitoring page
+    HOSTER_PATTERN = r'<img class="logo-shareserveru"[^>]*?alt="([^"]+)"></td>\s*<td class="stav">[^>]*?alt="OK"'
+
+    def getHoster(self):
+        """Scrape the monitoring page for hosters currently marked OK."""
+        page = getURL("http://www.multishare.cz/monitoring/")
+        return re.findall(self.HOSTER_PATTERN, page)
diff --git a/pyload/plugins/addons/Premium4Me.py b/pyload/plugins/addons/Premium4Me.py
new file mode 100644
index 000000000..edbdfbdb9
--- /dev/null
+++ b/pyload/plugins/addons/Premium4Me.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+from module.network.RequestFactory import getURL
+from module.plugins.internal.MultiHoster import MultiHoster
+
+class Premium4Me(MultiHoster):
+    __name__ = "Premium4Me"
+    __version__ = "0.03"
+    __type__ = "hook"
+
+    __config__ = [("activated", "bool", "Activated", "False"),
+                  ("hosterListMode", "all;listed;unlisted", "Use for downloads from supported hosters:", "all"),
+                  ("hosterList", "str", "Hoster list (comma separated)", "")]
+    __description__ = """Premium.to hook plugin"""
+    __author_name__ = ("RaNaN", "zoidberg", "stickell")
+    __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+    def getHoster(self):
+        """Return supported hoster names for the active premium.to account
+        (";"-separated API response, quotes stripped)."""
+        page = getURL("http://premium.to/api/hosters.php?authcode=%s" % self.account.authcode)
+        return [x.strip() for x in page.replace("\"", "").split(";")]
+
+    def coreReady(self):
+        """Resolve the account before enabling the multihoster hook; abort
+        when no premium.to account is configured."""
+        self.account = self.core.accountManager.getAccountPlugin("Premium4Me")
+
+        user = self.account.selectAccount()[0]
+
+        if not user:
+            self.logError(_("Please add your premium.to account first and restart pyLoad"))
+            return
+
+        return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/addons/PremiumizeMe.py b/pyload/plugins/addons/PremiumizeMe.py
new file mode 100644
index 000000000..a10c24f85
--- /dev/null
+++ b/pyload/plugins/addons/PremiumizeMe.py
@@ -0,0 +1,50 @@
+from module.plugins.internal.MultiHoster import MultiHoster
+
+from module.common.json_layer import json_loads
+from module.network.RequestFactory import getURL
+
+class PremiumizeMe(MultiHoster):
+    __name__ = "PremiumizeMe"
+    __version__ = "0.12"
+    __type__ = "hook"
+    __description__ = """Premiumize.Me hook plugin"""
+
+    __config__ = [("activated", "bool", "Activated", "False"),
+                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+                  ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to stanard download if download fails", "False"),
+                  ("interval", "int", "Reload interval in hours (0 to disable)", "24")]
+
+    __author_name__ = ("Florian Franzen")
+    __author_mail__ = ("FlorianFranzen@gmail.com")
+
+    def getHoster(self):
+        """Return supported hoster names reported by the premiumize.me API
+        for the selected account, or [] when no usable account exists."""
+        # If no accounts are available there will be no hosters available
+        if not self.account or not self.account.canUse():
+            return []
+
+        # Get account data
+        (user, data) = self.account.selectAccount()
+
+        # Get supported hosters list from premiumize.me using the json API v1
+        # (see https://secure.premiumize.me/?show=api)
+        answer = getURL("https://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s" % (user, data['password']))
+        data = json_loads(answer)
+
+        # If the account is not valid there are no hosters available
+        if data['status'] != 200:
+            return []
+
+        # Extract hosters from json response
+        return data['result']['hosterlist']
+
+    def coreReady(self):
+        # Get account plugin and check if there is a valid account available
+        self.account = self.core.accountManager.getAccountPlugin("PremiumizeMe")
+        if not self.account.canUse():
+            self.account = None
+            self.logError(_("Please add a valid premiumize.me account first and restart pyLoad."))
+            return
+
+        # Run the overwritten coreReady which actually enables the multihoster hook
+        return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/addons/RealdebridCom.py b/pyload/plugins/addons/RealdebridCom.py
new file mode 100644
index 000000000..be74b47c3
--- /dev/null
+++ b/pyload/plugins/addons/RealdebridCom.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+from module.network.RequestFactory import getURL
+from module.plugins.internal.MultiHoster import MultiHoster
+
+class RealdebridCom(MultiHoster):
+    __name__ = "RealdebridCom"
+    __version__ = "0.43"
+    __type__ = "hook"
+
+    __config__ = [("activated", "bool", "Activated", "False"),
+                  ("https", "bool", "Enable HTTPS", "False"),
+                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
+                  ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to stanard download if download fails", "False"),
+                  ("interval", "int", "Reload interval in hours (0 to disable)", "24")]
+    __description__ = """Real-Debrid.com hook plugin"""
+    __author_name__ = ("Devirex, Hazzard")
+    __author_mail__ = ("naibaf_11@yahoo.de")
+
+    def getHoster(self):
+        """Return supported hoster domains from real-debrid.com
+        (comma-separated API response; HTTPS optional via config)."""
+        https = "https" if self.getConfig("https") else "http"
+        page = getURL(https + "://real-debrid.com/api/hosters.php").replace("\"","").strip()
+
+        return [x.strip() for x in page.split(",") if x.strip()]
diff --git a/pyload/plugins/addons/RehostTo.py b/pyload/plugins/addons/RehostTo.py
new file mode 100644
index 000000000..cd9f7ccef
--- /dev/null
+++ b/pyload/plugins/addons/RehostTo.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+from module.network.RequestFactory import getURL
+from module.plugins.internal.MultiHoster import MultiHoster
+
+class RehostTo(MultiHoster):
+    __name__ = "RehostTo"
+    __version__ = "0.43"
+    __type__ = "hook"
+
+    __config__ = [("activated", "bool", "Activated", "False"),
+                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+                  ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to stanard download if download fails", "False"),
+                  ("interval", "int", "Reload interval in hours (0 to disable)", "24")]
+
+    __description__ = """rehost.to hook plugin"""
+    __author_name__ = ("RaNaN")
+    __author_mail__ = ("RaNaN@pyload.org")
+
+    def getHoster(self):
+        """Return supported hoster names from rehost.to, authenticated with
+        the long session token fetched in coreReady."""
+        page = getURL("http://rehost.to/api.php?cmd=get_supported_och_dl&long_ses=%s" % self.long_ses)
+        return [x.strip() for x in page.replace("\"", "").split(",")]
+
+    def coreReady(self):
+        """Resolve the account and cache its session tokens before enabling
+        the multihoster hook; abort if no rehost.to account exists."""
+        self.account = self.core.accountManager.getAccountPlugin("RehostTo")
+
+        user = self.account.selectAccount()[0]
+
+        if not user:
+            self.logError("Rehost.to: "+ _("Please add your rehost.to account first and restart pyLoad"))
+            return
+
+        data = self.account.getAccountInfo(user)
+        self.ses = data["ses"]
+        self.long_ses = data["long_ses"]
+
+        return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/addons/RestartFailed.py b/pyload/plugins/addons/RestartFailed.py
new file mode 100644
index 000000000..c78ccf96c
--- /dev/null
+++ b/pyload/plugins/addons/RestartFailed.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.Hook import Hook
+
+class RestartFailed(Hook):
+    __name__ = "RestartFailed"
+    __version__ = "1.52"
+    __description__ = "restartedFailed Packages after defined time"
+    __config__ = [("activated", "bool", "Activated" , "False"),
+                  ("interval", "int", "Interval in Minutes", "15") ]
+
+    __author_name__ = ("bambie")
+    __author_mail__ = ("bambie@gulli.com")
+
+    # periodical interval in seconds; overwritten from config in coreReady
+    interval = 300
+
+    def setup(self):
+        self.info = {"running": False}
+
+    def coreReady(self):
+        self.info["running"] = True
+        self.logInfo("loaded")
+        self.interval = self.getConfig("interval") * 60
+        self.logDebug("interval is set to %s" % self.interval)
+
+    def periodical(self):
+        """Restart all failed downloads; also re-read the interval config so
+        runtime changes take effect."""
+        self.logDebug("periodical called")
+        if self.getConfig("interval") * 60 != self.interval:
+            self.interval = self.getConfig("interval") * 60
+            self.logDebug("interval is set to %s" % self.interval)
+        self.core.api.restartFailed()
diff --git a/pyload/plugins/addons/SkipRev.py b/pyload/plugins/addons/SkipRev.py
new file mode 100644
index 000000000..561329122
--- /dev/null
+++ b/pyload/plugins/addons/SkipRev.py
@@ -0,0 +1,46 @@
+ # -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: Walter Purcaro
+"""
+
+from module.plugins.Hook import Hook
+from os.path import basename
+from re import search
+
+
+class SkipRev(Hook):
+    __name__ = "SkipRev"
+    __version__ = "0.07"
+    __description__ = "Skip download when filename has rev extension"
+    __config__ = [
+        ("activated", "bool", "Activated", "False"),
+        ("number", "int", "Do not skip until rev part", "1")
+    ]
+    __author_name__ = ("Walter Purcaro")
+    __author_mail__ = ("vuolter@gmail.com")
+
+    def downloadPreparing(self, pyfile):
+        """Mark ".partN.rev" recovery volumes beyond the configured part
+        number as skipped so only the first N are downloaded."""
+        name = basename(pyfile.name)
+        if not name.endswith(".rev"):
+            return
+        number = self.getConfig("number")
+        part = search(r'\.part(\d+)\.rev$', name)
+        # keep unnumbered .rev files and parts up to the configured limit
+        if not part or int(part.group(1)) <= number:
+            return
+        self.logInfo("Skipping " + name)
+        pyfile.setStatus("skipped")
diff --git a/pyload/plugins/addons/UnSkipOnFail.py b/pyload/plugins/addons/UnSkipOnFail.py
new file mode 100644
index 000000000..4b7a58be8
--- /dev/null
+++ b/pyload/plugins/addons/UnSkipOnFail.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: hgg
+"""
+from os.path import basename
+
+from module.utils import fs_encode
+from module.plugins.Hook import Hook
+from module.PyFile import PyFile
+
+class UnSkipOnFail(Hook):
+    __name__ = 'UnSkipOnFail'
+    __version__ = '0.01'
+    __description__ = 'When a download fails, restart "skipped" duplicates.'
+    __config__ = [('activated', 'bool', 'Activated', True),]
+    __author_name__ = ('hagg',)
+    __author_mail__ = ('')
+
+    def downloadFailed(self, pyfile):
+        """Re-queue every "skipped" duplicate of a failed download so another
+        copy of the same file gets a chance to finish."""
+        pyfile_name = basename(pyfile.name)
+        pid = pyfile.package().id
+        msg = 'look for skipped duplicates for %s (pid:%s)...'
+        self.logInfo(msg % (pyfile_name, pid))
+        dups = self.findDuplicates(pyfile)
+        for link in dups:
+            # check if link is "skipped"(=4)
+            if link.status == 4:
+                lpid = link.packageID
+                self.logInfo('restart "%s" (pid:%s)...' % (pyfile_name, lpid))
+                self.setLinkStatus(link, "queued")
+
+    def findDuplicates(self, pyfile):
+        """ Search all packages for duplicate links to "pyfile".
+        Duplicates are links that would overwrite "pyfile".
+        To test on duplicity the package-folder and link-name
+        of two links are compared (basename(link.name)).
+        So this method returns a list of all links with equal
+        package-folders and filenames as "pyfile", but except
+        the data for "pyfile" itself.
+        It does NOT check the link's status.
+        """
+        dups = []
+        pyfile_name = fs_encode(basename(pyfile.name))
+        # get packages (w/o files, as most file data is useless here)
+        queue = self.core.api.getQueue()
+        for package in queue:
+            # check if package-folder equals pyfile's package folder
+            if fs_encode(package.folder) == fs_encode(pyfile.package().folder):
+                # now get packaged data w/ files/links
+                pdata = self.core.api.getPackageData(package.pid)
+                if pdata.links:
+                    for link in pdata.links:
+                        link_name = fs_encode(basename(link.name))
+                        # check if link name collides with pdata's name
+                        if link_name == pyfile_name:
+                            # at last check if it is not pyfile itself
+                            if link.fid != pyfile.id:
+                                dups.append(link)
+        return dups
+
+    def setLinkStatus(self, link, new_status):
+        """ Change status of "link" to "new_status".
+        "link" has to be a valid FileData object,
+        "new_status" has to be a valid status name
+        (i.e. "queued" for this Plugin)
+        It creates a temporary PyFile object using
+        "link" data, changes its status, and tells
+        the core.files-manager to save its data.
+        """
+        pyfile = PyFile(self.core.files,
+                        link.fid,
+                        link.url,
+                        link.name,
+                        link.size,
+                        link.status,
+                        link.error,
+                        link.plugin,
+                        link.packageID,
+                        link.order)
+        pyfile.setStatus(new_status)
+        self.core.files.save()
+        pyfile.release()
diff --git a/pyload/plugins/addons/UnrestrictLi.py b/pyload/plugins/addons/UnrestrictLi.py
new file mode 100644
index 000000000..0810a22d5
--- /dev/null
+++ b/pyload/plugins/addons/UnrestrictLi.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.internal.MultiHoster import MultiHoster
+from module.network.RequestFactory import getURL
+from module.common.json_layer import json_loads
+
+
+class UnrestrictLi(MultiHoster):
+    __name__ = "UnrestrictLi"
+    __version__ = "0.02"
+    __type__ = "hook"
+    __config__ = [("activated", "bool", "Activated", "False"),
+                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
+                  ("hosterList", "str", "Hoster list (comma separated)", ""),
+                  ("unloadFailing", "bool", "Revert to standard download if download fails", "False"),
+                  ("interval", "int", "Reload interval in hours (0 to disable)", "24"),
+                  ("history", "bool", "Delete History", "False")]
+
+    __description__ = """Unrestrict.li hook plugin"""
+    __author_name__ = ("stickell")
+    __author_mail__ = ("l.stickell@yahoo.it")
+
+    def getHoster(self):
+        """Return hoster domains from the unrestrict.li JSON API; each
+        element of 'result' is expected to carry a 'host' key."""
+        json_data = getURL('http://unrestrict.li/api/jdownloader/hosts.php?format=json')
+        json_data = json_loads(json_data)
+
+        host_list = [element['host'] for element in json_data['result']]
+
+        return host_list
diff --git a/pyload/plugins/addons/UpdateManager.py b/pyload/plugins/addons/UpdateManager.py
new file mode 100644
index 000000000..b30289287
--- /dev/null
+++ b/pyload/plugins/addons/UpdateManager.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+import sys
+import re
+from os import stat
+from os.path import join, exists
+from time import time
+
+from module.ConfigParser import IGNORE
+from module.network.RequestFactory import getURL
+from module.plugins.Hook import threaded, Expose, Hook
+
+class UpdateManager(Hook):
+    __name__ = "UpdateManager"
+    __version__ = "0.15"
+    __description__ = """checks for updates"""
+    __config__ = [("activated", "bool", "Activated", "True"),
+                  ("interval", "int", "Check interval in minutes", "480"),
+                  ("debug", "bool", "Check for plugin changes when in debug mode", False)]
+    __author_name__ = ("RaNaN")
+    __author_mail__ = ("ranan@pyload.org")
+
+    # server endpoint; %s is the pyLoad server version
+    URL = "http://get.pyload.org/check2/%s/"
+    MIN_TIME = 3 * 60 * 60  # 3h minimum check interval
+
+    @property
+    def debug(self):
+        # file-change monitoring only when the core itself runs in debug mode
+        return self.core.debug and self.getConfig("debug")
+
+    def setup(self):
+        if self.debug:
+            # debug mode: poll plugin source files for local changes every
+            # 4 seconds; the regular server check is chained via old_periodical
+            self.logDebug("Monitoring file changes")
+            self.interval = 4
+            self.last_check = 0  # timestamp of last server update check
+            self.old_periodical = self.periodical
+            self.periodical = self.checkChanges
+            self.mtimes = {}  # (type, name) -> last seen source file mtime
+        else:
+            self.interval = max(self.getConfig("interval") * 60, self.MIN_TIME)
+
+        self.updated = False   # set once any plugin file was written
+        self.reloaded = True   # result of the last reloadPlugins() call
+        self.version = "None"  # newest pyLoad version reported by server
+
+        self.info = {"pyload": False, "plugins": False}
+
+    @threaded
+    def periodical(self):
+        # Check the server, update plugins and report the outcome.
+        updates = self.checkForUpdate()
+        if updates:
+            self.checkPlugins(updates)
+
+        if self.updated and not self.reloaded:
+            self.info["plugins"] = True
+            self.logInfo(_("*** Plugins have been updated, please restart pyLoad ***"))
+        elif self.updated and self.reloaded:
+            self.logInfo(_("Plugins updated and reloaded"))
+            self.updated = False
+        elif self.version == "None":
+            self.logInfo(_("No plugin updates available"))
+
+    @Expose
+    def recheckForUpdates(self):
+        """recheck if updates are available"""
+        self.periodical()
+
+    def checkForUpdate(self):
+        """checks if an update is available, return result"""
+        # Returns the plugin-update lines (everything after the version line)
+        # when pyLoad itself is up to date, otherwise None.
+        try:
+            if self.version == "None":  # No updated known
+                version_check = getURL(self.URL % self.core.api.getServerVersion()).splitlines()
+                self.version = version_check[0]
+
+                # Still no updates, plugins will be checked
+                if self.version == "None":
+                    self.logInfo(_("No Updates for pyLoad"))
+                    return version_check[1:]
+
+            self.info["pyload"] = True
+            self.logInfo(_("*** New pyLoad Version %s available ***") % self.version)
+            self.logInfo(_("*** Get it here: http://pyload.org/download ***"))
+
+        except:
+            self.logWarning(_("Not able to connect server for updates"))
+
+        return None  # Nothing will be done
+
+    def checkPlugins(self, updates):
+        """ checks for plugins updates"""
+        # "updates" layout: [url_template, "schema|fields", plugin_row, ...]
+
+        # plugins were already updated
+        if self.info["plugins"]: return
+
+        reloads = []
+
+        vre = re.compile(r'__version__.*=.*("|\')([0-9.]+)')
+        url = updates[0]
+        schema = updates[1].split("|")
+        updates = updates[2:]
+
+        for plugin in updates:
+            info = dict(zip(schema, plugin.split("|")))
+            filename = info["name"]
+            prefix = info["type"]
+            version = info["version"]
+
+            if filename.endswith(".pyc"):
+                # compiled plugin: name is everything before the "_" tag
+                name = filename[:filename.find("_")]
+            else:
+                name = filename.replace(".py", "")
+
+            #TODO: obsolete in 0.5.0
+            if prefix.endswith("s"):
+                type = prefix[:-1]
+            else:
+                type = prefix
+
+            plugins = getattr(self.core.pluginManager, "%sPlugins" % type)
+
+            # skip when the local version is already up to date
+            if name in plugins:
+                if float(plugins[name]["v"]) >= float(version):
+                    continue
+
+            if name in IGNORE or (type, name) in IGNORE:
+                continue
+
+            self.logInfo(_("New version of %(type)s|%(name)s : %(version).2f") % {
+                "type": type,
+                "name": name,
+                "version": float(version)
+            })
+
+            try:
+                content = getURL(url % info)
+            except Exception, e:
+                self.logWarning(_("Error when updating %s") % filename, str(e))
+                continue
+
+            # sanity check: downloaded file must declare the expected version
+            m = vre.search(content)
+            if not m or m.group(2) != version:
+                self.logWarning(_("Error when updating %s") % name, _("Version mismatch"))
+                continue
+
+            f = open(join("userplugins", prefix, filename), "wb")
+            f.write(content)
+            f.close()
+            self.updated = True
+
+            reloads.append((prefix, name))
+
+        self.reloaded = self.core.pluginManager.reloadPlugins(reloads)
+
+    def checkChanges(self):
+        # Debug-mode periodical: run the regular update check at the normal
+        # interval, then hot-reload any plugin module whose source file
+        # changed on disk since the last scan.
+        if self.last_check + max(self.getConfig("interval") * 60, self.MIN_TIME) < time():
+            self.old_periodical()
+            self.last_check = time()
+
+        modules = filter(
+            lambda m: m and (m.__name__.startswith("module.plugins.") or m.__name__.startswith("userplugins.")) and m.__name__.count(".") >= 2,
+            sys.modules.itervalues())
+
+        reloads = []
+
+        for m in modules:
+            root, type, name = m.__name__.rsplit(".", 2)
+            id = (type, name)
+            if type in self.core.pluginManager.plugins:
+                # compare against the .py source, not the cached .pyc
+                f = m.__file__.replace(".pyc", ".py")
+                if not exists(f): continue
+
+                mtime = stat(f).st_mtime
+
+                if id not in self.mtimes:
+                    self.mtimes[id] = mtime
+                elif self.mtimes[id] < mtime:
+                    reloads.append(id)
+                    self.mtimes[id] = mtime
+
+        self.core.pluginManager.reloadPlugins(reloads)
diff --git a/pyload/plugins/addons/XFileSharingPro.py b/pyload/plugins/addons/XFileSharingPro.py
new file mode 100644
index 000000000..f14ba7eb7
--- /dev/null
+++ b/pyload/plugins/addons/XFileSharingPro.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.Hook import Hook
+import re
+
class XFileSharingPro(Hook):
    """Hook that feeds the generic XFileSharingPro hoster plugin.

    Builds the set of hoster domains (configured include list, optionally the
    built-in default list, minus the exclude list) and compiles it into the
    URL regexp the XFileSharingPro hoster plugin matches against.
    """
    __name__ = "XFileSharingPro"
    __version__ = "0.05"
    __type__ = "hook"
    __config__ = [("activated", "bool", "Activated", "True"),
                  ("loadDefault", "bool", "Include default (built-in) hoster list", "True"),
                  ("includeList", "str", "Include hosters (comma separated)", ""),
                  ("excludeList", "str", "Exclude hosters (comma separated)", "")]
    __description__ = """Hoster URL pattern loader for the generic XFileSharingPro plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def coreReady(self):
        self.loadPattern()

    def loadPattern(self):
        """Compute the hoster regexp and install it into the hoster plugin entry."""
        hosterList = self.getConfigSet('includeList')
        excludeList = self.getConfigSet('excludeList')

        if self.getConfig('loadDefault'):
            # BUGFIX: commas were missing after "limfile.com" and
            # "fileserver.cc"; implicit string literal concatenation turned
            # them into the bogus entries "limfile.comshare76.com" and
            # "fileserver.ccfarshare.to", dropping four hosters.
            hosterList |= set((
                #WORKING HOSTERS:
                "aieshare.com", "asixfiles.com", "banashare.com", "cyberlocker.ch", "eyesfile.co", "eyesfile.com",
                "fileband.com", "filedwon.com", "filedownloads.org", "hipfile.com", "kingsupload.com", "mlfat4arab.com",
                "netuploaded.com", "odsiebie.pl", "q4share.com", "ravishare.com", "uptobox.com", "verzend.be", "xvidstage.com",
                #NOT TESTED:
                "bebasupload.com", "boosterking.com", "divxme.com", "filevelocity.com", "glumbouploads.com", "grupload.com", "heftyfile.com",
                "host4desi.com", "laoupload.com", "linkzhost.com", "movreel.com", "rockdizfile.com", "limfile.com",
                "share76.com", "sharebeast.com", "sharehut.com", "sharerun.com", "shareswift.com", "sharingonline.com", "6ybh-upload.com",
                "skipfile.com", "spaadyshare.com", "space4file.com", "uploadbaz.com", "uploadc.com",
                "uploaddot.com", "uploadfloor.com", "uploadic.com", "uploadville.com", "vidbull.com", "zalaa.com",
                "zomgupload.com", "kupload.org", "movbay.org", "multishare.org", "omegave.org", "toucansharing.org", "uflinq.org",
                "banicrazy.info", "flowhot.info", "upbrasil.info", "shareyourfilez.biz", "bzlink.us", "cloudcache.cc", "fileserver.cc",
                "farshare.to", "filemaze.ws", "filehost.ws", "filestock.ru", "moidisk.ru", "4up.im", "100shared.com",
                #WRONG FILE NAME:
                "sendmyway.com", "upchi.co.il",
                #NOT WORKING:
                "amonshare.com", "imageporter.com", "file4safe.com",
                #DOWN OR BROKEN:
                "ddlanime.com", "fileforth.com", "loombo.com", "goldfile.eu", "putshare.com"
            ))

        hosterList -= excludeList
        hosterList -= set(('', u''))

        if not hosterList:
            # Nothing left to match: neutralise the hoster pattern entirely.
            self.unload()
            return

        regexp = r"http://(?:[^/]*\.)?(%s)/\w{12}" % ("|".join(sorted(hosterList)).replace('.', '\.'))
        #self.logDebug(regexp)

        # Renamed from "dict" to avoid shadowing the builtin.
        hoster = self.core.pluginManager.hosterPlugins['XFileSharingPro']
        hoster["pattern"] = regexp
        hoster["re"] = re.compile(regexp)
        self.logDebug("Pattern loaded - handling %d hosters" % len(hosterList))

    def getConfigSet(self, option):
        """Return a comma/semicolon/pipe separated config value as a lower-case set."""
        s = self.getConfig(option).lower().replace('|', ',').replace(';', ',')
        return set([x.strip() for x in s.split(',')])

    def unload(self):
        """Make the XFileSharingPro hoster pattern match nothing."""
        hoster = self.core.pluginManager.hosterPlugins['XFileSharingPro']
        hoster["pattern"] = r"^unmatchable$"
        hoster["re"] = re.compile(r"^unmatchable$")
diff --git a/pyload/plugins/addons/XMPPInterface.py b/pyload/plugins/addons/XMPPInterface.py
new file mode 100644
index 000000000..40454f1c3
--- /dev/null
+++ b/pyload/plugins/addons/XMPPInterface.py
@@ -0,0 +1,276 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+ @interface-version: 0.2
+"""
+
+from pyxmpp import streamtls
+from pyxmpp.all import JID, Message, Presence
+from pyxmpp.jabber.client import JabberClient
+from pyxmpp.interface import implements
+from pyxmpp.interfaces import *
+
+from module.plugins.addons.IRCInterface import IRCInterface
+
class XMPPInterface(IRCInterface, JabberClient):
    """Jabber/XMPP remote-control interface.

    Connects to an XMPP server as a bot and lets the configured owner JIDs
    issue the same chat commands the IRCInterface understands (command
    dispatch falls through to IRCInterface's event_* handlers).
    """
    __name__ = "XMPPInterface"
    __version__ = "0.11"
    __description__ = """connect to jabber and let owner perform different tasks"""
    __config__ = [("activated", "bool", "Activated", "False"),
                  ("jid", "str", "Jabber ID", "user@exmaple-jabber-server.org"),
                  ("pw", "str", "Password", ""),
                  ("tls", "bool", "Use TLS", False),
                  ("owners", "str", "List of JIDs accepting commands from", "me@icq-gateway.org;some@msn-gateway.org"),
                  ("info_file", "bool", "Inform about every file finished", "False"),
                  ("info_pack", "bool", "Inform about every package finished", "True"),
                  ("captcha", "bool", "Send captcha requests", "True")]
    __author_name__ = ("RaNaN")
    __author_mail__ = ("RaNaN@pyload.org")

    # Declare to pyxmpp that this object provides message handlers
    # (see get_message_handlers below).
    implements(IMessageHandlersProvider)

    def __init__(self, core, manager):
        IRCInterface.__init__(self, core, manager)

        self.jid = JID(self.getConfig("jid"))
        password = self.getConfig("pw")

        # if bare JID is provided add a resource -- it is required
        if not self.jid.resource:
            self.jid = JID(self.jid.node, self.jid.domain, "pyLoad")

        if self.getConfig("tls"):
            tls_settings = streamtls.TLSSettings(require=True, verify_peer=False)
            auth = ("sasl:PLAIN", "sasl:DIGEST-MD5")
        else:
            tls_settings = None
            auth = ("sasl:DIGEST-MD5", "digest")

        # setup client with provided connection information
        # and identity data
        JabberClient.__init__(self, self.jid, password,
                              disco_name="pyLoad XMPP Client", disco_type="bot",
                              tls_settings=tls_settings, auth_methods=auth)

        # Objects queried by pyxmpp for protocol handlers.
        self.interface_providers = [
            VersionHandler(self),
            self,
        ]

    def coreReady(self):
        # Tracks packages announced but not yet finished.
        self.new_package = {}

        self.start()

    def packageFinished(self, pypack):
        # Best-effort notification; never let a notify error disturb the core.
        try:
            if self.getConfig("info_pack"):
                self.announce(_("Package finished: %s") % pypack.name)
        except:
            pass

    def downloadFinished(self, pyfile):
        # Best-effort notification; never let a notify error disturb the core.
        try:
            if self.getConfig("info_file"):
                self.announce(
                    _("Download finished: %(name)s @ %(plugin)s") % {"name": pyfile.name, "plugin": pyfile.pluginname})
        except:
            pass

    def run(self):
        # connect to IRC etc.
        self.connect()
        try:
            self.loop()
        except Exception, ex:
            self.logError("pyLoad XMPP: %s" % str(ex))

    def stream_state_changed(self, state, arg):
        """This one is called when the state of stream connecting the component
        to a server changes. This will usually be used to let the user
        know what is going on."""
        self.logDebug("pyLoad XMPP: *** State changed: %s %r ***" % (state, arg))

    def disconnected(self):
        self.logDebug("pyLoad XMPP: Client was disconnected")

    def stream_closed(self, stream):
        self.logDebug("pyLoad XMPP: Stream was closed | %s" % stream)

    def stream_error(self, err):
        self.logDebug("pyLoad XMPP: Stream Error: %s" % err)

    def get_message_handlers(self):
        """Return list of (message_type, message_handler) tuples.

        The handlers returned will be called when matching message is received
        in a client session."""
        return [
            ("normal", self.message),
        ]

    def presence_control(self, stanza):
        """Accept subscription stanzas from owners, deny everyone else.

        NOTE(review): the membership test below is a substring check against
        the raw semicolon-separated config string, not against the parsed JID
        list used in message() -- confirm this is intentional.
        """
        from_jid = unicode(stanza.get_from_jid())
        stanza_type = stanza.get_type()
        # NOTE(review): other methods use self.logDebug; self.log.debug is
        # presumably an equivalent logger attribute -- confirm.
        self.log.debug("pyLoad XMPP: %s stanza from %s" % (stanza_type,
                                                           from_jid))

        if from_jid in self.getConfig("owners"):
            return stanza.make_accept_response()

        return stanza.make_deny_response()

    def session_started(self):
        # Announce availability, then route all subscription-related presence
        # stanzas through presence_control.
        self.stream.send(Presence())

        self.stream.set_presence_handler("subscribe", self.presence_control)
        self.stream.set_presence_handler("subscribed", self.presence_control)
        self.stream.set_presence_handler("unsubscribe", self.presence_control)
        self.stream.set_presence_handler("unsubscribed", self.presence_control)

    def message(self, stanza):
        """Message handler for the component.

        Parses the first word of the body as a command trigger, dispatches to
        the matching IRCInterface event_<trigger> handler and returns the
        handler's output as reply Message stanzas (one per line).
        """
        subject = stanza.get_subject()
        body = stanza.get_body()
        t = stanza.get_type()
        self.logDebug(u'pyLoad XMPP: Message from %s received.' % (unicode(stanza.get_from(), )))
        self.logDebug(u'pyLoad XMPP: Body: %s Subject: %s Type: %s' % (body, subject, t))

        if t == "headline":
            # 'headline' messages should never be replied to
            return True
        if subject:
            subject = u"Re: " + subject

        to_jid = stanza.get_from()
        from_jid = stanza.get_to()

        #j = JID()
        to_name = to_jid.as_utf8()
        from_name = from_jid.as_utf8()

        names = self.getConfig("owners").split(";")

        # Only owners may issue commands; both the full JID and the bare
        # node@domain form are accepted.
        if to_name in names or to_jid.node + "@" + to_jid.domain in names:
            messages = []

            # Default to the no-op handler when the body is empty/unparsable.
            trigger = "pass"
            args = None

            try:
                temp = body.split()
                trigger = temp[0]
                if len(temp) > 1:
                    args = temp[1:]
            except:
                pass

            handler = getattr(self, "event_%s" % trigger, self.event_pass)
            try:
                res = handler(args)
                for line in res:
                    m = Message(
                        to_jid=to_jid,
                        from_jid=from_jid,
                        stanza_type=stanza.get_type(),
                        subject=subject,
                        body=line)

                    messages.append(m)
            except Exception, e:
                self.logError("pyLoad XMPP: " + repr(e))

            return messages

        else:
            return True

    def response(self, msg, origin=""):
        """IRCInterface reply hook: broadcast to all owners instead."""
        return self.announce(msg)

    def announce(self, message):
        """ send message to all owners"""
        for user in self.getConfig("owners").split(";"):
            self.logDebug("pyLoad XMPP: Send message to %s" % user)

            to_jid = JID(user)

            m = Message(from_jid=self.jid,
                        to_jid=to_jid,
                        stanza_type="chat",
                        body=message)

            stream = self.get_stream()
            # Reconnect lazily if the stream has gone away.
            if not stream:
                self.connect()
                stream = self.get_stream()

            stream.send(m)

    def beforeReconnecting(self, ip):
        # Drop the XMPP session before the external IP changes.
        self.disconnect()

    def afterReconnecting(self, ip):
        self.connect()
+
+
class VersionHandler(object):
    """Provides handler for a version query.

    This class will answer version query and announce 'jabber:iq:version'
    namespace in the client's disco#info results."""

    implements(IIqHandlersProvider, IFeaturesProvider)

    def __init__(self, client):
        """Just remember who created this."""
        self.client = client

    def get_features(self):
        """Return namespace which should the client include in its reply to a
        disco#info query."""
        return ["jabber:iq:version"]

    def get_iq_get_handlers(self):
        """Return list of tuples (element_name, namespace, handler) describing
        handlers of <iq type='get'/> stanzas"""
        return [
            ("query", "jabber:iq:version", self.get_version),
        ]

    def get_iq_set_handlers(self):
        """Return empty list, as this class provides no <iq type='set'/> stanza handler."""
        return []

    def get_version(self, iq):
        """Handler for jabber:iq:version queries.

        jabber:iq:version queries are not supported directly by PyXMPP, so the
        XML node is accessed directly through the libxml2 API. This should be
        used very carefully!"""
        iq = iq.make_result_response()
        q = iq.new_query("jabber:iq:version")
        q.newTextChild(q.ns(), "name", "Echo component")
        q.newTextChild(q.ns(), "version", "1.0")
        return iq

    def unload(self):
        # BUGFIX: this class only stores self.client -- it has no 'log'
        # attribute and no disconnect() method of its own, so the original
        # 'self.log.debug(...)' / 'self.disconnect()' always raised
        # AttributeError. Delegate both calls to the owning client.
        self.client.logDebug("pyLoad XMPP: unloading")
        self.client.disconnect()

    def deactivate(self):
        self.unload()
diff --git a/pyload/plugins/addons/ZeveraCom.py b/pyload/plugins/addons/ZeveraCom.py
new file mode 100644
index 000000000..cadf60069
--- /dev/null
+++ b/pyload/plugins/addons/ZeveraCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from module.network.RequestFactory import getURL
+from module.plugins.internal.MultiHoster import MultiHoster
+
class ZeveraCom(MultiHoster):
    __name__ = "ZeveraCom"
    __version__ = "0.02"
    __type__ = "hook"
    __config__ = [("activated", "bool", "Activated", "False"),
                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
                  ("hosterList", "str", "Hoster list (comma separated)", "")]
    # BUGFIX: the description said "Real-Debrid.com hook plugin" -- a
    # copy-paste leftover from another multi-hoster plugin.
    __description__ = """Zevera.com hook plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def getHoster(self):
        """Fetch the hoster names supported by the Zevera multi-hoster service."""
        page = getURL("http://www.zevera.com/jDownloader.ashx?cmd=gethosters")
        # The service replies with a quoted, comma separated list.
        return [x.strip() for x in page.replace("\"", "").split(",")]
diff --git a/pyload/plugins/addons/__init__.py b/pyload/plugins/addons/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/addons/__init__.py
diff --git a/pyload/plugins/container/DLC_25.pyc b/pyload/plugins/container/DLC_25.pyc
new file mode 100644
index 000000000..b8fde0051
--- /dev/null
+++ b/pyload/plugins/container/DLC_25.pyc
Binary files differ
diff --git a/pyload/plugins/container/DLC_26.pyc b/pyload/plugins/container/DLC_26.pyc
new file mode 100644
index 000000000..41a4e0cb8
--- /dev/null
+++ b/pyload/plugins/container/DLC_26.pyc
Binary files differ
diff --git a/pyload/plugins/container/DLC_27.pyc b/pyload/plugins/container/DLC_27.pyc
new file mode 100644
index 000000000..a6bffaf74
--- /dev/null
+++ b/pyload/plugins/container/DLC_27.pyc
Binary files differ
diff --git a/pyload/plugins/crypter/BitshareComFolder.py b/pyload/plugins/crypter/BitshareComFolder.py
new file mode 100644
index 000000000..b77ddb9d9
--- /dev/null
+++ b/pyload/plugins/crypter/BitshareComFolder.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
class BitshareComFolder(SimpleCrypter):
    """Bitshare.com folder decrypter.

    Purely declarative subclass: the patterns below are presumably applied by
    SimpleCrypter to the fetched folder page -- confirm against the base class.
    """
    __name__ = "BitshareComFolder"
    __type__ = "crypter"
    __pattern__ = r"http://(?:www\.)?bitshare\.com/\?d=\w+"
    __version__ = "0.01"
    __description__ = """Bitshare.com Folder Plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    # Single capture group: the file download URL of each folder entry.
    LINK_PATTERN = r'<a href="(http://bitshare.com/files/.+)">.+</a></td>'
    # Named group 'title' supplies the package name.
    TITLE_PATTERN = r'View public folder "(?P<title>.+)"</h1>'
diff --git a/pyload/plugins/crypter/C1neonCom.py b/pyload/plugins/crypter/C1neonCom.py
new file mode 100644
index 000000000..36b84764e
--- /dev/null
+++ b/pyload/plugins/crypter/C1neonCom.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: godofdream
+"""
+
+import re
+import random
+from module.plugins.Crypter import Crypter
+from module.common.json_layer import json_loads
class C1neonCom(Crypter):
    """C1neon.com container decrypter.

    Parses the JSON hoster table embedded in a c1neon.com page and builds
    packages for movies or series; package granularity (per episode, season,
    show or into the existing package) is configurable.
    """
    __name__ = "C1neonCom"
    __type__ = "container"
    __pattern__ = r"http://(www\.)?c1neon.com/.*?"
    __version__ = "0.05"
    __config__ = [
        ("changeNameS", "Packagename;Show;Season;Episode", "Rename Show by", "Show"),
        ("changeName", "Packagename;Movie", "Rename Movie by", "Movie"),
        ("useStreams", "bool", "Use Streams too", False),
        ("hosterListMode", "all;onlypreferred", "Use for hosters (if supported)", "all"),
        ("randomPreferred", "bool", "Randomize Preferred-List", False),
        ("hosterList", "str", "Preferred Hoster list (comma separated, no ending)", "2shared,Bayfiles,Netload,Rapidshare,Share-online"),
        ("ignoreList", "str", "Ignored Hoster list (comma separated, no ending)", "Megaupload")
    ]
    __description__ = """C1neon.Com Container Plugin"""
    __author_name__ = ("godofdream")
    __author_mail__ = ("soilfiction@gmail.com")

    # JSON blob with the hoster data, assigned to the JS variable 'subcats'.
    VALUES_PATTERN = r"var subcats = (.*?)(;</script>|;var)"
    SHOW_PATTERN = r"title='(.*?)'"
    # Page <title> marks series pages; anything else is treated as a movie.
    SERIE_PATTERN = r"<title>.*Serie.*</title>"

    def decrypt(self, pyfile):
        src = self.req.load(str(pyfile.url))

        pattern = re.compile(self.VALUES_PATTERN, re.DOTALL)
        data = json_loads(re.search(pattern, src).group(1))

        # Get package info
        links = []
        Showname = re.search(self.SHOW_PATTERN, src)
        if Showname:
            Showname = Showname.group(1).decode("utf-8")
        else:
            Showname = self.pyfile.package().name

        if re.search(self.SERIE_PATTERN, src):
            # Series page: data maps season -> episode -> hoster info.
            for Season in data:
                self.logDebug("Season " + Season)
                for Episode in data[Season]:
                    self.logDebug("Episode " + Episode)
                    links.extend(self.getpreferred(data[Season][Episode]))
                    if self.getConfig("changeNameS") == "Episode":
                        self.packages.append((data[Season][Episode]['info']['name'].split("»")[0], links, data[Season][Episode]['info']['name'].split("»")[0]))
                        links = []

                if self.getConfig("changeNameS") == "Season":
                    self.packages.append((Showname + " Season " + Season, links, Showname + " Season " + Season))
                    links = []

            if self.getConfig("changeNameS") == "Show":
                # FIX: idiomatic emptiness test (was "links == []").
                if not links:
                    self.fail('Could not extract any links (Out of Date?)')
                else:
                    self.packages.append((Showname, links, Showname))

            elif self.getConfig("changeNameS") == "Packagename":
                if not links:
                    self.fail('Could not extract any links (Out of Date?)')
                else:
                    self.core.files.addLinks(links, self.pyfile.package().id)
        else:
            # Movie page: data maps movie -> hoster info.
            for Movie in data:
                links.extend(self.getpreferred(data[Movie]))
            if self.getConfig("changeName") == "Movie":
                if not links:
                    self.fail('Could not extract any links (Out of Date?)')
                else:
                    self.packages.append((Showname, links, Showname))

            elif self.getConfig("changeName") == "Packagename":
                if not links:
                    self.fail('Could not extract any links (Out of Date?)')
                else:
                    self.core.files.addLinks(links, self.pyfile.package().id)

    #selects the preferred hoster, after that selects any hoster (ignoring the one to ignore)
    #selects only one Hoster
    def getpreferred(self, hosterslist):
        """Pick the link parts of the first acceptable hoster for one item.

        'u', 'd' and (optionally) 's' sub-dicts hold upload/download/stream
        mirrors; a hoster from the preferred list wins, otherwise (mode "all")
        the first hoster not on the ignore list is used.
        """
        hosterlist = {}
        if 'u' in hosterslist:
            hosterlist.update(hosterslist['u'])
        # FIX: dropped redundant parentheses around the condition.
        if 'd' in hosterslist:
            hosterlist.update(hosterslist['d'])
        if self.getConfig("useStreams") and 's' in hosterslist:
            hosterlist.update(hosterslist['s'])

        result = []
        preferredList = self.getConfig("hosterList").strip().lower().replace('|', ',').replace('.', '').replace(';', ',').split(',')
        # FIX: truthiness test instead of "== True" (config option is a bool).
        if self.getConfig("randomPreferred"):
            random.shuffle(preferredList)
        for preferred in preferredList:
            for Hoster in hosterlist:
                if preferred == Hoster.split('<')[0].strip().lower().replace('.', ''):
                    for Part in hosterlist[Hoster]:
                        self.logDebug("selected " + Part[3])
                        result.append(str(Part[3]))
                    return result

        ignorelist = self.getConfig("ignoreList").strip().lower().replace('|', ',').replace('.', '').replace(';', ',').split(',')
        if self.getConfig('hosterListMode') == "all":
            for Hoster in hosterlist:
                if Hoster.split('<')[0].strip().lower().replace('.', '') not in ignorelist:
                    for Part in hosterlist[Hoster]:
                        self.logDebug("selected " + Part[3])
                        result.append(str(Part[3]))
                    return result
        return result
+
+
+
diff --git a/pyload/plugins/crypter/CCF.py b/pyload/plugins/crypter/CCF.py
new file mode 100644
index 000000000..ab7ff1099
--- /dev/null
+++ b/pyload/plugins/crypter/CCF.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from urllib2 import build_opener
+
+from module.plugins.Crypter import Crypter
+from module.lib.MultipartPostHandler import MultipartPostHandler
+
+from os import makedirs
+from os.path import exists, join
+
class CCF(Crypter):
    """CCF container decrypter.

    Uploads the local .ccf file to the jdownloader.net conversion service,
    stores the DLC container received in exchange in the download folder and
    queues it as a new package.
    """
    __name__ = "CCF"
    __version__ = "0.2"
    __pattern__ = r"(?!http://).*\.ccf$"
    __description__ = """CCF Container Convert Plugin"""
    __author_name__ = ("Willnix")
    __author_mail__ = ("Willnix@pyload.org")

    def decrypt(self, pyfile):
        infile = pyfile.url.replace("\n", "")

        opener = build_opener(MultipartPostHandler)
        # BUGFIX: the uploaded source file handle was never closed.
        upload = open(infile, "rb")
        try:
            params = {"src": "ccf",
                      "filename": "test.ccf",
                      "upload": upload}
            tempdlc_content = opener.open('http://service.jdownloader.net/dlcrypt/getDLC.php', params).read()
        finally:
            upload.close()

        download_folder = self.config['general']['download_folder']
        location = download_folder #join(download_folder, self.pyfile.package().folder.decode(sys.getfilesystemencoding()))
        if not exists(location):
            makedirs(location)

        tempdlc_name = join(location, "tmp_%s.dlc" % pyfile.name)
        # BUGFIX: close the output handle even if the service reply cannot be
        # parsed (re.search returning None raises AttributeError below).
        tempdlc = open(tempdlc_name, "w")
        try:
            tempdlc.write(re.search(r'<dlc>(.*)</dlc>', tempdlc_content, re.DOTALL).group(1))
        finally:
            tempdlc.close()

        self.packages.append((tempdlc_name, [tempdlc_name], tempdlc_name))
+
diff --git a/pyload/plugins/crypter/CrockoComFolder.py b/pyload/plugins/crypter/CrockoComFolder.py
new file mode 100644
index 000000000..d727ec7ab
--- /dev/null
+++ b/pyload/plugins/crypter/CrockoComFolder.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
class CrockoComFolder(SimpleCrypter):
    """Crocko.com folder decrypter.

    Purely declarative subclass: LINK_PATTERN is presumably applied by
    SimpleCrypter to the fetched folder page -- confirm against the base class.
    """
    __name__ = "CrockoComFolder"
    __type__ = "crypter"
    __pattern__ = r"http://(www\.)?crocko.com/f/.*"
    __version__ = "0.01"
    __description__ = """Crocko.com Folder Plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Captures the download href of each row in the folder listing.
    LINK_PATTERN = r'<td class="last"><a href="([^"]+)">download</a>'
diff --git a/pyload/plugins/crypter/CryptItCom.py b/pyload/plugins/crypter/CryptItCom.py
new file mode 100644
index 000000000..4935758c7
--- /dev/null
+++ b/pyload/plugins/crypter/CryptItCom.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+
+import re
+
+from random import randint
+
+from module.plugins.Crypter import Crypter
+
+
class CryptItCom(Crypter):
    """Crypt-It.com container decrypter.

    Rewrites the share/embed URL to its direct container form, downloads the
    resulting .ccf container and queues it as a new package.
    """
    __name__ = "CryptItCom"
    __type__ = "container"
    __pattern__ = r"http://[\w\.]*?crypt-it\.com/(s|e|d|c)/[\w]+"
    __version__ = "0.1"
    __description__ = """Crypt.It.com Container Plugin"""
    __author_name__ = ("jeix")
    __author_mail__ = ("jeix@hasnomail.de")

    def file_exists(self):
        """Return False when the page shows the generic landing content."""
        page = self.load(self.pyfile.url)
        return r'<div class="folder">Was ist Crypt-It</div>' not in page

    def decrypt(self, pyfile):
        if not self.file_exists():
            self.offline()

        # @TODO parse name and password
        # Any of the /s/, /e/, /d/, /c/ forms maps to the direct /d/ link.
        url = re.sub(r"/(s|e|d|c)/", r"/d/", self.pyfile.url)

        pyfile.name = "tmp_cryptit_%s.ccf" % randint(0, 1000)
        location = self.download(url)

        self.packages.append(["Crypt-it Package", [location], "Crypt-it Package"])
+ \ No newline at end of file
diff --git a/pyload/plugins/crypter/CzshareComFolder.py b/pyload/plugins/crypter/CzshareComFolder.py
new file mode 100644
index 000000000..c240c6a70
--- /dev/null
+++ b/pyload/plugins/crypter/CzshareComFolder.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
class CzshareComFolder(Crypter):
    """Czshare.com folder decrypter.

    Locates the folder table on the page and adds every file link found in it
    to the current package.
    """
    __name__ = "CzshareComFolder"
    __type__ = "crypter"
    __pattern__ = r"http://(\w*\.)*czshare\.(com|cz)/folders/.*"
    __version__ = "0.1"
    __description__ = """Czshare.com Folder Plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Inner table of the folder listing.
    FOLDER_PATTERN = r'<tr class="subdirectory">\s*<td>\s*<table>(.*?)</table>'
    # Info-link href of each file row inside that table.
    LINK_PATTERN = r'<td class="col2"><a href="([^"]+)">info</a></td>'
    #NEXT_PAGE_PATTERN = r'<a class="next " href="/([^"]+)">&nbsp;</a>'

    def decrypt(self, pyfile):
        page = self.load(self.pyfile.url)

        folder = re.search(self.FOLDER_PATTERN, page, re.DOTALL)
        if folder is None:
            self.fail("Parse error (FOLDER)")

        links = re.findall(self.LINK_PATTERN, folder.group(1))

        if links:
            self.core.files.addLinks(links, self.pyfile.package().id)
        else:
            self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/DDLMusicOrg.py b/pyload/plugins/crypter/DDLMusicOrg.py
new file mode 100644
index 000000000..f7cc996d0
--- /dev/null
+++ b/pyload/plugins/crypter/DDLMusicOrg.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from time import sleep
+
+from module.plugins.Crypter import Crypter
+
class DDLMusicOrg(Crypter):
    """ddl-music.org container decrypter.

    Solves the arithmetic anti-bot captcha on the link page and extracts the
    real download link from the follow-up form.
    """
    __name__ = "DDLMusicOrg"
    __type__ = "container"
    __pattern__ = r"http://[\w\.]*?ddl-music\.org/captcha/ddlm_cr\d\.php\?\d+\?\d+"
    __version__ = "0.3"
    __description__ = """ddl-music.org Container Plugin"""
    __author_name__ = ("mkaay")
    __author_mail__ = ("mkaay@mkaay.de")

    def setup(self):
        # Captcha solving relies on one session per link.
        self.multiDL = False

    def decrypt(self, pyfile):
        html = self.req.load(self.pyfile.url, cookies=True)

        if re.search(r"Wer dies nicht rechnen kann", html) is not None:
            self.offline()

        # BUGFIX: the page was loaded into the local variable 'html', but the
        # parsing below read 'self.html', which is never assigned in this
        # plugin and raised AttributeError. Parse the local variable.
        math = re.search(r"(\d+) ([\+-]) (\d+) =\s+<inp", html)
        link_id = re.search(r"name=\"id\" value=\"(\d+)\"", html).group(1)
        linknr = re.search(r"name=\"linknr\" value=\"(\d+)\"", html).group(1)

        # Solve the displayed "a + b" / "a - b" captcha.
        if math.group(2) == "+":
            solve = int(math.group(1)) + int(math.group(3))
        else:
            solve = int(math.group(1)) - int(math.group(3))
        sleep(3)
        htmlwithlink = self.req.load(self.pyfile.url, cookies=True,
                                     post={"calc%s" % linknr: solve,
                                           "send%s" % linknr: "Send",
                                           "id": link_id,
                                           "linknr": linknr})
        m = re.search(r"<form id=\"ff\" action=\"(.*?)\" method=\"post\">", htmlwithlink)
        if m:
            self.packages.append((self.pyfile.package().name, [m.group(1)], self.pyfile.package().folder))
        else:
            self.retry()
diff --git a/pyload/plugins/crypter/DataHuFolder.py b/pyload/plugins/crypter/DataHuFolder.py
new file mode 100644
index 000000000..f710f60d7
--- /dev/null
+++ b/pyload/plugins/crypter/DataHuFolder.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+import re
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class DataHuFolder(SimpleCrypter):
+ __name__ = "DataHuFolder"
+ __type__ = "crypter"
+ __pattern__ = r"http://(www\.)?data.hu/dir/\w+"
+ __version__ = "0.03"
+ __description__ = """Data.hu Folder Plugin"""
+ __author_name__ = ("crash", "stickell")
+ __author_mail__ = ("l.stickell@yahoo.it")
+
+ LINK_PATTERN = r"<a href='(http://data\.hu/get/.+)' target='_blank'>\1</a>"
+ TITLE_PATTERN = ur'<title>(?P<title>.+) Let\xf6lt\xe9se</title>'
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+
+ if u'K\xe9rlek add meg a jelsz\xf3t' in self.html: # Password protected
+ password = self.getPassword()
+ if password is '':
+ self.fail("No password specified, please set right password on Add package form and retry")
+ self.logDebug('The folder is password protected', 'Using password: ' + password)
+ self.html = self.load(pyfile.url, post={'mappa_pass': password}, decode=True)
+ if u'Hib\xe1s jelsz\xf3' in self.html: # Wrong password
+ self.fail("Incorrect password, please set right password on Add package form and retry")
+
+ package_name, folder_name = self.getPackageNameAndFolder()
+
+ package_links = re.findall(self.LINK_PATTERN, self.html)
+ self.logDebug('Package has %d links' % len(package_links))
+
+ if package_links:
+ self.packages = [(package_name, package_links, folder_name)]
+ else:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/DdlstorageComFolder.py b/pyload/plugins/crypter/DdlstorageComFolder.py
new file mode 100644
index 000000000..d76988c92
--- /dev/null
+++ b/pyload/plugins/crypter/DdlstorageComFolder.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
class DdlstorageComFolder(SimpleCrypter):
    """DDLStorage.com folder decrypter.

    Purely declarative subclass: LINK_PATTERN is presumably applied by
    SimpleCrypter to the fetched folder page -- confirm against the base class.
    """
    __name__ = "DdlstorageComFolder"
    __type__ = "crypter"
    __pattern__ = r"http://(?:\w*\.)*?ddlstorage.com/folder/\w{10}"
    __version__ = "0.02"
    __description__ = """DDLStorage.com Folder Plugin"""
    __author_name__ = ("godofdream", "stickell")
    __author_mail__ = ("soilfiction@gmail.com", "l.stickell@yahoo.it")

    # Captures the file URL of each folder entry.
    LINK_PATTERN = '<a class="sub_title" style="text-decoration:none;" href="(http://www.ddlstorage.com/.*)">'
diff --git a/pyload/plugins/crypter/DepositfilesComFolder.py b/pyload/plugins/crypter/DepositfilesComFolder.py
new file mode 100644
index 000000000..9023b238f
--- /dev/null
+++ b/pyload/plugins/crypter/DepositfilesComFolder.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
class DepositfilesComFolder(SimpleCrypter):
    """Depositfiles.com folder decrypter.

    Purely declarative subclass: LINK_PATTERN is presumably applied by
    SimpleCrypter to the fetched folder page -- confirm against the base class.
    """
    __name__ = "DepositfilesComFolder"
    __type__ = "crypter"
    __pattern__ = r"http://(www\.)?depositfiles.com/folders/\w+"
    __version__ = "0.01"
    __description__ = """Depositfiles.com Folder Plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Captures the file href of each row in the folder listing.
    LINK_PATTERN = r'<div class="progressName"[^>]*>\s*<a href="([^"]+)" title="[^"]*" target="_blank">'
diff --git a/pyload/plugins/crypter/Dereferer.py b/pyload/plugins/crypter/Dereferer.py
new file mode 100644
index 000000000..584835e18
--- /dev/null
+++ b/pyload/plugins/crypter/Dereferer.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+"""
+
+import re
+import urllib
+
+from module.plugins.Crypter import Crypter
+
class Dereferer(Crypter):
    """Generic dereferer decrypter.

    Extracts the real target URL embedded in a dereferer link (plain or
    percent-encoded) and queues it in the current package.
    """
    __name__ = "Dereferer"
    __type__ = "crypter"
    __pattern__ = r'https?://([^/]+)/.*?(?P<url>(ht|f)tps?(://|%3A%2F%2F).*)'
    __version__ = "0.1"
    __description__ = """Crypter for dereferers"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def decrypt(self, pyfile):
        # The named group 'url' of __pattern__ holds the wrapped target.
        target = re.match(self.__pattern__, self.pyfile.url).group('url')
        links = [urllib.unquote(target).rstrip('+')]
        self.core.files.addLinks(links, self.pyfile.package().id)
diff --git a/pyload/plugins/crypter/DontKnowMe.py b/pyload/plugins/crypter/DontKnowMe.py
new file mode 100644
index 000000000..dfa72df47
--- /dev/null
+++ b/pyload/plugins/crypter/DontKnowMe.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urllib
+
+from module.plugins.Crypter import Crypter
+
+class DontKnowMe(Crypter):
+ # Resolver for dontknow.me redirect links: the percent-encoded target
+ # URL is the query part after '/at/?', so no page load is needed.
+ __name__ = "DontKnowMe"
+ __type__ = "crypter"
+ __pattern__ = r"http://dontknow.me/at/\?.+$"
+ __version__ = "0.1"
+ __description__ = """DontKnowMe"""
+ __author_name__ = ("selaux")
+ __author_mail__ = ("")
+
+ LINK_PATTERN = r"http://dontknow.me/at/\?(.+)$"
+
+ def decrypt(self, pyfile):
+ # findall()[0] takes the single capture; IndexError here would mean
+ # the URL matched __pattern__ but not LINK_PATTERN (identical shapes).
+ link = re.findall(self.LINK_PATTERN, self.pyfile.url)[0]
+ self.core.files.addLinks([ urllib.unquote(link) ], self.pyfile.package().id)
diff --git a/pyload/plugins/crypter/DownloadVimeoCom.py b/pyload/plugins/crypter/DownloadVimeoCom.py
new file mode 100644
index 000000000..88310915b
--- /dev/null
+++ b/pyload/plugins/crypter/DownloadVimeoCom.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+import HTMLParser
+from module.plugins.Crypter import Crypter
+
+class DownloadVimeoCom(Crypter):
+ # Resolves vimeo.com / smotri.com video pages into direct download
+ # links via the downloadvimeo.com 'generate' service.
+ __name__ = 'DownloadVimeoCom'
+ __type__ = 'crypter'
+ __pattern__ = r'(?:http://vimeo\.com/\d*|http://smotri\.com/video/view/\?id=.*)'
+ ## The download from dailymotion failed with a 403
+ __version__ = '0.1'
+ __description__ = """Video Download Plugin based on downloadvimeo.com"""
+ __author_name__ = ('4Christopher')
+ __author_mail__ = ('4Christopher@gmx.de')
+ BASE_URL = 'http://downloadvimeo.com'
+
+ def decrypt(self, pyfile):
+ self.package = pyfile.package()
+ html = self.load('%s/generate?url=%s' % (self.BASE_URL, pyfile.url))
+ # HTMLParser instance is only used to unescape HTML entities in the URL.
+ h = HTMLParser.HTMLParser()
+ try:
+ # NOTE(review): re.search returns None on no match instead of
+ # raising, so the except branch below can never trigger for a
+ # missing URL; instead f.group() in the else branch would raise
+ # AttributeError. Consider checking 'if f is None' explicitly.
+ f = re.search(r'cmd quality="(?P<quality>[^"]+?)">\s*?(?P<URL>[^<]*?)</cmd>', html)
+ except:
+ self.logDebug('Failed to find the URL')
+ else:
+ url = h.unescape(f.group('URL'))
+ self.logDebug('Quality: %s, URL: %s' % (f.group('quality'), url))
+ # Queue the resolved link under the original package name/folder.
+ self.packages.append((self.package.name, [url], self.package.folder))
diff --git a/pyload/plugins/crypter/DuckCryptInfo.py b/pyload/plugins/crypter/DuckCryptInfo.py
new file mode 100644
index 000000000..4886d24db
--- /dev/null
+++ b/pyload/plugins/crypter/DuckCryptInfo.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.lib.BeautifulSoup import BeautifulSoup
+from module.plugins.Crypter import Crypter
+
+class DuckCryptInfo(Crypter):
+ # Container plugin for duckcrypt.info. URLs are either direct 'link'
+ # pages (handled immediately) or 'folder'/'wait' pages that are first
+ # resolved through the site's ajax auth endpoint.
+ __name__ = "DuckCryptInfo"
+ __type__ = "container"
+ __pattern__ = r"http://(?:www\.)?duckcrypt.info/(folder|wait|link)/(\w+)/?(\w*)"
+ __version__ = "0.02"
+ __description__ = """DuckCrypt.Info Container Plugin"""
+ __author_name__ = ("godofdream")
+ __author_mail__ = ("soilfiction@gmail.com")
+
+ # Pattern for the countdown timer; currently unused (see commented-out
+ # wait handling in decrypt below).
+ TIMER_PATTERN = r'<span id="timer">(.*)</span>'
+
+ def decrypt(self, pyfile):
+ url = pyfile.url
+ # seems we don't need to wait
+ #src = self.req.load(str(url))
+ #found = re.search(self.TIMER_PATTERN, src)
+ #if found:
+ # self.logDebug("Sleeping for" % found.group(1))
+ # self.setWait(int(found.group(1)) ,False)
+ # Group 1 of __pattern__ is the page kind: folder, wait, or link.
+ found = re.search(self.__pattern__, url)
+ if not found:
+ self.fail('Weird error in link')
+ if str(found.group(1)) == "link":
+ self.handleLink(url)
+ else:
+ self.handleFolder(found)
+
+
+
+ def handleFolder(self, found):
+ # Resolve the folder hash (group 2) via the ajax auth endpoint, then
+ # follow the redirect target found in the response.
+ src = self.load("http://duckcrypt.info/ajax/auth.php?hash=" + str(found.group(2)))
+ found = re.search(self.__pattern__, src)
+ self.logDebug("Redirectet to " + str(found.group(0)))
+ src = self.load(str(found.group(0)))
+ soup = BeautifulSoup(src)
+ # Each 'folderbox' div wraps one crypted link entry.
+ cryptlinks = soup.findAll("div", attrs={"class": "folderbox"})
+ self.logDebug("Redirectet to " + str(cryptlinks))
+ if not cryptlinks:
+ self.fail('no links found - (Plugin out of date?)')
+ for clink in cryptlinks:
+ if clink.find("a"):
+ self.handleLink(clink.find("a")['href'])
+
+ def handleLink(self, url):
+ # A link page embeds the real hoster URL in an iframe's src attribute.
+ src = self.load(url)
+ soup = BeautifulSoup(src)
+ link = soup.find("iframe")["src"]
+ if not link:
+ self.logDebug('no links found - (Plugin out of date?)')
+ else:
+ self.core.files.addLinks([link], self.pyfile.package().id)
+
diff --git a/pyload/plugins/crypter/EasybytezComFolder.py b/pyload/plugins/crypter/EasybytezComFolder.py
new file mode 100644
index 000000000..83ec6472e
--- /dev/null
+++ b/pyload/plugins/crypter/EasybytezComFolder.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class EasybytezComFolder(SimpleCrypter):
+ # Folder decrypter for easybytez.com built on SimpleCrypter: the base
+ # class drives extraction via the LINK/TITLE/PAGES patterns below.
+ __name__ = "EasybytezComFolder"
+ __type__ = "crypter"
+ __pattern__ = r"https?://(www\.)?easybytez\.com/users/\w+/\w+"
+ __version__ = "0.02"
+ __description__ = """Easybytez Crypter Plugin"""
+ __author_name__ = ("stickell")
+ __author_mail__ = ("l.stickell@yahoo.it")
+
+ LINK_PATTERN = r'<div class="link"><a href="(http://www\.easybytez\.com/\w+)" target="_blank">.+</a></div>'
+ TITLE_PATTERN = r'<Title>Files of (?P<title>.+) folder</Title>'
+ PAGES_PATTERN = r"<a href='[^']+'>(?P<pages>\d+)</a><a href='[^']+'>Next &#187;</a><br><small>\(\d+ total\)</small></div>"
+
+ def loadPage(self, page_n):
+ # Pagination hook: fetches one folder page by its number.
+ # NOTE(review): presumably called by SimpleCrypter with page numbers
+ # derived from PAGES_PATTERN — confirm against SimpleCrypter.
+ return self.load(self.pyfile.url, get={'page': page_n}, decode=True)
diff --git a/pyload/plugins/crypter/EmbeduploadCom.py b/pyload/plugins/crypter/EmbeduploadCom.py
new file mode 100644
index 000000000..8fd70882f
--- /dev/null
+++ b/pyload/plugins/crypter/EmbeduploadCom.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+from module.network.HTTPRequest import BadHeader
+
+class EmbeduploadCom(Crypter):
+ # Decrypts embedupload.com mirror pages. Links from the user-configured
+ # preferred hosters are tried first; if none resolve, all hosters not on
+ # the ignore list are tried as a fallback.
+ __name__ = "EmbeduploadCom"
+ __type__ = "crypter"
+ __pattern__ = r"http://(www\.)?embedupload.com/\?d=.*"
+ __version__ = "0.02"
+ __description__ = """EmbedUpload.com crypter"""
+ __config__ = [("preferedHoster", "str", "Prefered hoster list (bar-separated) ", "embedupload"),
+ ("ignoredHoster", "str", "Ignored hoster list (bar-separated) ", "")]
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ # Captures (hoster id, mirror URL) pairs from the mirror list markup.
+ LINK_PATTERN = r'<div id="([^"]+)"[^>]*>\s*<a href="([^"]+)" target="_blank" (?:class="DownloadNow"|style="color:red")>'
+
+ def decrypt(self, pyfile):
+ self.html = self.load(self.pyfile.url, decode=True)
+ tmp_links = []
+ new_links = []
+
+ found = re.findall(self.LINK_PATTERN, self.html)
+ if found:
+ # Normalize config entries to lowercase bare hoster names
+ # (e.g. 'Uploaded.net' -> 'uploaded') before matching ids.
+ prefered_set = set(self.getConfig("preferedHoster").split('|'))
+ prefered_set = map(lambda s: s.lower().split('.')[0], prefered_set)
+ # NOTE(review): leftover debug print — should be logDebug.
+ print "PF", prefered_set
+ tmp_links.extend([x[1] for x in found if x[0] in prefered_set])
+ self.getLocation(tmp_links, new_links)
+
+ if not new_links:
+ # Fallback: accept any hoster not explicitly ignored.
+ ignored_set = set(self.getConfig("ignoredHoster").split('|'))
+ ignored_set = map(lambda s: s.lower().split('.')[0], ignored_set)
+ # NOTE(review): leftover debug print — should be logDebug.
+ print "IG", ignored_set
+ tmp_links.extend([x[1] for x in found if x[0] not in ignored_set])
+ self.getLocation(tmp_links, new_links)
+
+ if new_links:
+ self.core.files.addLinks(new_links, self.pyfile.package().id)
+ else:
+ self.fail('Could not extract any links')
+
+ def getLocation(self, tmp_links, new_links):
+ # Resolve each intermediate link to its redirect target by reading
+ # the 'location' response header; unreachable links are skipped.
+ for link in tmp_links:
+ try:
+ header = self.load(link, just_header = True)
+ if "location" in header:
+ new_links.append(header['location'])
+ except BadHeader:
+ pass
+
+ \ No newline at end of file
diff --git a/pyload/plugins/crypter/FilebeerInfoFolder.py b/pyload/plugins/crypter/FilebeerInfoFolder.py
new file mode 100644
index 000000000..f45144f14
--- /dev/null
+++ b/pyload/plugins/crypter/FilebeerInfoFolder.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
+class FilebeerInfoFolder(Crypter):
+ # Folder decrypter for filebeer.info: walks all result pages of a
+ # folder listing and collects the contained file links.
+ __name__ = "FilebeerInfoFolder"
+ __type__ = "crypter"
+ __pattern__ = r"http://(?:www\.)?filebeer\.info/(\d+~f).*"
+ __version__ = "0.01"
+ __description__ = """Filebeer.info Folder Plugin"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ LINK_PATTERN = r'<td title="[^"]*"><a href="([^"]+)" target="_blank">'
+ PAGE_COUNT_PATTERN = r'<p class="introText">\s*Total Pages (\d+)'
+
+ def decrypt(self, pyfile):
+ # Normalize the URL to page 1 of the folder before loading.
+ pyfile.url = re.sub(self.__pattern__, r'http://filebeer.info/\1?page=1', pyfile.url)
+ html = self.load(pyfile.url)
+
+ # AttributeError here means PAGE_COUNT_PATTERN no longer matches.
+ page_count = int(re.search(self.PAGE_COUNT_PATTERN, html).group(1))
+ new_links = []
+
+ for i in range(1, page_count + 1):
+ self.logInfo("Fetching links from page %i" % i)
+ new_links.extend(re.findall(self.LINK_PATTERN, html))
+
+ # Load the next page only if there is one left to process.
+ if i < page_count:
+ html = self.load("%s?page=%d" % (pyfile.url, i+1))
+
+ if new_links:
+ self.core.files.addLinks(new_links, self.pyfile.package().id)
+ else:
+ self.fail('Could not extract any links') \ No newline at end of file
diff --git a/pyload/plugins/crypter/FilefactoryComFolder.py b/pyload/plugins/crypter/FilefactoryComFolder.py
new file mode 100644
index 000000000..32793b491
--- /dev/null
+++ b/pyload/plugins/crypter/FilefactoryComFolder.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
+class FilefactoryComFolder(Crypter):
+ # Folder decrypter for filefactory.com: follows the paginator's
+ # "next page" links (hard-capped at 99 pages) and collects file links.
+ __name__ = "FilefactoryComFolder"
+ __type__ = "crypter"
+ __pattern__ = r"(http://(www\.)?filefactory\.com/f/\w+).*"
+ __version__ = "0.1"
+ __description__ = """Filefactory.com Folder Plugin"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ FOLDER_PATTERN = r'<table class="items" cellspacing="0" cellpadding="0">(.*?)</table>'
+ LINK_PATTERN = r'<td class="name"><a href="([^"]+)">'
+ PAGINATOR_PATTERN = r'<div class="list">\s*<label>Pages</label>\s*<ul>(.*?)</ul>\s*</div>'
+ NEXT_PAGE_PATTERN = r'<li class="current">.*?</li>\s*<li class=""><a href="([^"]+)">'
+
+ def decrypt(self, pyfile):
+ # Group 1 of __pattern__ is the folder base URL without extra path.
+ url_base = re.search(self.__pattern__, self.pyfile.url).group(1)
+ html = self.load(url_base)
+
+ new_links = []
+ for i in range(1,100):
+ self.logInfo("Fetching links from page %i" % i)
+ found = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
+ if found is None: self.fail("Parse error (FOLDER)")
+
+ new_links.extend(re.findall(self.LINK_PATTERN, found.group(1)))
+
+ # Any failure to find a paginator / next page ends the loop —
+ # this is the normal exit on the last page.
+ try:
+ paginator = re.search(self.PAGINATOR_PATTERN, html, re.DOTALL).group(1)
+ next_page = re.search(self.NEXT_PAGE_PATTERN, paginator).group(1)
+ html = self.load("%s/%s" % (url_base, next_page))
+ except Exception, e:
+ break
+ else:
+ # for/else: runs only when the loop finished without break,
+ # i.e. 99 pages were fetched.
+ self.logInfo("Limit of 99 pages reached, aborting")
+
+ if new_links:
+ self.core.files.addLinks(new_links, self.pyfile.package().id)
+ else:
+ self.fail('Could not extract any links') \ No newline at end of file
diff --git a/pyload/plugins/crypter/FileserveComFolder.py b/pyload/plugins/crypter/FileserveComFolder.py
new file mode 100644
index 000000000..9fe806971
--- /dev/null
+++ b/pyload/plugins/crypter/FileserveComFolder.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+
+class FileserveComFolder(Crypter):
+ # Folder decrypter for fileserve.com list pages: extracts relative
+ # file links from the file table and prefixes the host.
+ __name__ = "FileserveComFolder"
+ __type__ = "crypter"
+ __pattern__ = r"http://(?:www\.)?fileserve.com/list/\w+"
+ __version__ = "0.11"
+ __description__ = """FileServeCom.com Folder Plugin"""
+ __author_name__ = ("fionnc")
+ __author_mail__ = ("fionnc@gmail.com")
+
+ FOLDER_PATTERN = r'<table class="file_list">(.*?)</table>'
+ LINK_PATTERN = r'<a href="([^"]+)" class="sheet_icon wbold">'
+
+ def decrypt(self, pyfile):
+ html = self.load(self.pyfile.url)
+
+ new_links = []
+
+ folder = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
+ if folder is None: self.fail("Parse error (FOLDER)")
+
+ new_links.extend(re.findall(self.LINK_PATTERN, folder.group(1)))
+
+ if new_links:
+ # Extracted hrefs are site-relative; prepend the host before queueing.
+ self.core.files.addLinks(map(lambda s:"http://fileserve.com%s" % s, new_links), self.pyfile.package().id)
+ else:
+ self.fail('Could not extract any links') \ No newline at end of file
diff --git a/pyload/plugins/crypter/FourChanOrg.py b/pyload/plugins/crypter/FourChanOrg.py
new file mode 100644
index 000000000..5c96e723d
--- /dev/null
+++ b/pyload/plugins/crypter/FourChanOrg.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+
+class FourChanOrg(Crypter):
+ # Based on 4chandl by Roland Beermann
+ # https://gist.github.com/enkore/3492599
+ # Collects every image URL from a 4chan thread page and queues them
+ # into the current package.
+ __name__ = "FourChanOrg"
+ __type__ = "container"
+ __version__ = "0.3"
+ __pattern__ = r"http://boards\.4chan.org/\w+/res/(\d+)"
+ __description__ = "Downloader for entire 4chan threads"
+
+ def decrypt(self, pyfile):
+ pagehtml = self.load(pyfile.url)
+
+ # set() deduplicates images that appear both as thumbnail link and
+ # inline reference in the page markup.
+ images = set(re.findall(r'(images\.4chan\.org/[^/]*/src/[^"<]*)', pagehtml))
+ urls = []
+ for image in images:
+ urls.append("http://" + image)
+
+ self.core.files.addLinks(urls, self.pyfile.package().id)
diff --git a/pyload/plugins/crypter/FshareVnFolder.py b/pyload/plugins/crypter/FshareVnFolder.py
new file mode 100644
index 000000000..2515e7edd
--- /dev/null
+++ b/pyload/plugins/crypter/FshareVnFolder.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
+class FshareVnFolder(SimpleCrypter):
+ # Folder decrypter for fshare.vn; all extraction logic lives in
+ # SimpleCrypter, driven by LINK_PATTERN below.
+ __name__ = "FshareVnFolder"
+ __type__ = "crypter"
+ __pattern__ = r"http://(www\.)?fshare.vn/folder/.*"
+ __version__ = "0.01"
+ __description__ = """Fshare.vn Folder Plugin"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ LINK_PATTERN = r'<li class="w_80pc"><a href="([^"]+)" target="_blank">' \ No newline at end of file
diff --git a/pyload/plugins/crypter/GooGl.py b/pyload/plugins/crypter/GooGl.py
new file mode 100644
index 000000000..bcb1d7494
--- /dev/null
+++ b/pyload/plugins/crypter/GooGl.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.Crypter import Crypter
+from module.common.json_layer import json_loads
+
+
+class GooGl(Crypter):
+ # Expands goo.gl short links through Google's public urlshortener API
+ # and queues the resulting long URL.
+ __name__ = "GooGl"
+ __type__ = "crypter"
+ __pattern__ = r"https?://(www\.)?goo\.gl/\w+"
+ __version__ = "0.01"
+ __description__ = """Goo.gl Crypter Plugin"""
+ __author_name__ = ("stickell")
+ __author_mail__ = ("l.stickell@yahoo.it")
+
+ API_URL = 'https://www.googleapis.com/urlshortener/v1/url'
+
+ def decrypt(self, pyfile):
+ # The API returns JSON; 'longUrl' holds the expanded target.
+ rep = self.load(self.API_URL, get={'shortUrl': pyfile.url})
+ self.logDebug('JSON data: ' + rep)
+ rep = json_loads(rep)
+
+ if 'longUrl' in rep:
+ self.core.files.addLinks([rep['longUrl']], self.pyfile.package().id)
+ else:
+ self.fail('Unable to expand shortened link')
diff --git a/pyload/plugins/crypter/HoerbuchIn.py b/pyload/plugins/crypter/HoerbuchIn.py
new file mode 100644
index 000000000..6f23b2eb9
--- /dev/null
+++ b/pyload/plugins/crypter/HoerbuchIn.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+from module.lib.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
+
+class HoerbuchIn(Crypter):
+ # Container plugin for hoerbuch.in. Article pages are split into one
+ # package per protected folder link; direct protection URLs are
+ # decrypted into the current package.
+ __name__ = "HoerbuchIn"
+ __type__ = "container"
+ __pattern__ = r"http://(www\.)?hoerbuch\.in/(wp/horbucher/\d+/.+/|tp/out.php\?.+|protection/folder_\d+\.html)"
+ __version__ = "0.7"
+ __description__ = """Hoerbuch.in Container Plugin"""
+ __author_name__ = ("spoob", "mkaay")
+ __author_mail__ = ("spoob@pyload.org", "mkaay@mkaay.de")
+
+ # Sub-patterns of __pattern__ used to distinguish the two page kinds.
+ article = re.compile("http://(www\.)?hoerbuch\.in/wp/horbucher/\d+/.+/")
+ protection = re.compile("http://(www\.)?hoerbuch\.in/protection/folder_\d+.html")
+
+ def decrypt(self, pyfile):
+ self.pyfile = pyfile
+
+ if self.article.match(self.pyfile.url):
+ src = self.load(self.pyfile.url)
+ soup = BeautifulSoup(src, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
+
+ # The bookmark anchor carries the audiobook title; each matching
+ # protection link becomes its own package named after the part
+ # label preceding it in the markup.
+ abookname = soup.find("a", attrs={"rel": "bookmark"}).text
+ for a in soup.findAll("a", attrs={"href": self.protection}):
+ package = "%s (%s)" % (abookname, a.previousSibling.previousSibling.text[:-1])
+ links = self.decryptFolder(a["href"])
+
+ self.packages.append((package, links, self.pyfile.package().folder))
+ else:
+ links = self.decryptFolder(self.pyfile.url)
+
+ self.packages.append((self.pyfile.package().name, links, self.pyfile.package().folder))
+
+ def decryptFolder(self, url):
+ # Accept only protection-folder URLs; anything else is a caller bug.
+ m = self.protection.search(url)
+ if not m:
+ self.fail("Bad URL")
+ url = m.group(0)
+
+ self.pyfile.url = url
+ # 'viewed=adpg' unlocks the folder page (ad-gate bypass parameter).
+ src = self.req.load(url, post={"viewed": "adpg"})
+
+ links = []
+ pattern = re.compile(r'<div class="container"><a href="(.*?)"')
+ for hoster_url in pattern.findall(src):
+ # Follow each intermediate link and record the final redirect
+ # target as the real hoster URL.
+ self.req.lastURL = url
+ self.load(hoster_url)
+ links.append(self.req.lastEffectiveURL)
+
+ return links
diff --git a/pyload/plugins/crypter/HotfileFolderCom.py b/pyload/plugins/crypter/HotfileFolderCom.py
new file mode 100644
index 000000000..ea7311e3c
--- /dev/null
+++ b/pyload/plugins/crypter/HotfileFolderCom.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+
+class HotfileFolderCom(Crypter):
+ # Folder decrypter for hotfile.com list pages.
+ __name__ = "HotfileFolderCom"
+ __type__ = "crypter"
+ __pattern__ = r"http://(?:www\.)?hotfile.com/list/\w+/\w+"
+ __version__ = "0.2"
+ __description__ = """HotfileFolder Download Plugin"""
+ __author_name__ = ("RaNaN")
+ __author_mail__ = ("RaNaN@pyload.org")
+
+ def decryptURL(self, url):
+ # Returns the list of download links found on the folder page.
+ html = self.load(url)
+
+ new_links = []
+ # findall returns (full url, optional 'www.') tuples because the
+ # pattern has two groups; [0] keeps the full URL.
+ for link in re.findall(r'href="(http://(www.)?hotfile\.com/dl/\d+/[0-9a-zA-Z]+[^"]+)', html):
+ new_links.append(link[0])
+
+ if new_links:
+ self.logDebug("Found %d new links" % len(new_links))
+ return new_links
+ else:
+ self.fail('Could not extract any links')
+
diff --git a/pyload/plugins/crypter/ILoadTo.py b/pyload/plugins/crypter/ILoadTo.py
new file mode 100644
index 000000000..100ba2bc6
--- /dev/null
+++ b/pyload/plugins/crypter/ILoadTo.py
@@ -0,0 +1,62 @@
+
+import re
+import urllib
+
+from module.plugins.Crypter import Crypter
+from module.lib.BeautifulSoup import BeautifulSoup
+
+class ILoadTo(Crypter):
+ # Decrypter for iload.to: solves the page captcha, submits it, then
+ # extracts the revealed hoster links (unwrapping dontknow.me redirects).
+ __name__ = "ILoadTo"
+ __type__ = "crypter"
+ __pattern__ = r"http://iload\.to/go/\d+-[\w\.-]+/"
+ __config__ = []
+ __version__ = "0.1"
+ __description__ = """iload.to Crypter Plugin"""
+ __author_name__ = ("hzpz")
+ __author_mail__ = ("none")
+
+
+ def decrypt(self, pyfile):
+ url = pyfile.url
+ src = self.req.load(str(url))
+ soup = BeautifulSoup(src)
+
+ # find captcha URL and decrypt
+ captchaTag = soup.find("img", attrs={"id": "Captcha"})
+ if not captchaTag:
+ self.fail("Cannot find Captcha")
+
+ captchaUrl = "http://iload.to" + captchaTag["src"]
+ self.logDebug("Captcha URL: %s" % captchaUrl)
+ result = self.decryptCaptcha(str(captchaUrl))
+
+ # find captcha form URL
+ formTag = soup.find("form", attrs={"id": "CaptchaForm"})
+ formUrl = "http://iload.to" + formTag["action"]
+ self.logDebug("Form URL: %s" % formUrl)
+
+ # submit decrypted captcha
+ self.req.lastURL = url
+ src = self.req.load(str(formUrl), post={'captcha': result})
+
+ # find decrypted links
+ links = re.findall(r"<a href=\"(.+)\" style=\"text-align:center;font-weight:bold;\" class=\"button\" target=\"_blank\" onclick=\"this.className\+=' success';\">", src)
+
+ # No links in the response means the captcha answer was wrong:
+ # retry the whole decrypt.
+ if not len(links) > 0:
+ self.retry()
+
+ self.correctCaptcha()
+
+ cleanedLinks = []
+ for link in links:
+ # Unwrap dontknow.me redirector links; [23:] strips the
+ # 'http://dontknow.me/at/?' prefix before percent-decoding.
+ if link.startswith("http://dontknow.me/at/?"):
+ cleanedLink = urllib.unquote(link[23:])
+ else:
+ cleanedLink = link
+ self.logDebug("Link: %s" % cleanedLink)
+ cleanedLinks.append(cleanedLink)
+
+ self.logDebug("Decrypted %d links" % len(links))
+
+ # Archive password used by iload.to releases.
+ self.pyfile.package().password = "iload.to"
+ self.packages.append((self.pyfile.package().name, cleanedLinks, self.pyfile.package().folder)) \ No newline at end of file
diff --git a/pyload/plugins/crypter/LetitbitNetFolder.py b/pyload/plugins/crypter/LetitbitNetFolder.py
new file mode 100644
index 000000000..68aad9dd7
--- /dev/null
+++ b/pyload/plugins/crypter/LetitbitNetFolder.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
+
+class LetitbitNetFolder(Crypter):
+ # Folder decrypter for letitbit.net: pulls file links out of the
+ # folder's listing table.
+ __name__ = "LetitbitNetFolder"
+ __type__ = "crypter"
+ __pattern__ = r"http://(?:www\.)?letitbit.net/folder/\w+"
+ __version__ = "0.1"
+ __description__ = """Letitbit.net Folder Plugin"""
+ __author_name__ = ("DHMH", "z00nx")
+ __author_mail__ = ("webmaster@pcProfil.de", "z00nx0@gmail.com")
+
+ FOLDER_PATTERN = r'<table>(.*)</table>'
+ LINK_PATTERN = r'<a href="([^"]+)" target="_blank">'
+
+ def decrypt(self, pyfile):
+ html = self.load(self.pyfile.url)
+
+ new_links = []
+
+ folder = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
+ if folder is None:
+ self.fail("Parse error (FOLDER)")
+
+ # group(0) is the whole <table>...</table> match, searched for links.
+ new_links.extend(re.findall(self.LINK_PATTERN, folder.group(0)))
+
+ if new_links:
+ self.core.files.addLinks(new_links, self.pyfile.package().id)
+ else:
+ self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/LinkList.py b/pyload/plugins/crypter/LinkList.py
new file mode 100644
index 000000000..ebfa373eb
--- /dev/null
+++ b/pyload/plugins/crypter/LinkList.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from module.plugins.Crypter import Crypter, Package
+
+class LinkList(Crypter):
+ # Container plugin that parses plain-text link lists (.txt). Supports
+ # ';' comment lines and '[name]' section headers that start a new
+ # package; links before any header go into the current package.
+ __name__ = "LinkList"
+ __version__ = "0.11"
+ __pattern__ = r".+\.txt$"
+ __description__ = """Read Link Lists in txt format"""
+ __author_name__ = ("spoob", "jeix")
+ __author_mail__ = ("spoob@pyload.org", "jeix@hasnomail.com")
+
+ # method declaration is needed here
+ def decryptURL(self, url):
+ return Crypter.decryptURL(self, url)
+
+ def decryptFile(self, content):
+ links = content.splitlines()
+
+ curPack = "default"
+ packages = {curPack:[]}
+
+ for link in links:
+ link = link.strip()
+ if not link: continue
+
+ # Lines starting with ';' are comments.
+ if link.startswith(";"):
+ continue
+ if link.startswith("[") and link.endswith("]"):
+ # new package
+ curPack = link[1:-1]
+ packages[curPack] = []
+ continue
+ packages[curPack].append(link)
+
+ # empty packages fix
+ delete = []
+
+ for key,value in packages.iteritems():
+ if not value:
+ delete.append(key)
+
+ for key in delete:
+ del packages[key]
+
+ urls = []
+
+ # 'default' links are returned bare; named sections become Package
+ # objects so the core creates separate packages for them.
+ for name, links in packages.iteritems():
+ if name == "default":
+ urls.extend(links)
+ else:
+ urls.append(Package(name, links))
+
+ return urls \ No newline at end of file
diff --git a/pyload/plugins/crypter/LinkSaveIn.py b/pyload/plugins/crypter/LinkSaveIn.py
new file mode 100644
index 000000000..e021316bf
--- /dev/null
+++ b/pyload/plugins/crypter/LinkSaveIn.py
@@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+
+#
+# v2.01 - hagg
+# * cnl2 and web links are skipped if JS is not available (instead of failing the package)
+# * only best available link source is used (priority: cnl2>rsdf>ccf>dlc>web
+#
+
+from Crypto.Cipher import AES
+from module.plugins.Crypter import Crypter
+from module.unescape import unescape
+import base64
+import binascii
+import re
+
+class LinkSaveIn(Crypter):
+ # Decrypter for linksave.in folders. Handles password and captcha
+ # protection, then extracts links from the best available source in
+ # priority order cnl2 > rsdf > ccf > dlc > web (see setup()).
+ __name__ = "LinkSaveIn"
+ __type__ = "crypter"
+ __pattern__ = r"http://(www\.)?linksave.in/(?P<id>\w+)$"
+ __version__ = "2.01"
+ __description__ = """LinkSave.in Crypter Plugin"""
+ __author_name__ = ("fragonib")
+ __author_mail__ = ("fragonib[AT]yahoo[DOT]es")
+
+ # Constants
+ _JK_KEY_ = "jk"
+ _CRYPTED_KEY_ = "crypted"
+ HOSTER_DOMAIN = "linksave.in"
+
+ def setup(self):
+ # Per-download state; preferred_sources defines extraction priority.
+ self.html = None
+ self.fileid = None
+ self.captcha = False
+ self.package = None
+ self.preferred_sources = ['cnl2', 'rsdf', 'ccf', 'dlc', 'web']
+
+ def decrypt(self, pyfile):
+
+ # Init
+ self.package = pyfile.package()
+ self.fileid = re.match(self.__pattern__, pyfile.url).group('id')
+ # Force English page text so the error-string checks below match.
+ self.req.cj.setCookie(self.HOSTER_DOMAIN, "Linksave_Language", "english")
+
+ # Request package
+ self.html = self.load(self.pyfile.url)
+ if not self.isOnline():
+ self.offline()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleErrors()
+
+ # Get package name and folder
+ (package_name, folder_name) = self.getPackageInfo()
+
+ # Extract package links
+ package_links = []
+ for type_ in self.preferred_sources:
+ package_links.extend(self.handleLinkSource(type_))
+ if package_links: # use only first source which provides links
+ break
+ package_links = set(package_links)
+
+ # Pack
+ if package_links:
+ self.packages = [(package_name, package_links, folder_name)]
+ else:
+ self.fail('Could not extract any links')
+
+ def isOnline(self):
+ # Returns False when the folder page shows the 404 error marker.
+ if "<big>Error 404 - Folder not found!</big>" in self.html:
+ self.logDebug("File not found")
+ return False
+ return True
+
+ def isPasswordProtected(self):
+ # Returns True when a password input is present (implicitly None,
+ # i.e. falsy, otherwise).
+ if re.search(r'''<input.*?type="password"''', self.html):
+ self.logDebug("Links are password protected")
+ return True
+
+ def isCaptchaProtected(self):
+ if "<b>Captcha:</b>" in self.html:
+ self.logDebug("Links are captcha protected")
+ return True
+ return False
+
+ def unlockPasswordProtection(self):
+ # Submit the package password stored with the pyfile.
+ password = self.getPassword()
+ self.logDebug("Submitting password [%s] for protected links" % password)
+ post = {"id": self.fileid, "besucherpasswort": password, 'login': 'submit'}
+ self.html = self.load(self.pyfile.url, post=post)
+
+ def unlockCaptchaProtection(self):
+ # Extract the captcha form hash and image, ask the user/OCR for the
+ # code, and re-request the page with the answer.
+ captcha_hash = re.search(r'name="hash" value="([^"]+)', self.html).group(1)
+ captcha_url = re.search(r'src=".(/captcha/cap.php\?hsh=[^"]+)', self.html).group(1)
+ captcha_code = self.decryptCaptcha("http://linksave.in" + captcha_url, forceUser=True)
+ self.html = self.load(self.pyfile.url, post={"id": self.fileid, "hash": captcha_hash, "code": captcha_code})
+
+ def getPackageInfo(self):
+ # Package name/folder are not parsed from the page; the pyfile's
+ # existing package values are reused.
+ name = self.pyfile.package().name
+ folder = self.pyfile.package().folder
+ self.logDebug("Defaulting to pyfile name [%s] and folder [%s] for package" % (name, folder))
+ return name, folder
+
+ def handleErrors(self):
+ if "The visitorpassword you have entered is wrong" in self.html:
+ self.logDebug("Incorrect password, please set right password on 'Edit package' form and retry")
+ self.fail("Incorrect password, please set right password on 'Edit package' form and retry")
+
+ if self.captcha:
+ if "Wrong code. Please retry" in self.html:
+ self.logDebug("Invalid captcha, retrying")
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+ def handleLinkSource(self, type_):
+ # Dispatch one source type to its handler; returns a list of links.
+ if type_ == 'cnl2':
+ return self.handleCNL2()
+ elif type_ in ('rsdf', 'ccf', 'dlc'):
+ return self.handleContainer(type_)
+ elif type_ == 'web':
+ return self.handleWebLinks()
+ else:
+ self.fail('unknown source type "%s" (this is probably a bug)' % type_)
+
+ def handleWebLinks(self):
+ package_links = []
+ self.logDebug("Search for Web links")
+ # JS engine is required to evaluate the redirection script.
+ if not self.js:
+ self.logDebug("no JS -> skip Web links")
+ else:
+ #@TODO: Gather paginated web links
+ pattern = r'<a href="http://linksave\.in/(\w{43})"'
+ ids = re.findall(pattern, self.html)
+ self.logDebug("Decrypting %d Web links" % len(ids))
+ for i, weblink_id in enumerate(ids):
+ try:
+ webLink = "http://linksave.in/%s" % weblink_id
+ self.logDebug("Decrypting Web link %d, %s" % (i+1, webLink))
+ # fw- page contains JS that produces the dl- redirect URL;
+ # the dl- page finally embeds the hoster link in an iframe.
+ fwLink = "http://linksave.in/fw-%s" % weblink_id
+ response = self.load(fwLink)
+ jscode = re.findall(r'<script type="text/javascript">(.*)</script>', response)[-1]
+ jseval = self.js.eval("document = { write: function(e) { return e; } }; %s" % jscode)
+ dlLink = re.search(r'http://linksave\.in/dl-\w+', jseval).group(0)
+ self.logDebug("JsEngine returns value [%s] for redirection link" % dlLink)
+ response = self.load(dlLink)
+ link = unescape(re.search(r'<iframe src="(.+?)"', response).group(1))
+ package_links.append(link)
+ except Exception, detail:
+ self.logDebug("Error decrypting Web link %s, %s" % (webLink, detail))
+ return package_links
+
+ def handleContainer(self, type_):
+ package_links = []
+ type_ = type_.lower()
+ self.logDebug('Seach for %s Container links' % type_.upper())
+ if not type_.isalnum(): # check to prevent broken re-pattern (cnl2,rsdf,ccf,dlc,web are all alpha-numeric)
+ self.fail('unknown container type "%s" (this is probably a bug)' % type_)
+ # type_ is interpolated twice: once for the element id, once for the
+ # container file extension.
+ pattern = r"\('%s_link'\).href=unescape\('(.*?\.%s)'\)" % (type_, type_)
+ containersLinks = re.findall(pattern, self.html)
+ self.logDebug("Found %d %s Container links" % (len(containersLinks), type_.upper()))
+ for containerLink in containersLinks:
+ link = "http://linksave.in/%s" % unescape(containerLink)
+ package_links.append(link)
+ return package_links
+
+ def handleCNL2(self):
+ # Click'n'Load 2: decrypt the AES-encrypted link blocks embedded in
+ # the page form (requires the JS engine for the key function).
+ package_links = []
+ self.logDebug("Search for CNL2 links")
+ if not self.js:
+ self.logDebug("no JS -> skip CNL2 links")
+ elif 'cnl2_load' in self.html:
+ try:
+ (vcrypted, vjk) = self._getCipherParams()
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.fail("Unable to decrypt CNL2 links")
+ return package_links
+
+ def _getCipherParams(self):
+
+ # Get jk
+ jk_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkSaveIn._JK_KEY_
+ vjk = re.findall(jk_re, self.html)
+
+ # Get crypted
+ crypted_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkSaveIn._CRYPTED_KEY_
+ vcrypted = re.findall(crypted_re, self.html)
+
+ # Log and return
+ self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+ return vcrypted, vjk
+
+ def _getLinks(self, crypted, jk):
+
+ # Get key: evaluate the page's key function 'jk' in the JS engine;
+ # its hex return value is the AES key.
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt (CNL2 uses the key as IV as well)
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links: strip zero-byte padding and CRs, one link per line.
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Package has %d links" % len(links))
+ return links
+
diff --git a/pyload/plugins/crypter/LinkdecrypterCom.py b/pyload/plugins/crypter/LinkdecrypterCom.py
new file mode 100644
index 000000000..69d2f8192
--- /dev/null
+++ b/pyload/plugins/crypter/LinkdecrypterCom.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.Crypter import Crypter
+
+class LinkdecrypterCom(Crypter):
+    """Decrypter for the linkdecrypter.com link-protection service.
+
+    Scrapes the HTML interface (the JSON/text API is no longer
+    functional upstream) and handles captcha- and password-protected
+    links with a bounded number of retries.
+    """
+    __name__ = "LinkdecrypterCom"
+    __type__ = "crypter"
+    __version__ = "0.27"
+    __description__ = """linkdecrypter.com"""
+    __author_name__ = ("zoidberg", "flowlee")
+
+    # Decrypted links are returned inside a read-only textarea.
+    TEXTAREA_PATTERN = r'<textarea name="links" wrap="off" readonly="1" class="caja_des">(.+)</textarea>'
+    PASSWORD_PATTERN = r'<input type="text" name="password"'
+    CAPTCHA_PATTERN = r'<img class="captcha" src="(.+?)"(.*?)>'
+    REDIR_PATTERN = r'<i>(Click <a href="./">here</a> if your browser does not redirect you).</i>'
+
+    def decrypt(self, pyfile):
+        """Entry point: decrypt *pyfile.url* and register the links."""
+        self.passwords = self.getPassword().splitlines()
+
+        # API not working anymore -- only the HTML scraping path is used
+        new_links = self.decryptHTML()
+        if new_links:
+            self.core.files.addLinks(new_links, self.pyfile.package().id)
+        else:
+            self.fail('Could not extract any links')
+
+    def decryptAPI(self):
+        """Legacy API path (dead upstream; kept for reference only).
+
+        Returns a list of URLs on success, or None on failure.
+        """
+        get_dict = { "t": "link", "url": self.pyfile.url, "lcache": "1" }
+        self.html = self.load('http://linkdecrypter.com/api', get = get_dict)
+        if self.html.startswith('http://'): return self.html.splitlines()
+
+        if self.html == 'INTERRUPTION(PASSWORD)':
+            # Retry once per configured password until one is accepted.
+            for get_dict['pass'] in self.passwords:
+                self.html = self.load('http://linkdecrypter.com/api', get= get_dict)
+                if self.html.startswith('http://'): return self.html.splitlines()
+
+        self.logError('API', self.html)
+        if self.html == 'INTERRUPTION(PASSWORD)':
+            self.fail("No or incorrect password")
+
+        return None
+
+    def decryptHTML(self):
+        """Scrape the web interface, solving captchas/passwords as needed.
+
+        Returns a list of URLs (entries containing [LINK-ERROR] are
+        dropped) or None if the result textarea never appears within
+        the retry budget.
+        """
+        retries = 5
+
+        post_dict = { "link_cache": "on", "pro_links": self.pyfile.url, "modo_links": "text" }
+        self.html = self.load('http://linkdecrypter.com/', post=post_dict, cookies=True, decode=True)
+
+        while self.passwords or retries:
+            found = re.search(self.TEXTAREA_PATTERN, self.html, flags=re.DOTALL)
+            if found: return [ x for x in found.group(1).splitlines() if '[LINK-ERROR]' not in x ]
+
+            found = re.search(self.CAPTCHA_PATTERN, self.html)
+            if found:
+                captcha_url = 'http://linkdecrypter.com/' + found.group(1)
+                # "getPos" in the tag attributes marks a click-position captcha.
+                result_type = "positional" if "getPos" in found.group(2) else "textual"
+
+                found = re.search(r"<p><i><b>([^<]+)</b></i></p>", self.html)
+                msg = found.group(1) if found else ""
+                self.logInfo("Captcha protected link", result_type, msg)
+
+                captcha = self.decryptCaptcha(captcha_url, result_type = result_type)
+                if result_type == "positional":
+                    captcha = "%d|%d" % captcha
+                self.html = self.load('http://linkdecrypter.com/', post={ "captcha": captcha }, decode=True)
+                retries -= 1
+
+            elif self.PASSWORD_PATTERN in self.html:
+                if self.passwords:
+                    password = self.passwords.pop(0)
+                    self.logInfo("Password protected link, trying " + password)
+                    self.html = self.load('http://linkdecrypter.com/', post={'password': password}, decode=True)
+                else:
+                    self.fail("No or incorrect password")
+
+            else:
+                # Neither result nor challenge visible yet: reload and retry.
+                retries -= 1
+                self.html = self.load('http://linkdecrypter.com/', cookies=True, decode=True)
+
+        return None
diff --git a/pyload/plugins/crypter/LixIn.py b/pyload/plugins/crypter/LixIn.py
new file mode 100644
index 000000000..e2ee30731
--- /dev/null
+++ b/pyload/plugins/crypter/LixIn.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+
+class LixIn(Crypter):
+    """Decrypter for lix.in short-link container pages.
+
+    Submits the continue form (solving an image captcha up to 5 times
+    if one is shown) and extracts the destination URL from the result
+    iframe.
+    """
+    __name__ = "LixIn"
+    __type__ = "container"
+    __pattern__ = r"http://(www.)?lix.in/(?P<id>.*)"
+    __version__ = "0.22"
+    __description__ = """Lix.in Container Plugin"""
+    __author_name__ = ("spoob")
+    __author_mail__ = ("spoob@pyload.org")
+
+    CAPTCHA_PATTERN='<img src="(?P<image>captcha_img.php\?.*?)"'
+    SUBMIT_PATTERN=r"value='continue.*?'"
+    LINK_PATTERN=r'name="ifram" src="(?P<link>.*?)"'
+
+
+    def decrypt(self, pyfile):
+        url = pyfile.url
+
+        matches = re.search(self.__pattern__,url)
+        if not matches:
+            self.fail("couldn't identify file id")
+
+        id = matches.group("id")
+        self.logDebug("File id is %s" % id)
+
+        self.html = self.req.load(url, decode=True)
+
+        # A missing continue button means the page is not a valid link page.
+        matches = re.search(self.SUBMIT_PATTERN,self.html)
+        if not matches:
+            self.fail("link doesn't seem valid")
+
+        matches = re.search(self.CAPTCHA_PATTERN, self.html)
+        if matches:
+            # Up to 5 captcha attempts; loop ends early once the captcha
+            # image disappears from the reloaded page.
+            for i in range(5):
+                matches = re.search(self.CAPTCHA_PATTERN, self.html)
+                if matches:
+                    self.logDebug("trying captcha")
+                    captcharesult = self.decryptCaptcha("http://lix.in/"+matches.group("image"))
+                    self.html = self.req.load(url, decode=True, post={"capt" : captcharesult, "submit":"submit","tiny":id})
+                else:
+                    self.logDebug("no captcha/captcha solved")
+                    break
+        else:
+            # No captcha: plain form submit is enough.
+            self.html = self.req.load(url, decode=True, post={"submit" : "submit",
+                                                              "tiny" : id})
+
+        matches = re.search(self.LINK_PATTERN, self.html)
+        if not matches:
+            self.fail("can't find destination url")
+
+        new_link = matches.group("link")
+        self.logDebug("Found link %s, adding to package" % new_link)
+
+        self.packages.append((self.pyfile.package().name, [new_link], self.pyfile.package().name))
diff --git a/pyload/plugins/crypter/LofCc.py b/pyload/plugins/crypter/LofCc.py
new file mode 100644
index 000000000..9c98c48a0
--- /dev/null
+++ b/pyload/plugins/crypter/LofCc.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from time import sleep
+from os.path import join
+
+from module.plugins.Crypter import Crypter
+from module.plugins.ReCaptcha import ReCaptcha
+
+class LofCc(Crypter):
+    """Decrypter for lof.cc.
+
+    Solves the ReCaptcha on the page, downloads the package's DLC
+    container into the download folder and queues that file.
+    """
+    __name__ = "LofCc"
+    __type__ = "container"
+    __pattern__ = r"http://lof.cc/(.*)"
+    __version__ = "0.1"
+    __description__ = """lof.cc Plugin"""
+    __author_name__ = ("mkaay")
+    __author_mail__ = ("mkaay@mkaay.de")
+
+    def setup(self):
+        # Disallow parallel downloads for this plugin.
+        self.multiDL = False
+
+    def decrypt(self, pyfile):
+        html = self.req.load(self.pyfile.url, cookies=True)
+
+        # Missing ReCaptcha script is treated as a dead link.
+        m = re.search(r"src=\"http://www.google.com/recaptcha/api/challenge\?k=(.*?)\"></script>", html)
+        if not m:
+            self.offline()
+
+        recaptcha = ReCaptcha(self)
+        challenge, code = recaptcha.challenge(m.group(1))
+
+        resultHTML = self.req.load(self.pyfile.url, post={"recaptcha_challenge_field":challenge, "recaptcha_response_field":code}, cookies=True)
+
+        # An error class in the response means the captcha answer was wrong.
+        if re.search("class=\"error\"", resultHTML):
+            self.retry()
+
+        self.correctCaptcha()
+
+        # Fetch the DLC container and store it in the download folder.
+        dlc = self.req.load(self.pyfile.url+"/dlc", cookies=True)
+
+        name = re.search(self.__pattern__, self.pyfile.url).group(1)+".dlc"
+
+        dlcFile = join(self.config["general"]["download_folder"], name)
+        f = open(dlcFile, "wb")
+        f.write(dlc)
+        f.close()
+
+        self.packages.append((self.pyfile.package().name, [dlcFile], self.pyfile.package().folder))
diff --git a/pyload/plugins/crypter/MBLinkInfo.py b/pyload/plugins/crypter/MBLinkInfo.py
new file mode 100644
index 000000000..e266c7722
--- /dev/null
+++ b/pyload/plugins/crypter/MBLinkInfo.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+
+
+class MBLinkInfo(Crypter):
+    """Decrypter for mblink.info redirect pages.
+
+    Follows the page's meta-refresh redirect and adds the target URL.
+    """
+    __name__ = "MBLinkInfo"
+    __type__ = "container"
+    __pattern__ = r"http://(?:www\.)?mblink\.info/?\?id=(\d+)"
+    __version__ = "0.02"
+    __description__ = """MBLink.Info Container Plugin"""
+    __author_name__ = ("Gummibaer", "stickell")
+    __author_mail__ = ("Gummibaer@wiki-bierkiste.de", "l.stickell@yahoo.it")
+
+    # Target of the <meta http-equiv="refresh" ...; URL=...> tag.
+    URL_PATTERN = r'<meta[^;]+; URL=(.*)["\']>'
+
+    def decrypt(self, pyfile):
+        src = self.load(pyfile.url)
+        found = re.search(self.URL_PATTERN, src)
+        if found:
+            link = found.group(1)
+            self.logDebug("Redirected to " + link)
+            self.core.files.addLinks([link], self.pyfile.package().id)
+        else:
+            self.fail('Unable to detect valid link')
diff --git a/pyload/plugins/crypter/MediafireComFolder.py b/pyload/plugins/crypter/MediafireComFolder.py
new file mode 100644
index 000000000..ddd61379c
--- /dev/null
+++ b/pyload/plugins/crypter/MediafireComFolder.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+from module.plugins.hoster.MediafireCom import checkHTMLHeader
+from module.common.json_layer import json_loads
+
+class MediafireComFolder(Crypter):
+    """Decrypter for mediafire.com folder and share-key URLs.
+
+    Resolves single-file pages directly and expands folders through
+    Mediafire's public folder API.
+    """
+    __name__ = "MediafireComFolder"
+    __type__ = "crypter"
+    __pattern__ = r"http://(\w*\.)*mediafire\.com/(folder/|\?sharekey=|\?\w{13}($|[/#]))"
+    __version__ = "0.14"
+    __description__ = """Mediafire.com Folder Plugin"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    FOLDER_KEY_PATTERN = r"var afI= '(\w+)';"
+    FILE_URL_PATTERN = '<meta property="og:url" content="http://www.mediafire.com/\?(\w+)"/>'
+
+    def decrypt(self, pyfile):
+        new_links = []
+
+        # checkHTMLHeader: 0 = regular page, 1 = offline, other = direct link.
+        url, result = checkHTMLHeader(pyfile.url)
+        self.logDebug('Location (%d): %s' % (result, url))
+
+        if result == 0:
+            # load and parse html
+            html = self.load(pyfile.url)
+            found = re.search(self.FILE_URL_PATTERN, html)
+            if found:
+                # file page
+                new_links.append("http://www.mediafire.com/file/%s" % found.group(1))
+            else:
+                # folder page
+                found = re.search(self.FOLDER_KEY_PATTERN, html)
+                if found:
+                    folder_key = found.group(1)
+                    self.logDebug("FOLDER KEY: %s" % folder_key)
+
+                    json_resp = json_loads(self.load("http://www.mediafire.com/api/folder/get_info.php?folder_key=%s&response_format=json&version=1" % folder_key))
+                    #self.logInfo(json_resp)
+                    if json_resp['response']['result'] == "Success":
+                        for link in json_resp['response']['folder_info']['files']:
+                            new_links.append("http://www.mediafire.com/file/%s" % link['quickkey'])
+                    else:
+                        self.fail(json_resp['response']['message'])
+        elif result == 1:
+            self.offline()
+        else:
+            # Header check already resolved a direct location.
+            new_links.append(url)
+
+        if new_links:
+            self.core.files.addLinks(new_links, self.pyfile.package().id)
+        else:
+            self.fail('Could not extract any links')
\ No newline at end of file
diff --git a/pyload/plugins/crypter/Movie2kTo.py b/pyload/plugins/crypter/Movie2kTo.py
new file mode 100644
index 000000000..097cb702e
--- /dev/null
+++ b/pyload/plugins/crypter/Movie2kTo.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+from collections import defaultdict
+
+class Movie2kTo(Crypter):
+    """Decrypter for movie2k.to film and TV-show pages.
+
+    Can expand a single episode/film, a whole season, or everything,
+    filtered by an accepted-hoster list, and annotates package names
+    with footage-quality statistics.
+    """
+    __name__ = 'Movie2kTo'
+    __type__ = 'container'
+    __pattern__ = r'http://(?:www\.)?movie2k\.to/(.*)\.html'
+    __version__ = '0.5'
+    __config__ = [('accepted_hosters', 'str', 'List of accepted hosters', 'Xvidstage, Putlocker, '),
+                  ('dir_quality', 'bool', 'Show the quality of the footage in the folder name', 'True'),
+                  ('whole_season', 'bool', 'Download whole season', 'False'),
+                  ('everything', 'bool', 'Download everything', 'False'),
+                  ('firstN', 'int', 'Download the first N files for each episode (the first file is probably all you will need)', '1')]
+    __description__ = """Movie2k.to Container Plugin"""
+    __author_name__ = ('4Christopher')
+    __author_mail__ = ('4Christopher@gmx.de')
+    BASE_URL_PATTERN = r'http://(?:www\.)?movie2k\.to/'
+    TVSHOW_URL_PATH_PATTERN = r'tvshows-(?P<id>\d+?)-(?P<name>.+)'
+    FILM_URL_PATH_PATTERN = r'(?P<name>.+?)-(?:online-film|watch-movie)-(?P<id>\d+)'
+    SEASON_PATTERN = r'<div id="episodediv(\d+?)" style="display:(inline|none)">(.*?)</div>'
+    EP_PATTERN = r'<option value="(.+?)"( selected)?>Episode\s*?(\d+?)</option>'
+    BASE_URL = 'http://www.movie2k.to'
+
+    def decrypt(self, pyfile):
+        self.package = pyfile.package()
+        self.folder = self.package.folder
+        self.qStatReset()
+        whole_season = self.getConfig('whole_season')
+        everything = self.getConfig('everything')
+        self.getInfo(pyfile.url)
+
+        if (whole_season or everything) and self.format == 'tvshow':
+            self.logDebug('Downloading the whole season')
+            # season_sel == 'inline' marks the season selected in the start URL.
+            for season, season_sel, html in re.findall(self.SEASON_PATTERN, self.html, re.DOTALL | re.I):
+                if (season_sel == 'inline') or everything:
+                    season_links = []
+                    for url_path, ep_sel, ep in re.findall(self.EP_PATTERN, html, re.I):
+                        season_name = self.name_tvshow(season, ep)
+                        self.logDebug('%s: %s' % (season_name, url_path))
+                        if ep_sel and (season_sel == 'inline'):
+                            self.logDebug('%s selected (in the start URL: %s)' % (season_name, pyfile.url))
+                            season_links += self.getInfoAndLinks('%s/%s' % (self.BASE_URL, url_path))
+                        elif (whole_season and (season_sel == 'inline')) or everything:
+                            season_links += self.getInfoAndLinks('%s/%s' % (self.BASE_URL, url_path))
+
+                    self.logDebug(season_links)
+                    folder = '%s: Season %s' % (self.name, season)
+                    name = '%s%s' % (folder, self.qStat())
+                    self.packages.append((name, season_links, folder))
+                    self.qStatReset()
+        else:
+            links = self.getLinks()
+            name = '%s%s' % (self.package.name, self.qStat())
+            self.packages.append((name, links , self.package.folder))
+
+    def qStat(self):
+        """Return a quality summary suffix for the package name ('' if disabled)."""
+        if len(self.q) == 0: return ''
+        if not self.getConfig('dir_quality'): return ''
+        if len(self.q) == 1: return (' (Quality: %d, max (all hosters): %d)' % (self.q[0], self.max_q))
+        return (' (Average quality: %d, min: %d, max: %d, %s, max (all hosters): %d)'
+                % (sum(self.q) / float(len(self.q)), min(self.q), max(self.q), self.q, self.max_q))
+
+    def qStatReset(self):
+        self.q = []  ## to calculate the average, min and max of the quality
+        self.max_q = None  ## maximum quality of all hosters
+
+    def tvshow_number(self, number):
+        """Zero-pad a season/episode number to two digits (string in/out)."""
+        if int(number) < 10:
+            return '0%s' % number
+        else:
+            return number
+
+    def name_tvshow(self, season, ep):
+        """Build a 'Name SxxEyy' display name for an episode."""
+        return '%s S%sE%s' % (self.name, self.tvshow_number(season), self.tvshow_number(ep))
+
+    def getInfo(self, url):
+        """Load *url* and populate self.html/url_path/format/name/id."""
+        self.html = self.load(url)
+        self.url_path = re.match(self.__pattern__, url).group(1)
+        self.format = pattern_re = None
+        if re.match(r'tvshows', self.url_path):
+            self.format = 'tvshow'
+            pattern_re = re.search(self.TVSHOW_URL_PATH_PATTERN, self.url_path)
+        elif re.search(self.FILM_URL_PATH_PATTERN, self.url_path):
+            self.format = 'film'
+            pattern_re = re.search(self.FILM_URL_PATH_PATTERN, self.url_path)
+        # NOTE(review): if neither pattern matches, pattern_re stays None and
+        # the next line raises AttributeError -- presumably unreachable for
+        # URLs accepted by __pattern__; confirm.
+        self.name = pattern_re.group('name')
+        self.id = pattern_re.group('id')
+        self.logDebug('URL Path: %s (ID: %s, Name: %s, Format: %s)'
+                      % (self.url_path, self.id, self.name, self.format))
+
+    def getInfoAndLinks(self, url):
+        self.getInfo(url)
+        return self.getLinks()
+
+    ## This function returns the links for one episode as list
+    def getLinks(self):
+        accepted_hosters = re.findall(r'\b(\w+?)\b', self.getConfig('accepted_hosters'))
+        firstN = self.getConfig('firstN')
+        links = []
+        re_quality = re.compile(r'.+?Quality:.+?smileys/(\d)\.gif')
+        ## The quality is one digit. 0 is the worst and 5 is the best.
+        ## It is not always present in the page.
+        re_hoster_id_js = re.compile(r'links\[(\d+?)\].+&nbsp;(.+?)</a>(.+?)</tr>')
+        re_hoster_id_html = re.compile(r'(?:<td height|<tr id).+?<a href=".*?(\d{7}).*?".+?&nbsp;([^<>]+?)</a>(.+?)</tr>')
+        ## I assume that the ID is 7 digits longs
+        count = defaultdict(int)
+        matches = re_hoster_id_html.findall(self.html)
+        matches += re_hoster_id_js.findall(self.html)
+        # self.logDebug(matches)
+        ## h_id: hoster_id of a possible hoster
+        for h_id, hoster, q_html in matches:
+            match_q = re_quality.search(q_html)
+            if match_q:
+                quality = int(match_q.group(1))
+                if self.max_q == None:
+                    self.max_q = quality
+                else:
+                    if self.max_q < quality: self.max_q = quality
+                q_s = ', Quality: %d' % quality
+            else:
+                q_s = ', unknown quality'
+            if hoster in accepted_hosters:
+                self.logDebug('Accepted: %s, ID: %s%s' % (hoster, h_id, q_s))
+                count[hoster] += 1
+                if count[hoster] <= firstN:
+                    if match_q: self.q.append(quality)
+                    if h_id != self.id:
+                        self.html = self.load('%s/tvshows-%s-%s.html' % (self.BASE_URL, h_id, self.name))
+                    else:
+                        self.logDebug('This is already the right ID')
+                    # The iframe tag must continue with a width. There where
+                    # two iframes in the site and I try to make sure that it
+                    # matches the right one. This is not (yet) nessesary
+                    # because the right iframe happens to be the first iframe.
+                    for pattern in (r'<a target="_blank" href="(http://[^"]*?)"', r'<iframe src="(http://[^"]*?)" width'):
+                        try:
+                            url = re.search(pattern, self.html).group(1)
+                        except:
+                            self.logDebug('Failed to find the URL (pattern %s)' % pattern)
+                        else:
+                            self.logDebug('id: %s, %s: %s' % (h_id, hoster, url))
+                            links.append(url)
+                            break
+            else:
+                self.logDebug('Not accepted: %s, ID: %s%s' % (hoster, h_id, q_s))
+        # self.logDebug(links)
+        return links
diff --git a/pyload/plugins/crypter/MultiloadCz.py b/pyload/plugins/crypter/MultiloadCz.py
new file mode 100644
index 000000000..2c71b8fea
--- /dev/null
+++ b/pyload/plugins/crypter/MultiloadCz.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
+class MultiloadCz(Crypter):
+    """Decrypter for multiload.cz download and folder pages.
+
+    Folder pages ('slozka') list plain URLs in a textarea; download
+    pages ('stahnout') offer per-hoster mirrors filtered by the
+    preferred/ignored hoster configuration.
+    """
+    __name__ = "MultiloadCz"
+    __type__ = "crypter"
+    __pattern__ = r"http://.*multiload.cz/(stahnout|slozka)/.*"
+    __version__ = "0.4"
+    __description__ = """multiload.cz"""
+    __config__ = [("usedHoster", "str", "Prefered hoster list (bar-separated) ", ""),
+                  ("ignoredHoster", "str", "Ignored hoster list (bar-separated) ", "")]
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    FOLDER_PATTERN = r'<form action="" method="get"><textarea[^>]*>([^>]*)</textarea></form>'
+    LINK_PATTERN = r'<p class="manager-server"><strong>([^<]+)</strong></p><p class="manager-linky"><a href="([^"]+)">'
+
+    def decrypt(self, pyfile):
+        self.html = self.load(self.pyfile.url, decode=True)
+        new_links = []
+
+        if re.search(self.__pattern__, self.pyfile.url).group(1) == "slozka":
+            # Folder page: the textarea holds whitespace-separated URLs.
+            found = re.search(self.FOLDER_PATTERN, self.html)
+            if found is not None:
+                new_links.extend(found.group(1).split())
+        else:
+            # Download page: pick mirrors from preferred hosters first,
+            # then fall back to any hoster not explicitly ignored.
+            found = re.findall(self.LINK_PATTERN, self.html)
+            if found:
+                prefered_set = set(self.getConfig("usedHoster").split('|'))
+                new_links.extend([x[1] for x in found if x[0] in prefered_set])
+
+                if not new_links:
+                    ignored_set = set(self.getConfig("ignoredHoster").split('|'))
+                    new_links.extend([x[1] for x in found if x[0] not in ignored_set])
+
+        if new_links:
+            self.core.files.addLinks(new_links, self.pyfile.package().id)
+        else:
+            self.fail('Could not extract any links')
\ No newline at end of file
diff --git a/pyload/plugins/crypter/MultiuploadCom.py b/pyload/plugins/crypter/MultiuploadCom.py
new file mode 100644
index 000000000..bf5540982
--- /dev/null
+++ b/pyload/plugins/crypter/MultiuploadCom.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+from module.common.json_layer import json_loads
+from time import time
+
+class MultiuploadCom(Crypter):
+ __name__ = "MultiuploadCom"
+ __type__ = "crypter"
+ __pattern__ = r"http://(?:www\.)?multiupload.com/(\w+)"
+ __version__ = "0.01"
+ __description__ = """MultiUpload.com crypter"""
+ __config__ = [("preferedHoster", "str", "Prefered hoster list (bar-separated) ", "multiupload"),
+ ("ignoredHoster", "str", "Ignored hoster list (bar-separated) ", "")]
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ ML_LINK_PATTERN = r'<div id="downloadbutton_" style=""><a href="([^"]+)"'
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url)
+ found = re.search(self.ML_LINK_PATTERN, self.html)
+ ml_url = found.group(1) if found else None
+
+ json_list = json_loads(self.load("http://multiupload.com/progress/", get = {
+ "d": re.search(self.__pattern__, pyfile.url).group(1),
+ "r": str(int(time()*1000))
+ }))
+ new_links = []
+
+ prefered_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("preferedHoster").split('|')))
+
+ if ml_url and 'multiupload' in prefered_set: new_links.append(ml_url)
+
+ for link in json_list:
+ if link['service'].lower() in prefered_set and int(link['status']) and not int(link['deleted']):
+ url = self.getLocation(link['url'])
+ if url: new_links.append(url)
+
+ if not new_links:
+ ignored_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("ignoredHoster").split('|')))
+
+ if 'multiupload' not in ignored_set: new_links.append(ml_url)
+
+ for link in json_list:
+ if link['service'].lower() not in ignored_set and int(link['status']) and not int(link['deleted']):
+ url = self.getLocation(link['url'])
+ if url: new_links.append(url)
+
+ if new_links:
+ self.core.files.addLinks(new_links, self.pyfile.package().id)
+ else:
+ self.fail('Could not extract any links')
+
+ def getLocation(self, url):
+ header = self.load(url, just_header = True)
+ return header['location'] if "location" in header else None \ No newline at end of file
diff --git a/pyload/plugins/crypter/NCryptIn.py b/pyload/plugins/crypter/NCryptIn.py
new file mode 100644
index 000000000..821636821
--- /dev/null
+++ b/pyload/plugins/crypter/NCryptIn.py
@@ -0,0 +1,251 @@
+# -*- coding: utf-8 -*-
+
+from Crypto.Cipher import AES
+from module.plugins.Crypter import Crypter
+from module.plugins.ReCaptcha import ReCaptcha
+import base64
+import binascii
+import re
+
+class NCryptIn(Crypter):
+    """Decrypter for ncrypt.in protected folders.
+
+    Unlocks password/captcha protection (anicaptcha, ReCaptcha and
+    circle-click captcha), then collects links from three sources:
+    plain web links, container files (rsdf/dlc/ccf) and CNL2 blocks.
+    """
+    __name__ = "NCryptIn"
+    __type__ = "crypter"
+    __pattern__ = r"http://(?:www\.)?ncrypt.in/folder-([^/\?]+)"
+    __version__ = "1.22"
+    __description__ = """NCrypt.in Crypter Plugin"""
+    __author_name__ = ("fragonib")
+    __author_mail__ = ("fragonib[AT]yahoo[DOT]es")
+
+    # Constants: form field names of the CNL2 block
+    _JK_KEY_ = "jk"
+    _CRYPTED_KEY_ = "crypted"
+
+    def setup(self):
+        # Per-download state; reset before each decrypt run.
+        self.html = None
+        self.cleanedHtml = None
+        self.captcha = False
+        self.package = None
+
+    def decrypt(self, pyfile):
+
+        # Init
+        self.package = pyfile.package()
+
+        # Request package
+        self.html = self.load(self.pyfile.url)
+        self.cleanedHtml = self.removeCrap(self.html)
+        if not self.isOnline():
+            self.offline()
+
+        # Check for protection
+        if self.isProtected():
+            self.html = self.unlockProtection()
+            self.cleanedHtml = self.removeCrap(self.html)
+            self.handleErrors()
+
+        # Get package name and folder
+        (package_name, folder_name) = self.getPackageInfo()
+
+        # Extract package links from all three sources, then dedupe
+        package_links = []
+        package_links.extend(self.handleWebLinks())
+        package_links.extend(self.handleContainers())
+        package_links.extend(self.handleCNL2())
+        package_links = self.removeContainers(package_links)
+        package_links = set(package_links)
+
+        # Pack
+        self.packages = [(package_name, package_links, folder_name)]
+
+    def removeCrap(self, content):
+        """Strip hidden decoy inputs/divs/iframes that confuse the parsers."""
+        patterns = (r'(type="hidden".*?(name=".*?")?.*?value=".*?")',
+                    r'display:none;">(.*?)</(div|span)>',
+                    r'<div\s+class="jdownloader"(.*?)</div>',
+                    r'<iframe\s+style="display:none(.*?)</iframe>')
+        for pattern in patterns:
+            rexpr = re.compile(pattern, re.DOTALL)
+            content = re.sub(rexpr, "", content)
+        return content
+
+    def removeContainers(self,package_links):
+        """Drop container-file links when plain links exist; otherwise keep all."""
+        tmp_package_links = package_links[:]
+        for link in tmp_package_links:
+            self.logDebug(link)
+            if ".dlc" in link or ".ccf" in link or ".rsdf" in link:
+                self.logDebug("Removing [%s] from package_links" % link)
+                package_links.remove(link)
+
+        if len(package_links) > 0:
+            return package_links
+        else:
+            # Only containers were found; better to return them than nothing.
+            return tmp_package_links
+
+    def isOnline(self):
+        if "Your folder does not exist" in self.cleanedHtml:
+            self.logDebug("File not found")
+            return False
+        return True
+
+    def isProtected(self):
+        if re.search(r'''<form.*?name.*?protected.*?>''', self.cleanedHtml):
+            self.logDebug("Links are protected")
+            return True
+        return False
+
+    def getPackageInfo(self):
+        """Return (name, folder) from the page title, falling back to the package."""
+        title_re = r'<h2><span.*?class="arrow".*?>(?P<title>[^<]+).*?</span>.*?</h2>'
+        m = re.findall(title_re, self.html, re.DOTALL)
+        # NOTE(review): re.findall never returns None; an empty list also
+        # takes the first branch here, so the fallback below only triggers
+        # if this check is changed -- confirm intent.
+        if m is not None:
+            title = m[-1].strip()
+            name = folder = title
+            self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+        else:
+            name = self.package.name
+            folder = self.package.folder
+            self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+        return name, folder
+
+    def unlockProtection(self):
+        """Solve whatever protection the folder form requires and resubmit.
+
+        Returns the HTML of the unlocked page.
+        """
+        postData = {}
+
+        form = re.search(r'''<form\ name="protected"(.*?)</form>''', self.cleanedHtml, re.DOTALL).group(1)
+
+        # Submit package password
+        if "password" in form:
+            password = self.getPassword()
+            self.logDebug("Submitting password [%s] for protected links" % password)
+            postData['password'] = password
+
+        # Resolve anicaptcha
+        if "anicaptcha" in form:
+            self.captcha = True
+            self.logDebug("Captcha protected, resolving captcha")
+            captchaUri = re.search(r'src="(/temp/anicaptcha/[^"]+)', form).group(1)
+            captcha = self.decryptCaptcha("http://ncrypt.in" + captchaUri)
+            self.logDebug("Captcha resolved [%s]" % captcha)
+            postData['captcha'] = captcha
+
+        # Resolve recaptcha
+        if "recaptcha" in form:
+            self.captcha = True
+            id = re.search(r'\?k=(.*?)"', form).group(1)
+            self.logDebug("Resolving ReCaptcha with key [%s]" % id)
+            recaptcha = ReCaptcha(self)
+            challenge, code = recaptcha.challenge(id)
+            postData['recaptcha_challenge_field'] = challenge
+            postData['recaptcha_response_field'] = code
+
+        # Resolve circlecaptcha (user must click a position in the image)
+        if "circlecaptcha" in form:
+            self.captcha = True
+            self.logDebug("Captcha protected")
+            captcha_img_url = "http://ncrypt.in/classes/captcha/circlecaptcha.php"
+            coords = self.decryptCaptcha(captcha_img_url, forceUser=True, imgtype="png", result_type='positional')
+            self.logDebug("Captcha resolved, coords [%s]" % str(coords))
+            self.captcha_post_url = self.pyfile.url
+
+            postData['circle.x'] = coords[0]
+            postData['circle.y'] = coords[1]
+
+
+        # Unlock protection
+        postData['submit_protected'] = 'Continue to folder '
+        return self.load(self.pyfile.url, post=postData)
+
+    def handleErrors(self):
+        """Fail on wrong password; retry on wrong captcha, else confirm it."""
+        if "This password is invalid!" in self.cleanedHtml:
+            self.logDebug("Incorrect password, please set right password on 'Edit package' form and retry")
+            self.fail("Incorrect password, please set right password on 'Edit package' form and retry")
+
+        if self.captcha:
+            if "The securitycheck was wrong!" in self.cleanedHtml:
+                self.logDebug("Invalid captcha, retrying")
+                self.invalidCaptcha()
+                self.retry()
+            else:
+                self.correctCaptcha()
+
+    def handleWebLinks(self):
+        """Resolve each ncrypt.in/link-... entry via its frame redirect."""
+        package_links = []
+        self.logDebug("Handling Web links")
+
+        pattern = r"(http://ncrypt\.in/link-.*?=)"
+        links = re.findall(pattern, self.html)
+        self.logDebug("Decrypting %d Web links" % len(links))
+        for i, link in enumerate(links):
+            self.logDebug("Decrypting Web link %d, %s" % (i+1, link))
+            try:
+                # The frame- variant answers with a Location header to the real URL.
+                url = link.replace("link-", "frame-")
+                link = self.load(url, just_header=True)['location']
+                package_links.append(link)
+            except Exception, detail:
+                self.logDebug("Error decrypting Web link %s, %s" % (link, detail))
+        return package_links
+
+    def handleContainers(self):
+        """Collect rsdf/dlc/ccf container download links."""
+        package_links = []
+        self.logDebug("Handling Container links")
+
+        pattern = r"/container/(rsdf|dlc|ccf)/([a-z0-9]+)"
+        containersLinks = re.findall(pattern, self.html)
+        self.logDebug("Decrypting %d Container links" % len(containersLinks))
+        for containerLink in containersLinks:
+            link = "http://ncrypt.in/container/%s/%s.%s" % (containerLink[0], containerLink[1], containerLink[0])
+            package_links.append(link)
+        return package_links
+
+    def handleCNL2(self):
+        """Decrypt Click'n'Load 2 blocks into plain links."""
+        package_links = []
+        self.logDebug("Handling CNL2 links")
+
+        if 'cnl2_output' in self.cleanedHtml:
+            try:
+                (vcrypted, vjk) = self._getCipherParams()
+                for (crypted, jk) in zip(vcrypted, vjk):
+                    package_links.extend(self._getLinks(crypted, jk))
+            except:
+                self.fail("Unable to decrypt CNL2 links")
+        return package_links
+
+    def _getCipherParams(self):
+        """Return (vcrypted, vjk) read from the CNL2 hidden form inputs."""
+        pattern = r'<input.*?name="%s".*?value="(.*?)"'
+
+        # Get jk
+        jk_re = pattern % NCryptIn._JK_KEY_
+        vjk = re.findall(jk_re, self.html)
+
+        # Get crypted
+        crypted_re = pattern % NCryptIn._CRYPTED_KEY_
+        vcrypted = re.findall(crypted_re, self.html)
+
+        # Log and return
+        self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+        return vcrypted, vjk
+
+    def _getLinks(self, crypted, jk):
+        """Decrypt one CNL2 block (AES-CBC, key == IV) into a link list."""
+        # Get key by evaluating the jk function with the JS engine
+        jreturn = self.js.eval("%s f()" % jk)
+        self.logDebug("JsEngine returns value [%s]" % jreturn)
+        key = binascii.unhexlify(jreturn)
+
+        # Decode crypted
+        crypted = base64.standard_b64decode(crypted)
+
+        # Decrypt
+        Key = key
+        IV = key
+        obj = AES.new(Key, AES.MODE_CBC, IV)
+        text = obj.decrypt(crypted)
+
+        # Extract links: strip padding NULs and CRs, one link per line
+        text = text.replace("\x00", "").replace("\r", "")
+        links = text.split("\n")
+        links = filter(lambda x: x != "", links)
+
+        # Log and return
+        self.logDebug("Block has %d links" % len(links))
+        return links
diff --git a/pyload/plugins/crypter/NetfolderIn.py b/pyload/plugins/crypter/NetfolderIn.py
new file mode 100644
index 000000000..c5c602c27
--- /dev/null
+++ b/pyload/plugins/crypter/NetfolderIn.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class NetfolderIn(SimpleCrypter):
+    """Decrypter for netfolder.in folders, with password support."""
+    __name__ = "NetfolderIn"
+    __type__ = "crypter"
+    __pattern__ = r"http://(?:www\.)?netfolder.in/((?P<id1>\w+)/\w+|folder.php\?folder_id=(?P<id2>\w+))"
+    __version__ = "0.6"
+    __description__ = """NetFolder Crypter Plugin"""
+    __author_name__ = ("RaNaN", "fragonib")
+    __author_mail__ = ("RaNaN@pyload.org", "fragonib[AT]yahoo[DOT]es")
+
+    TITLE_PATTERN = r'<div class="Text">Inhalt des Ordners <span(.*)>(?P<title>.+)</span></div>'
+
+    def decrypt(self, pyfile):
+        # Request package
+        self.html = self.load(pyfile.url)
+
+        # Check for password protection
+        if self.isPasswordProtected():
+            self.html = self.submitPassword()
+            if self.html is None:
+                self.fail("Incorrect password, please set right password on Add package form and retry")
+
+        # Get package name and folder
+        (package_name, folder_name) = self.getPackageNameAndFolder()
+
+        # Get package links
+        package_links = self.getLinks()
+
+        # Set package
+        self.packages = [(package_name, package_links, folder_name)]
+
+    def isPasswordProtected(self):
+        """Return True when the folder page shows a password input."""
+        if '<input type="password" name="password"' in self.html:
+            self.logDebug("Links are password protected")
+            return True
+        return False
+
+    def submitPassword(self):
+        """Post the configured password; return unlocked HTML or None on failure."""
+        # Gather data: only one of id1/id2 matches, the other is None;
+        # max() picks the non-None group (Python 2 orders None below str).
+        try:
+            m = re.match(self.__pattern__, self.pyfile.url)
+            id = max(m.group('id1'), m.group('id2'))
+        except AttributeError:
+            self.logDebug("Unable to get package id from url [%s]" % self.pyfile.url)
+            return
+        url = "http://netfolder.in/folder.php?folder_id=" + id
+        password = self.getPassword()
+
+        # Submit package password
+        post = {'password': password, 'save': 'Absenden'}
+        self.logDebug("Submitting password [%s] for protected links with id [%s]" % (password, id))
+        html = self.load(url, {}, post)
+
+        # Check for invalid password
+        if '<div class="InPage_Error">' in html:
+            self.logDebug("Incorrect password, please set right password on Edit package form and retry")
+            return None
+
+        return html
+
+    def getLinks(self):
+        """Return the folder's links from the hidden comma-separated 'list' field."""
+        links = re.search(r'name="list" value="(.*?)"', self.html).group(1).split(",")
+        self.logDebug("Package has %d links" % len(links))
+        return links
diff --git a/pyload/plugins/crypter/OneKhDe.py b/pyload/plugins/crypter/OneKhDe.py
new file mode 100644
index 000000000..c77203187
--- /dev/null
+++ b/pyload/plugins/crypter/OneKhDe.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.unescape import unescape
+from module.plugins.Crypter import Crypter
+
+class OneKhDe(Crypter):
+    """Decrypter for 1kh.de container pages.
+
+    NOTE(review): legacy plugin API -- uses __init__/proceed instead of
+    a decrypt() method; presumably still dispatched by an older plugin
+    loader, confirm before modernizing.
+    """
+    __name__ = "OneKhDe"
+    __type__ = "container"
+    __pattern__ = r"http://(www\.)?1kh.de/f/"
+    __version__ = "0.1"
+    __description__ = """1kh.de Container Plugin"""
+    __author_name__ = ("spoob")
+    __author_mail__ = ("spoob@pyload.org")
+
+    def __init__(self, parent):
+        Crypter.__init__(self, parent)
+        self.parent = parent
+        self.html = None
+
+    def file_exists(self):
+        """ returns True or False
+        """
+        # Always claims existence; no online check is performed.
+        return True
+
+    def proceed(self, url, location):
+        """Collect the iframe target of every DownloadLink_<id> on the page."""
+        url = self.parent.url
+        self.html = self.req.load(url)
+        temp_links = []
+        link_ids = re.findall(r"<a id=\"DownloadLink_(\d*)\" href=\"http://1kh.de/", self.html)
+        for id in link_ids:
+            # Each link id has its own /l/<id> page embedding the real URL.
+            new_link = unescape(re.search("width=\"100%\" src=\"(.*)\"></iframe>", self.req.load("http://1kh.de/l/" + id)).group(1))
+            temp_links.append(new_link)
+        self.links = temp_links
diff --git a/pyload/plugins/crypter/OronComFolder.py b/pyload/plugins/crypter/OronComFolder.py
new file mode 100755
index 000000000..726371966
--- /dev/null
+++ b/pyload/plugins/crypter/OronComFolder.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+
class OronComFolder(Crypter):
    __name__ = "OronComFolder"
    __type__ = "crypter"
    __pattern__ = r"http://(?:www\.)?oron.com/folder/\w+"
    __version__ = "0.2"
    __description__ = """Oron.com Folder Plugin"""
    __author_name__ = ("DHMH")
    __author_mail__ = ("webmaster@pcProfil.de")

    FOLDER_PATTERN = r'<table(?:.*)class="tbl"(?:.*)>(?:.*)<table(?:.*)class="tbl2"(?:.*)>(?P<body>.*)</table>(?:.*)</table>'
    LINK_PATTERN = r'<a href="([^"]+)" target="_blank">'

    def decryptURL(self, url):
        """Return all download links found in the folder page at *url*.

        Problems are reported as warnings and yield an empty list instead
        of failing: a package may span several folders, and failing here
        would discard the links collected from the other folders.
        """
        html = self.load(url)
        collected = []

        if 'No such folder exist' in html:
            self.logWarning("Folder does not exist")
            return collected

        folder = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
        if folder is None:
            self.logWarning("Parse error (FOLDER)")
            return collected

        collected.extend(re.findall(self.LINK_PATTERN, folder.group(0)))

        if collected:
            self.logDebug("Found %d new links" % len(collected))
        else:
            self.logWarning('Could not extract any links')
        return collected
diff --git a/pyload/plugins/crypter/QuickshareCzFolder.py b/pyload/plugins/crypter/QuickshareCzFolder.py
new file mode 100644
index 000000000..6cb049935
--- /dev/null
+++ b/pyload/plugins/crypter/QuickshareCzFolder.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
class QuickshareCzFolder(Crypter):
    __name__ = "QuickshareCzFolder"
    __type__ = "crypter"
    __pattern__ = r"http://(www\.)?quickshare.cz/slozka-\d+.*"
    __version__ = "0.1"
    __description__ = """Quickshare.cz Folder Plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    FOLDER_PATTERN = r'<textarea[^>]*>(.*?)</textarea>'
    LINK_PATTERN = r'(http://www.quickshare.cz/\S+)'

    def decrypt(self, pyfile):
        """Extract all file links from a quickshare.cz folder page.

        Fails the job when the links textarea cannot be found or contains
        no quickshare URLs; otherwise adds the links to pyfile's package.
        """
        # Use the pyfile argument directly (the original read self.pyfile,
        # silently ignoring its own parameter).
        html = self.load(pyfile.url)

        found = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
        if found is None:
            self.fail("Parse error (FOLDER)")

        new_links = re.findall(self.LINK_PATTERN, found.group(1))
        if new_links:
            self.core.files.addLinks(new_links, pyfile.package().id)
        else:
            self.fail('Could not extract any links')
diff --git a/pyload/plugins/crypter/RSDF.py b/pyload/plugins/crypter/RSDF.py
new file mode 100644
index 000000000..cbc9864b1
--- /dev/null
+++ b/pyload/plugins/crypter/RSDF.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+from module.plugins.Crypter import Crypter
+
class RSDF(Crypter):
    __name__ = "RSDF"
    __version__ = "0.21"
    __pattern__ = r".*\.rsdf"
    __description__ = """RSDF Container Decode Plugin"""
    __author_name__ = ("RaNaN", "spoob")
    __author_mail__ = ("RaNaN@pyload.org", "spoob@pyload.org")

    def decrypt(self, pyfile):
        """Decode a local .rsdf container file and queue its links.

        RSDF containers are hex-encoded, AES-CFB encrypted link lists;
        key and IV seed are fixed, publicly known constants of the format.
        """
        # Imported lazily so the plugin loads even without PyCrypto installed.
        from Crypto.Cipher import AES

        infile = pyfile.url.replace("\n", "")

        Key = binascii.unhexlify('8C35192D964DC3182C6F84F3252239EB4A320D2500000000')
        IV = binascii.unhexlify('FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF')
        # The real IV is the all-ones block encrypted with the key (ECB).
        IV_Cipher = AES.new(Key, AES.MODE_ECB)
        IV = IV_Cipher.encrypt(IV)

        obj = AES.new(Key, AES.MODE_CFB, IV)

        # 'with' guarantees the handle is closed even if read() raises
        # (the original left the file open on error).
        with open(infile, 'r') as rsdf:
            data = rsdf.read()

        # A saved "404 - Not Found" page means the container download failed;
        # in that case nothing is added (original behavior preserved).
        if re.search(r"<title>404 - Not Found</title>", data) is None:
            data = binascii.unhexlify(''.join(data.split()))

            links = []
            for line in data.splitlines():
                # each line: base64 -> AES-CFB decrypt -> strip 'CCF: ' prefix
                link = obj.decrypt(base64.b64decode(line))
                links.append(link.replace('CCF: ', ''))

            self.log.debug("%s: adding package %s with %d links" % (self.__name__, pyfile.package().name, len(links)))
            self.packages.append((pyfile.package().name, links))
diff --git a/pyload/plugins/crypter/RSLayerCom.py b/pyload/plugins/crypter/RSLayerCom.py
new file mode 100644
index 000000000..6e4266f2e
--- /dev/null
+++ b/pyload/plugins/crypter/RSLayerCom.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+from module.lib.BeautifulSoup import BeautifulSoup
+from module.unescape import unescape
+
class RSLayerCom(Crypter):
    __name__ = "RSLayerCom"
    __type__ = "container"
    __pattern__ = r"http://(www\.)?rs-layer.com/directory-"
    __config__ = []
    __version__ = "0.2"
    __description__ = """RS-Layer.com Container Plugin"""
    __author_name__ = ("hzpz")
    __author_mail__ = ("none")

    def decrypt(self, pyfile):
        """Decrypt an rs-layer.com directory page, solving the captcha when
        one is shown, and queue the links it contains."""
        url = pyfile.url
        src = self.req.load(str(url))

        soup = BeautifulSoup(src)
        captchaTag = soup.find("img", attrs={"id": "captcha_image"})
        if captchaTag:
            captchaUrl = "http://rs-layer.com/" + captchaTag["src"]
            self.logDebug("Captcha URL: %s" % captchaUrl)
            result = self.decryptCaptcha(str(captchaUrl), imgtype="png")
            # (the original also looked up the captcha <input> tag but never
            # used it -- removed as dead code)
            self.req.lastUrl = url
            src = self.req.load(str(url), post={'captcha_input': result, 'image_name': captchaTag["src"]})

        link_ids = re.findall(r"onclick=\"getFile\(\'([0-9]{7}-.{8})\'\);changeBackgroundColor", src)

        # No file ids on the page means the captcha answer was rejected.
        if not link_ids:
            self.retry()

        self.correctCaptcha()

        links = []
        for link_id in link_ids:
            self.logDebug("ID: %s" % link_id)
            # NOTE(review): the pattern closes with '</frame>' although the
            # page embeds an '<iframe>' -- looks like a typo, but kept as-is;
            # confirm against the live site markup before changing.
            new_link = unescape(re.search(r"<iframe style=\"width: 100%; height: 100%;\" src=\"(.*)\"></frame>",
                                          self.req.load("http://rs-layer.com/link-" + link_id + ".html")).group(1))
            self.logDebug("Link: %s" % new_link)
            links.append(new_link)

        self.packages.append((self.pyfile.package().name, links, self.pyfile.package().folder))
diff --git a/pyload/plugins/crypter/RelinkUs.py b/pyload/plugins/crypter/RelinkUs.py
new file mode 100644
index 000000000..8f29a9158
--- /dev/null
+++ b/pyload/plugins/crypter/RelinkUs.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+
+from Crypto.Cipher import AES
+from module.plugins.Crypter import Crypter
+import base64
+import binascii
+import re
+import os
+
+
+class RelinkUs(Crypter):
+ __name__ = "RelinkUs"
+ __type__ = "crypter"
+ __pattern__ = r"http://(www\.)?relink.us/(f/|((view|go).php\?id=))(?P<id>.+)"
+ __version__ = "3.0"
+ __description__ = """Relink.us Crypter Plugin"""
+ __author_name__ = ("fragonib")
+ __author_mail__ = ("fragonib[AT]yahoo[DOT]es")
+
+ # Constants
+ PREFERRED_LINK_SOURCES = ['cnl2', 'dlc', 'web']
+
+ OFFLINE_TOKEN = "<title>Tattooside"
+ PASSWORD_TOKEN = "container_password.php"
+ PASSWORD_ERROR_ROKEN = "You have entered an incorrect password"
+ PASSWORD_SUBMIT_URL = "http://www.relink.us/container_password.php"
+ CAPTCHA_TOKEN = "container_captcha.php"
+ CAPTCHA_ERROR_ROKEN = "You have solved the captcha wrong"
+ CAPTCHA_IMG_URL = "http://www.relink.us/core/captcha/circlecaptcha.php"
+ CAPTCHA_SUBMIT_URL = "http://www.relink.us/container_captcha.php"
+ FILE_TITLE_REGEX = r"<th>Title</th><td><i>(.*)</i></td></tr>"
+ FILE_NOTITLE = 'No title'
+
+ CNL2_FORM_REGEX = r'<form id="cnl_form-(.*?)</form>'
+ CNL2_FORMINPUT_REGEX = r'<input.*?name="%s".*?value="(.*?)"'
+ CNL2_JK_KEY = "jk"
+ CNL2_CRYPTED_KEY = "crypted"
+ DLC_LINK_REGEX = r'<a href=".*?" class="dlc_button" target="_blank">'
+ DLC_DOWNLOAD_URL = "http://www.relink.us/download.php"
+ WEB_FORWARD_REGEX = r"getFile\('(?P<link>.+)'\)";
+ WEB_FORWARD_URL = "http://www.relink.us/frame.php"
+ WEB_LINK_REGEX = r'<iframe name="Container" height="100%" frameborder="no" width="100%" src="(?P<link>.+)"></iframe>'
+
+
+ def setup(self):
+ self.fileid = None
+ self.package = None
+ self.password = None
+ self.html = None
+ self.captcha = False
+
+ def decrypt(self, pyfile):
+
+ # Init
+ self.initPackage(pyfile)
+
+ # Request package
+ self.requestPackage()
+
+ # Check for online
+ if not self.isOnline():
+ self.offline()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleErrors()
+
+ # Get package name and folder
+ (package_name, folder_name) = self.getPackageInfo()
+
+ # Extract package links
+ package_links = []
+ for sources in self.PREFERRED_LINK_SOURCES:
+ package_links.extend(self.handleLinkSource(sources))
+ if package_links: # use only first source which provides links
+ break
+ package_links = set(package_links)
+
+ # Pack
+ if package_links:
+ self.packages = [(package_name, package_links, folder_name)]
+ else:
+ self.fail('Could not extract any links')
+
+ def initPackage(self, pyfile):
+ self.fileid = re.match(self.__pattern__, pyfile.url).group('id')
+ self.package = pyfile.package()
+ self.password = self.getPassword()
+ self.url = pyfile.url
+
+ def requestPackage(self):
+ self.html = self.load(self.url, decode = True)
+
+ def isOnline(self):
+ if self.OFFLINE_TOKEN in self.html:
+ self.logDebug("File not found")
+ return False
+ return True
+
+ def isPasswordProtected(self):
+ if self.PASSWORD_TOKEN in self.html:
+ self.logDebug("Links are password protected")
+ return True
+
+ def isCaptchaProtected(self):
+ if self.CAPTCHA_TOKEN in self.html:
+ self.logDebug("Links are captcha protected")
+ return True
+ return False
+
+ def unlockPasswordProtection(self):
+ self.logDebug("Submitting password [%s] for protected links" % self.password)
+ passwd_url = self.PASSWORD_SUBMIT_URL + "?id=%s" % self.fileid
+ passwd_data = { 'id': self.fileid, 'password': self.password, 'pw': 'submit' }
+ self.html = self.load(passwd_url, post=passwd_data, decode=True)
+
+ def unlockCaptchaProtection(self):
+ self.logDebug("Request user positional captcha resolving")
+ captcha_img_url = self.CAPTCHA_IMG_URL + "?id=%s" % self.fileid
+ coords = self.decryptCaptcha(captcha_img_url, forceUser=True, imgtype="png", result_type='positional')
+ self.logDebug("Captcha resolved, coords [%s]" % str(coords))
+ captcha_post_url = self.CAPTCHA_SUBMIT_URL + "?id=%s" % self.fileid
+ captcha_post_data = { 'button.x': coords[0], 'button.y': coords[1], 'captcha': 'submit' }
+ self.html = self.load(captcha_post_url, post=captcha_post_data, decode=True)
+
+ def getPackageInfo(self):
+ name = folder = None
+
+ # Try to get info from web
+ m = re.search(self.FILE_TITLE_REGEX, self.html)
+ if m is not None:
+ title = m.group(1).strip()
+ if not self.FILE_NOTITLE in title:
+ name = folder = title
+ self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+
+ # Fallback to defaults
+ if not name or not folder:
+ name = self.package.name
+ folder = self.package.folder
+ self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+
+ # Return package info
+ return name, folder
+
+ def handleErrors(self):
+ if self.PASSWORD_ERROR_ROKEN in self.html:
+ msg = "Incorrect password, please set right password on 'Edit package' form and retry"
+ self.logDebug(msg)
+ self.fail(msg)
+
+ if self.captcha:
+ if self.CAPTCHA_ERROR_ROKEN in self.html:
+ self.logDebug("Invalid captcha, retrying")
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+ def handleLinkSource(self, source):
+ if source == 'cnl2':
+ return self.handleCNL2Links()
+ elif source == 'dlc':
+ return self.handleDLCLinks()
+ elif source == 'web':
+ return self.handleWEBLinks()
+ else:
+ self.fail('Unknown source [%s] (this is probably a bug)' % source)
+
+ def handleCNL2Links(self):
+ self.logDebug("Search for CNL2 links")
+ package_links = []
+ m = re.search(self.CNL2_FORM_REGEX, self.html, re.DOTALL)
+ if m is not None:
+ cnl2_form = m.group(1)
+ try:
+ (vcrypted, vjk) = self._getCipherParams(cnl2_form)
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.logDebug("Unable to decrypt CNL2 links")
+ return package_links
+
+ def handleDLCLinks(self):
+ self.logDebug('Search for DLC links')
+ package_links = []
+ m = re.search(self.DLC_LINK_REGEX, self.html)
+ if m is not None:
+ container_url = self.DLC_DOWNLOAD_URL + "?id=%s&dlc=1" % self.fileid
+ self.logDebug("Downloading DLC container link [%s]" % container_url)
+ try:
+ dlc = self.load(container_url)
+ dlc_filename = self.fileid + ".dlc"
+ dlc_filepath = os.path.join(self.config["general"]["download_folder"], dlc_filename)
+ f = open(dlc_filepath, "wb")
+ f.write(dlc)
+ f.close()
+ package_links.append(dlc_filepath)
+ except:
+ self.logDebug("Unable to download DLC container")
+ return package_links
+
+ def handleWEBLinks(self):
+ self.logDebug("Search for WEB links")
+ package_links = []
+ fw_params = re.findall(self.WEB_FORWARD_REGEX, self.html)
+ self.logDebug("Decrypting %d Web links" % len(fw_params))
+ for index, fw_param in enumerate(fw_params):
+ try:
+ fw_url = self.WEB_FORWARD_URL + "?%s" % fw_param
+ self.logDebug("Decrypting Web link %d, %s" % (index+1, fw_url))
+ fw_response = self.load(fw_url, decode=True)
+ dl_link = re.search(self.WEB_LINK_REGEX, fw_response).group('link')
+ package_links.append(dl_link)
+ except Exception, detail:
+ self.logDebug("Error decrypting Web link %s, %s" % (index, detail))
+ self.setWait(4)
+ self.wait()
+ return package_links
+
+ def _getCipherParams(self, cnl2_form):
+
+ # Get jk
+ jk_re = self.CNL2_FORMINPUT_REGEX % self.CNL2_JK_KEY
+ vjk = re.findall(jk_re, cnl2_form, re.IGNORECASE)
+
+ # Get crypted
+ crypted_re = self.CNL2_FORMINPUT_REGEX % RelinkUs.CNL2_CRYPTED_KEY
+ vcrypted = re.findall(crypted_re, cnl2_form, re.IGNORECASE)
+
+ # Log and return
+ self.logDebug("Detected %d crypted blocks" % len(vcrypted))
+ return vcrypted, vjk
+
+ def _getLinks(self, crypted, jk):
+
+ # Get key
+ jreturn = self.js.eval("%s f()" % jk)
+ self.logDebug("JsEngine returns value [%s]" % jreturn)
+ key = binascii.unhexlify(jreturn)
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ text = text.replace("\x00", "").replace("\r", "")
+ links = text.split("\n")
+ links = filter(lambda x: x != "", links)
+
+ # Log and return
+ self.logDebug("Package has %d links" % len(links))
+ return links
diff --git a/pyload/plugins/crypter/SecuredIn.py b/pyload/plugins/crypter/SecuredIn.py
new file mode 100644
index 000000000..e41896c5f
--- /dev/null
+++ b/pyload/plugins/crypter/SecuredIn.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+from module.lib.BeautifulSoup import BeautifulSoup
+
+from math import ceil
+
+class SecuredIn(Crypter):
+ __name__ = "SecuredIn"
+ __type__ = "container"
+ __pattern__ = r"http://[\w\.]*?secured\.in/download-[\d]+-[\w]{8}\.html"
+ __version__ = "0.1"
+ __description__ = """secured.in Container Plugin"""
+ __author_name__ = ("mkaay")
+ __author_mail__ = ("mkaay@mkaay.de")
+
    def __init__(self, parent):
        # Old-style plugin constructor: keep a reference to the parent
        # wrapper supplied by the core.
        Crypter.__init__(self, parent)
        self.parent = parent
        self.html = None  # last loaded page source
        # disable parallel downloads: the captcha session is stateful
        self.multi_dl = False
+
    def file_exists(self):
        # secured.in offers no cheap existence check; assume the container
        # exists and let proceed() discover dead entries.
        return True
+
    def proceed(self, url, location):
        """Solve the page captcha (up to three attempts), then decrypt every
        listed file id with the site's homegrown cipher via its AJAX handler."""
        links = []
        ajaxUrl = "http://secured.in/ajax-handler.php"
        src = self.req.load(url, cookies=True)
        soup = BeautifulSoup(src)
        img = soup.find("img", attrs={"id": "captcha_img"})
        for i in range(3):  # at most three captcha attempts
            form = soup.find("form", attrs={"id": "frm_captcha"})
            captchaHash = form.find("input", attrs={"id": "captcha_hash"})["value"]
            captchaUrl = "http://secured.in/%s" % img["src"]
            captchaData = self.req.load(str(captchaUrl), cookies=True)
            result = self.waitForCaptcha(captchaData, "jpg")
            src = self.req.load(url, cookies=True, post={"captcha_key": result, "captcha_hash": captchaHash})
            soup = BeautifulSoup(src)
            img = soup.find("img", attrs={"id": "captcha_img"})
            if not img:
                # captcha image gone -> solved; the page now lists the files
                files = soup.findAll("tr", attrs={"id": re.compile("file-\d+")})
                dlIDPattern = re.compile("accessDownload\(\d, \d+, '(.*?)', \d\)")
                cypher = self.Cypher()
                for cfile in files:
                    m = dlIDPattern.search(cfile["onclick"])
                    if m:
                        # each download id resolves via AJAX to an encrypted link
                        crypted = self.req.load(ajaxUrl, cookies=True, post={"cmd": "download", "download_id": m.group(1)})
                        cypher.reset()
                        link = cypher.cypher(crypted)
                        links.append(link)
                break
        self.links = links
+
+ class Cypher():
        def __init__(self):
            # all mutable cipher state is (re)created in reset()
            self.reset()
+
+ def reset(self):
+ self.iatwbfrd = [
+ 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2,
+ 0x858efc16, 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5,
+ 0x9c30d539, 0x2af26013, 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1,
+ 0xbd314b27, 0x78af2fda, 0x55605c60, 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce,
+ 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, 0x7a325381,
+ 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
+ 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842,
+ 0xf6e96c9a, 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98,
+ 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, 0xe06f75d8, 0x85c12073, 0x401a449f,
+ 0x56c16aa6, 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, 0x075372c9, 0x80991b7b,
+ 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, 0x68fb6faf,
+ 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
+ 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8,
+ 0xdb3222f8, 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0,
+ 0x1a87562e, 0xdf1769db, 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98,
+ 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8,
+ 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f,
+ 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
+ 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266,
+ 0x80957705, 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49,
+ 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, 0x78c14389, 0xd95a537f, 0x207d5ba2,
+ 0x02e5b9c5, 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, 0xd60f573f, 0xbc9bc6e4,
+ 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, 0x53b02d5d,
+ 0xa99f8fa1, 0x08ba4799, 0x6e85076a
+ ]
+
+ self.olkemfjq = [
+ 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf,
+ 0x34e90c6c, 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b
+ ]
+
+ self.oqlaoymh = 0
+ self.oqmykrna = 0
+ self.pqmyzkid = 0
+ self.pldmjnde = 0
+ self.ldiwkqly = 0
+
+ self.plkodnyq = [
+ 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e,
+ 0xc700c47b, 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d,
+ 0xd5730a1d, 0x4cd04dc6, 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8,
+ 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9,
+ 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9,
+ 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
+ 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4,
+ 0x88f46dba, 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319,
+ 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, 0x4de81751, 0x3830dc8e, 0x379d5862,
+ 0x9320f991, 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, 0xa2ae0810, 0xdd6db224,
+ 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, 0xdda26a7e,
+ 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
+ 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4,
+ 0xce6ea048, 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b,
+ 0x2f32c9b7, 0xa01fbac9, 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38, 0x0339c32a,
+ 0xc6913667, 0x8df9317c, 0xe0b12b4f, 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525,
+ 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be,
+ 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
+ 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357,
+ 0xa6327623, 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614,
+ 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, 0x71126905, 0xb2040222, 0xb6cbcf7c,
+ 0xcd769c2b, 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, 0x85cbfe4e, 0x8ae88dd8,
+ 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, 0xb74e6132,
+ 0xce77e25b, 0x578fdfe3, 0x3ac372e6
+ ]
+
+ self.pnjzokye = None
+
+ self.thdlpsmy = [
+ 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7,
+ 0x500061af, 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785,
+ 0x7fac6dd0, 0x31cb8504, 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, 0x68dc1462,
+ 0xd7486900, 0x680ec0a4, 0x27a18dee, 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42,
+ 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, 0x6841e7f7,
+ 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
+ 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548,
+ 0xe4c66d22, 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564,
+ 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, 0x6b2395e0, 0x333e92e1, 0x3b240b62,
+ 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, 0x5449a36f, 0x877d48fa,
+ 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, 0xc67b5510,
+ 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
+ 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc,
+ 0x782ef11c, 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386,
+ 0xd90cec6e, 0xd5abea2a, 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d, 0xd1fd8346,
+ 0xf6381fb0, 0x7745ae04, 0xd736fccc, 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61,
+ 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, 0x846a0e79,
+ 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
+ 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83,
+ 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5,
+ 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, 0xbbcbee56, 0x90bcb6de, 0xebfc7da1,
+ 0xce591d76, 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, 0xed545578, 0x08fca5b5,
+ 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, 0xd79a3234,
+ 0x92638212, 0x670efa8e, 0x406000e0
+ ]
+
+ self.ybghjtik = [
+ 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c,
+ 0xc2b19ee1, 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5,
+ 0x4d2d38e6, 0xf0255dc1, 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, 0x687f3584,
+ 0x52a0e286, 0xb79c5305, 0xaa500737, 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff,
+ 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, 0xc8b57634,
+ 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
+ 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f,
+ 0x2e6b7124, 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2,
+ 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, 0x71dff89e, 0x10314e55, 0x81ac77d6,
+ 0x5f11199b, 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, 0x86e34570, 0xeae96fb1,
+ 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, 0xc6150eba,
+ 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
+ 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad,
+ 0x5b6e2f84, 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239,
+ 0xd59e9e0b, 0xcbaade14, 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50, 0x40685a32,
+ 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281,
+ 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, 0x8fd948e4,
+ 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
+ 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2,
+ 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00,
+ 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, 0x7cde3759, 0xcbee7460, 0x4085f2a7,
+ 0xce77326e, 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, 0x9e447a2e, 0xc3453484,
+ 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, 0x153e21e7,
+ 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7
+ ]
+
        def cypher(self, code):
            # Decrypt an AJAX response; the site always uses the empty key.
            return self.lskdqpyr(code, "")
+
        def lskdqpyr(self, alokfmth, yoaksjdh):
            """Custom-base64-decode *alokfmth* and CBC-decrypt it with key
            *yoaksjdh*; the first 8-byte block serves as the IV."""
            # (Re)build the key schedule. NOTE(review): the '==' comparison
            # looks inverted (one would expect to rebuild when the key
            # CHANGED); harmless in practice because cypher() always passes
            # the same "" key -- confirm before reusing with other keys.
            if self.pnjzokye is None or self.pnjzokye.lower() == yoaksjdh:
                self.yoliukev(yoaksjdh)
                self.pnjzokye = yoaksjdh
            alokfmth = self.plaomtje(alokfmth)
            ykaiumgp = ""
            alokijuh = len(alokfmth)
            # first 8 bytes = IV, split into two 32-bit words
            lokimyas = self.ylomiktb(alokfmth[0:8])
            palsiuzt = lokimyas[0]
            tzghbndf = lokimyas[1]
            awsedrft = [None, None]
            for kiujzhqa in range(8, alokijuh, 8):
                lokimyas = self.ylomiktb(alokfmth[kiujzhqa:kiujzhqa + 8])
                # remember this ciphertext block before decrypting it
                awsedrft[0] = lokimyas[0]
                awsedrft[1] = lokimyas[1]
                lokimyas = self.okaqnhlp(lokimyas[0], lokimyas[1])
                # CBC chaining: XOR with the previous ciphertext block
                lokimyas[0] ^= palsiuzt
                lokimyas[1] ^= tzghbndf
                palsiuzt = awsedrft[0]
                tzghbndf = awsedrft[1]
                ykaiumgp += self.ykijmtkd(lokimyas)
            return ykaiumgp
+
        def okaqnhlp(self, lahgrnvp, trenlpys):
            """Block-decrypt two 32-bit halves (Blowfish-shaped Feistel
            network: 16 rounds over the P-array in reverse order).

            NOTE(review): unlike a 32-bit implementation there is no
            '& 0xffffffff' masking, so intermediate Python ints can exceed
            32 bits -- apparently faithful to the obfuscated JS original,
            but confirm output against the site before changing anything.
            """
            ujhaqylw = 0
            for yalmhopr in range(17, 1, -1):
                lahgrnvp ^= self.ldiwkqly[yalmhopr]
                trenlpys ^= (self.oqlaoymh[lahgrnvp >> 24 & 0xff] + self.oqmykrna[lahgrnvp >> 16 & 0xff] ^ self.pqmyzkid[lahgrnvp >> 8 & 0xff]) + self.pldmjnde[lahgrnvp & 0xff]
                # swap halves for the next round
                ujhaqylw = lahgrnvp
                lahgrnvp = trenlpys
                trenlpys = ujhaqylw
            # undo the extra swap performed after the final round
            ujhaqylw = lahgrnvp
            lahgrnvp = trenlpys
            trenlpys = ujhaqylw
            trenlpys ^= self.ldiwkqly[1]
            lahgrnvp ^= self.ldiwkqly[0]
            return [lahgrnvp, trenlpys]
+
        def plaomtje(self, yoiumqpy):
            """Hand-rolled base64 decoder: maps base64 alphabet characters to
            6-bit values and reassembles them into 8-bit characters. Any
            non-alphabet character (padding, whitespace) is skipped."""
            qkailkzt = ""
            xoliuzem = 0   # current 6-bit value
            lyomiujt = 0   # carry bits left over from the previous sextet
            yploemju = -1  # index of the current valid sextet
            for i in range(0, len(yoiumqpy)):
                yploamzu = ord(yoiumqpy[i])
                if ord('A') <= yploamzu <= ord('Z'):
                    xoliuzem = ord(yoiumqpy[i]) - 65
                elif ord('a') <= yploamzu <= ord('z'):
                    xoliuzem = ord(yoiumqpy[i]) - 97 + 26
                elif ord('0') <= yploamzu <= ord('9'):
                    xoliuzem = ord(yoiumqpy[i]) - 48 + 52
                elif yploamzu == ord('+'):
                    xoliuzem = 62
                elif yploamzu == ord('/'):
                    xoliuzem = 63
                else:
                    continue
                yploemju += 1

                lxkdmizj = 0
                # every 4 input sextets yield 3 output bytes; the first
                # sextet of each group of four only primes the carry
                switch = yploemju % 4
                if switch == 0:
                    lyomiujt = xoliuzem
                    continue
                elif switch == 1:
                    lxkdmizj = lyomiujt << 2 | xoliuzem >> 4
                    lyomiujt = xoliuzem & 0x0F
                elif switch == 2:
                    lxkdmizj = lyomiujt << 4 | xoliuzem >> 2
                    lyomiujt = xoliuzem & 0x03
                elif switch == 3:
                    lxkdmizj = lyomiujt << 6 | xoliuzem >> 0
                    lyomiujt = xoliuzem & 0x00
                # Python 2 only: unichr() builds a unicode character
                qkailkzt += unichr(lxkdmizj)
            return qkailkzt
+
        def qmyjuila(self, oqlamykt, yalkionj):
            """Block-ENCRYPT two 32-bit halves (forward direction of the
            Feistel network in okaqnhlp); used only by the key schedule
            in yoliukev()."""
            dolizmvw = 0
            for iumswkya in range(0, 16):
                oqlamykt ^= self.ldiwkqly[iumswkya]
                yalkionj ^= (self.oqlaoymh[oqlamykt >> 24 & 0xff] + self.oqmykrna[oqlamykt >> 16 & 0xff] ^ self.pqmyzkid[oqlamykt >> 8 & 0xff]) + self.pldmjnde[oqlamykt & 0xff]
                # swap halves for the next round
                dolizmvw = oqlamykt
                oqlamykt = yalkionj
                yalkionj = dolizmvw
            # undo the extra swap from the final round
            dolizmvw = oqlamykt
            oqlamykt = yalkionj
            yalkionj = dolizmvw
            yalkionj ^= self.ldiwkqly[16]
            oqlamykt ^= self.ldiwkqly[17]
            return [oqlamykt, yalkionj]
+
+ def ykijmtkd(self, yoirlkqw):
+ loipamyu = len(yoirlkqw)
+ yoirlkqwchar = []
+ for ymujtnbq in range(0, loipamyu):
+ yoir = [yoirlkqw[ymujtnbq] >> 24 & 0xff, yoirlkqw[ymujtnbq] >> 16 & 0xff, yoirlkqw[ymujtnbq] >> 8 & 0xff, yoirlkqw[ymujtnbq] & 0xff]
+ for c in yoir:
+ yoirlkqwchar.append(chr(c))
+ return "".join(yoirlkqwchar)
+
+ def ylomiktb(self, lofiuzmq):
+ plokimqw = int(ceil(len(lofiuzmq) / 4.0))
+ lopkisdq = []
+ for ypoqlktz in range(0, plokimqw):
+ lopkisdq.append(ord(lofiuzmq[(ypoqlktz << 2) + 3]) + (ord(lofiuzmq[(ypoqlktz << 2) + 2]) << 8) + (ord(lofiuzmq[(ypoqlktz << 2) + 1]) << 16) + (ord(lofiuzmq[(ypoqlktz << 2)]) << 24))
+ return lopkisdq
+
        def yoliukev(self, kaiumylq):
            """Build the cipher's key schedule from key string *kaiumylq*.

            Copies the four S-box seed tables and the P-array seed, then
            repeatedly encrypts a zero block to fill the P-array and the
            four S-boxes (the shape of the standard Blowfish key schedule).
            """
            # NOTE(review): these are aliases, not copies -- the loops below
            # overwrite the seed tables in place. Safe only because reset()
            # rebinds fresh list literals before every use; confirm if the
            # call pattern ever changes.
            self.oqlaoymh = self.iatwbfrd
            self.oqmykrna = self.ybghjtik
            self.pqmyzkid = self.thdlpsmy
            self.pldmjnde = self.plkodnyq

            # Fold the key bytes into 32-bit words.
            yaqpolft = [0 for i in range(len(kaiumylq))]

            yaqwsedr = 0
            btzqwsay = 0
            while yaqwsedr < len(kaiumylq):
                wlqoakmy = 0
                for lopiuztr in range(0, 4):
                    wlqoakmy = wlqoakmy << 8 | ord(kaiumylq[yaqwsedr % len(kaiumylq)])
                    yaqwsedr += 1
                yaqpolft[btzqwsay] = wlqoakmy
                btzqwsay += 1
            # NOTE(review): yaqpolft is computed but never XOR-ed into the
            # P-array below, unlike standard Blowfish. Presumably faithful to
            # the obfuscated original (the site only uses the empty key, for
            # which the XOR is a no-op anyway) -- confirm before reuse.
            self.ldiwkqly = []
            for btzqwsay in range(0, 18):
                self.ldiwkqly.append(self.olkemfjq[btzqwsay])
            yalopiuq = [0, 0]
            # Fill P-array, then each S-box, by chained zero-block encryption.
            for btzqwsay in range(0, 18, 2):
                yalopiuq = self.qmyjuila(yalopiuq[0], yalopiuq[1])
                self.ldiwkqly[btzqwsay] = yalopiuq[0]
                self.ldiwkqly[btzqwsay + 1] = yalopiuq[1]
            for btzqwsay in range(0, 256, 2):
                yalopiuq = self.qmyjuila(yalopiuq[0], yalopiuq[1])
                self.oqlaoymh[btzqwsay] = yalopiuq[0]
                self.oqlaoymh[btzqwsay + 1] = yalopiuq[1]
            for btzqwsay in range(0, 256, 2):
                yalopiuq = self.qmyjuila(yalopiuq[0], yalopiuq[1])
                self.oqmykrna[btzqwsay] = yalopiuq[0]
                self.oqmykrna[btzqwsay + 1] = yalopiuq[1]
            for btzqwsay in range(0, 256, 2):
                yalopiuq = self.qmyjuila(yalopiuq[0], yalopiuq[1])
                self.pqmyzkid[btzqwsay] = yalopiuq[0]
                self.pqmyzkid[btzqwsay + 1] = yalopiuq[1]
            for btzqwsay in range(0, 256, 2):
                yalopiuq = self.qmyjuila(yalopiuq[0], yalopiuq[1])
                self.pldmjnde[btzqwsay] = yalopiuq[0]
                self.pldmjnde[btzqwsay + 1] = yalopiuq[1]
+
diff --git a/pyload/plugins/crypter/SerienjunkiesOrg.py b/pyload/plugins/crypter/SerienjunkiesOrg.py
new file mode 100644
index 000000000..89855ad67
--- /dev/null
+++ b/pyload/plugins/crypter/SerienjunkiesOrg.py
@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+
+import re
+from time import sleep
+import random
+
+from BeautifulSoup import BeautifulSoup
+
+from module.plugins.Crypter import Crypter
+from module.unescape import unescape
+
+class SerienjunkiesOrg(Crypter):
+ __name__ = "SerienjunkiesOrg"
+ __type__ = "container"
+ __pattern__ = r"http://.*?(serienjunkies.org|dokujunkies.org)/.*?"
+ __version__ = "0.38"
+ __config__ = [
+ ("changeNameSJ", "Packagename;Show;Season;Format;Episode", "Take SJ.org name", "Show"),
+ ("changeNameDJ", "Packagename;Show;Format;Episode", "Take DJ.org name", "Show"),
+ ("randomPreferred", "bool", "Randomize Preferred-List", False),
+ ("hosterListMode", "OnlyOne;OnlyPreferred(One);OnlyPreferred(All);All", "Use for hosters (if supported)", "All"),
+ ("hosterList", "str", "Preferred Hoster list (comma separated)", "RapidshareCom,UploadedTo,NetloadIn,FilefactoryCom,FreakshareNet,FilebaseTo,HotfileCom,DepositfilesCom,EasyshareCom,KickloadCom"),
+ ("ignoreList", "str", "Ignored Hoster list (comma separated)", "MegauploadCom")
+ ]
+ __description__ = """serienjunkies.org Container Plugin"""
+ __author_name__ = ("mkaay", "godofdream")
+ __author_mail__ = ("mkaay@mkaay.de", "soilfiction@gmail.com")
+
+
+ def setup(self):
+ self.multiDL = False
+
+ def getSJSrc(self, url):
+ src = self.req.load(str(url))
+ if "This website is not available in your country" in src:
+ self.fail("Not available in your country")
+ if not src.find("Enter Serienjunkies") == -1:
+ sleep(1)
+ src = self.req.load(str(url))
+ return src
+
+ def handleShow(self, url):
+ src = self.getSJSrc(url)
+ soup = BeautifulSoup(src)
+ packageName = self.pyfile.package().name
+ if self.getConfig("changeNameSJ") == "Show":
+ found = unescape(soup.find("h2").find("a").string.split(' &#8211;')[0])
+ if found:
+ packageName = found
+
+ nav = soup.find("div", attrs={"id": "scb"})
+
+ package_links = []
+ for a in nav.findAll("a"):
+ if self.getConfig("changeNameSJ") == "Show":
+ package_links.append(a["href"])
+ else:
+ package_links.append(a["href"] + "#hasName")
+ if self.getConfig("changeNameSJ") == "Show":
+ self.packages.append((packageName, package_links, packageName))
+ else:
+ self.core.files.addLinks(package_links, self.pyfile.package().id)
+
+
+ def handleSeason(self, url):
+ src = self.getSJSrc(url)
+ soup = BeautifulSoup(src)
+ post = soup.find("div", attrs={"class": "post-content"})
+ ps = post.findAll("p")
+
+ seasonName = unescape(soup.find("a", attrs={"rel": "bookmark"}).string).replace("&#8211;", "-")
+ groups = {}
+ gid = -1
+ for p in ps:
+ if re.search("<strong>Sprache|<strong>Format", str(p)):
+ var = p.findAll("strong")
+ opts = {"Sprache": "", "Format": ""}
+ for v in var:
+ n = unescape(v.string).strip()
+ n = re.sub(r"^([:]?)(.*?)([:]?)$", r'\2', n)
+ if n.strip() not in opts:
+ continue
+ val = v.nextSibling
+ if not val:
+ continue
+ val = val.replace("|", "").strip()
+ val = re.sub(r"^([:]?)(.*?)([:]?)$", r'\2', val)
+ opts[n.strip()] = val.strip()
+ gid += 1
+ groups[gid] = {}
+ groups[gid]["ep"] = {}
+ groups[gid]["opts"] = opts
+ elif re.search("<strong>Download:", str(p)):
+ parts = str(p).split("<br />")
+ if re.search("<strong>", parts[0]):
+ ename = re.search('<strong>(.*?)</strong>',parts[0]).group(1).strip().decode("utf-8").replace("&#8211;", "-")
+ groups[gid]["ep"][ename] = {}
+ parts.remove(parts[0])
+ for part in parts:
+ hostername = re.search(" \| ([-a-zA-Z0-9]+\.\w+)",part)
+ if hostername:
+ hostername = hostername.group(1)
+ groups[gid]["ep"][ename][hostername] = []
+ links = re.findall('href="(.*?)"',part)
+ for link in links:
+ groups[gid]["ep"][ename][hostername].append(link + "#hasName")
+
+ links = []
+ for g in groups.values():
+ for ename in g["ep"]:
+ links.extend(self.getpreferred(g["ep"][ename]))
+ if self.getConfig("changeNameSJ") == "Episode":
+ self.packages.append((ename, links, ename))
+ links = []
+ package = "%s (%s, %s)" % (seasonName, g["opts"]["Format"], g["opts"]["Sprache"])
+ if self.getConfig("changeNameSJ") == "Format":
+ self.packages.append((package, links, package))
+ links = []
+ if (self.getConfig("changeNameSJ") == "Packagename") or re.search("#hasName", url):
+ self.core.files.addLinks(links, self.pyfile.package().id)
+ elif (self.getConfig("changeNameSJ") == "Season") or not re.search("#hasName", url):
+ self.packages.append((seasonName, links, seasonName))
+
+ def handleEpisode(self, url):
+ src = self.getSJSrc(url)
+ if not src.find(
+ "Du hast das Download-Limit &uuml;berschritten! Bitte versuche es sp&auml;ter nocheinmal.") == -1:
+ self.fail(_("Downloadlimit reached"))
+ else:
+ soup = BeautifulSoup(src)
+ form = soup.find("form")
+ h1 = soup.find("h1")
+
+ if h1.get("class") == "wrap":
+ captchaTag = soup.find(attrs={"src": re.compile("^/secure/")})
+ if not captchaTag:
+ sleep(5)
+ self.retry()
+
+ captchaUrl = "http://download.serienjunkies.org" + captchaTag["src"]
+ result = self.decryptCaptcha(str(captchaUrl), imgtype="png")
+ sinp = form.find(attrs={"name": "s"})
+
+ self.req.lastURL = str(url)
+ sj = self.load(str(url), post={'s': sinp["value"], 'c': result, 'action': "Download"})
+
+ soup = BeautifulSoup(sj)
+ rawLinks = soup.findAll(attrs={"action": re.compile("^http://download.serienjunkies.org/")})
+
+ if not len(rawLinks) > 0:
+ sleep(1)
+ self.retry()
+ return
+
+ self.correctCaptcha()
+
+ links = []
+ for link in rawLinks:
+ frameUrl = link["action"].replace("/go-", "/frame/go-")
+ links.append(self.handleFrame(frameUrl))
+ if re.search("#hasName", url) or ((self.getConfig("changeNameSJ") == "Packagename") and (self.getConfig("changeNameDJ") == "Packagename")):
+ self.core.files.addLinks(links, self.pyfile.package().id)
+ else:
+ if h1.text[2] == "_":
+ eName = h1.text[3:]
+ else:
+ eName = h1.text
+ self.packages.append((eName, links, eName))
+
+
+ def handleOldStyleLink(self, url):
+ sj = self.req.load(str(url))
+ soup = BeautifulSoup(sj)
+ form = soup.find("form", attrs={"action": re.compile("^http://serienjunkies.org")})
+ captchaTag = form.find(attrs={"src": re.compile("^/safe/secure/")})
+ captchaUrl = "http://serienjunkies.org" + captchaTag["src"]
+ result = self.decryptCaptcha(str(captchaUrl))
+ url = form["action"]
+ sinp = form.find(attrs={"name": "s"})
+
+ self.req.load(str(url), post={'s': sinp["value"], 'c': result, 'dl.start': "Download"}, cookies=False,
+ just_header=True)
+ decrypted = self.req.lastEffectiveURL
+ if decrypted == str(url):
+ self.retry()
+ self.core.files.addLinks([decrypted], self.pyfile.package().id)
+
+ def handleFrame(self, url):
+ self.req.load(str(url))
+ return self.req.lastEffectiveURL
+
+ def handleShowDJ(self, url):
+ src = self.getSJSrc(url)
+ soup = BeautifulSoup(src)
+ post = soup.find("div", attrs={"id": "page_post"})
+ ps = post.findAll("p")
+ found = unescape(soup.find("h2").find("a").string.split(' &#8211;')[0])
+ if found:
+ seasonName = found
+
+ groups = {}
+ gid = -1
+ for p in ps:
+ if re.search("<strong>Sprache|<strong>Format", str(p)):
+ var = p.findAll("strong")
+ opts = {"Sprache": "", "Format": ""}
+ for v in var:
+ n = unescape(v.string).strip()
+ n = re.sub(r"^([:]?)(.*?)([:]?)$", r'\2', n)
+ if n.strip() not in opts:
+ continue
+ val = v.nextSibling
+ if not val:
+ continue
+ val = val.replace("|", "").strip()
+ val = re.sub(r"^([:]?)(.*?)([:]?)$", r'\2', val)
+ opts[n.strip()] = val.strip()
+ gid += 1
+ groups[gid] = {}
+ groups[gid]["ep"] = {}
+ groups[gid]["opts"] = opts
+ elif re.search("<strong>Download:", str(p)):
+ parts = str(p).split("<br />")
+ if re.search("<strong>", parts[0]):
+ ename = re.search('<strong>(.*?)</strong>',parts[0]).group(1).strip().decode("utf-8").replace("&#8211;", "-")
+ groups[gid]["ep"][ename] = {}
+ parts.remove(parts[0])
+ for part in parts:
+ hostername = re.search(" \| ([-a-zA-Z0-9]+\.\w+)",part)
+ if hostername:
+ hostername = hostername.group(1)
+ groups[gid]["ep"][ename][hostername] = []
+ links = re.findall('href="(.*?)"',part)
+ for link in links:
+ groups[gid]["ep"][ename][hostername].append(link + "#hasName")
+
+ links = []
+ for g in groups.values():
+ for ename in g["ep"]:
+ links.extend(self.getpreferred(g["ep"][ename]))
+ if self.getConfig("changeNameDJ") == "Episode":
+ self.packages.append((ename, links, ename))
+ links = []
+ package = "%s (%s, %s)" % (seasonName, g["opts"]["Format"], g["opts"]["Sprache"])
+ if self.getConfig("changeNameDJ") == "Format":
+ self.packages.append((package, links, package))
+ links = []
+ if (self.getConfig("changeNameDJ") == "Packagename") or re.search("#hasName", url):
+ self.core.files.addLinks(links, self.pyfile.package().id)
+ elif (self.getConfig("changeNameDJ") == "Show") or not re.search("#hasName", url):
+ self.packages.append((seasonName, links, seasonName))
+
+
+
+
+
+
+
+ def handleCategoryDJ(self, url):
+ package_links = []
+ src = self.getSJSrc(url)
+ soup = BeautifulSoup(src)
+ content = soup.find("div", attrs={"id": "content"})
+ for a in content.findAll("a", attrs={"rel": "bookmark"}):
+ package_links.append(a["href"])
+ self.core.files.addLinks(package_links, self.pyfile.package().id)
+
+ def decrypt(self, pyfile):
+ showPattern = re.compile("^http://serienjunkies.org/serie/(.*)/$")
+ seasonPattern = re.compile("^http://serienjunkies.org/.*?/(.*)/$")
+ episodePattern = re.compile("^http://download.serienjunkies.org/f-.*?.html(#hasName)?$")
+ oldStyleLink = re.compile("^http://serienjunkies.org/safe/(.*)$")
+ categoryPatternDJ = re.compile("^http://dokujunkies.org/.*?(.*)$")
+ showPatternDJ = re.compile("^http://dokujunkies.org/.*?/(.*)\.html(#hasName)?$")
+ framePattern = re.compile("^http://download.(serienjunkies.org|dokujunkies.org)/frame/go-.*?/$")
+ url = pyfile.url
+ if framePattern.match(url):
+ self.packages.append((self.pyfile.package().name, [self.handleFrame(url)], self.pyfile.package().name))
+ elif episodePattern.match(url):
+ self.handleEpisode(url)
+ elif oldStyleLink.match(url):
+ self.handleOldStyleLink(url)
+ elif showPattern.match(url):
+ self.handleShow(url)
+ elif showPatternDJ.match(url):
+ self.handleShowDJ(url)
+ elif seasonPattern.match(url):
+ self.handleSeason(url)
+ elif categoryPatternDJ.match(url):
+ self.handleCategoryDJ(url)
+
+ #selects the preferred hoster, after that selects any hoster (ignoring the one to ignore)
+ def getpreferred(self, hosterlist):
+
+ result = []
+ preferredList = self.getConfig("hosterList").strip().lower().replace('|',',').replace('.','').replace(';',',').split(',')
+ if (self.getConfig("randomPreferred") == True) and (self.getConfig("hosterListMode") in ["OnlyOne","OnlyPreferred(One)"]) :
+ random.shuffle(preferredList)
+ # we don't want hosters be read two times
+ hosterlist2 = hosterlist.copy()
+
+ for preferred in preferredList:
+ for Hoster in hosterlist:
+ if preferred == Hoster.lower().replace('.',''):
+ for Part in hosterlist[Hoster]:
+ self.logDebug("selected " + Part)
+ result.append(str(Part))
+ del(hosterlist2[Hoster])
+ if (self.getConfig("hosterListMode") in ["OnlyOne","OnlyPreferred(One)"]):
+ return result
+
+
+ ignorelist = self.getConfig("ignoreList").strip().lower().replace('|',',').replace('.','').replace(';',',').split(',')
+ if self.getConfig('hosterListMode') in ["OnlyOne","All"]:
+ for Hoster in hosterlist2:
+ if Hoster.strip().lower().replace('.','') not in ignorelist:
+ for Part in hosterlist2[Hoster]:
+ self.logDebug("selected2 " + Part)
+ result.append(str(Part))
+
+ if self.getConfig('hosterListMode') == "OnlyOne":
+ return result
+ return result
diff --git a/pyload/plugins/crypter/ShareLinksBiz.py b/pyload/plugins/crypter/ShareLinksBiz.py
new file mode 100644
index 000000000..b0e735896
--- /dev/null
+++ b/pyload/plugins/crypter/ShareLinksBiz.py
@@ -0,0 +1,269 @@
+# -*- coding: utf-8 -*-
+
+from Crypto.Cipher import AES
+from module.plugins.Crypter import Crypter
+from module.plugins.ReCaptcha import ReCaptcha
+import base64
+import binascii
+import re
+
+
+class ShareLinksBiz(Crypter):
+    """Share-Links.biz decrypter.
+
+    Loads the folder page, handles password and positional-captcha
+    protection, then gathers links from three sources (per-link web
+    redirects, container files, the CNL2 applet) into one package.
+    """
+    __name__ = "ShareLinksBiz"
+    __type__ = "crypter"
+    __pattern__ = r"(?P<base>http://[\w\.]*?(share-links|s2l)\.biz)/(?P<id>_?[0-9a-z]+)(/.*)?"
+    __version__ = "1.12"
+    __description__ = """Share-Links.biz Crypter"""
+    __author_name__ = ("fragonib")
+    __author_mail__ = ("fragonib[AT]yahoo[DOT]es")
+
+    def setup(self):
+        """Reset per-file state before decryption starts."""
+        self.baseUrl = None   # scheme+host part of the folder URL
+        self.fileId = None    # folder id from the URL
+        self.package = None   # the pyfile's package
+        self.html = None      # last loaded page
+        self.captcha = False  # True once a captcha round-trip happened
+
+    def decrypt(self, pyfile):
+        """Main entry point: unlock the folder and emit one package."""
+        # Init
+        self.initFile(pyfile)
+
+        # Request package
+        url = self.baseUrl + '/' + self.fileId
+        self.html = self.load(url, decode=True)
+
+        # Unblock server (load all images)
+        self.unblockServer()
+
+        # Check for protection
+        if self.isPasswordProtected():
+            self.unlockPasswordProtection()
+            self.handleErrors()
+
+        if self.isCaptchaProtected():
+            self.captcha = True
+            self.unlockCaptchaProtection()
+            self.handleErrors()
+
+        # Extract package links
+        package_links = []
+        package_links.extend(self.handleWebLinks())
+        package_links.extend(self.handleContainers())
+        package_links.extend(self.handleCNL2())
+        package_links = set(package_links)
+
+        # Get package info
+        package_name, package_folder = self.getPackageInfo()
+
+        # Pack
+        self.packages = [(package_name, package_links, package_folder)]
+
+    def initFile(self, pyfile):
+        """Resolve s2l.biz short links and split URL into base + id."""
+        url = pyfile.url
+        if 's2l.biz' in url:
+            url = self.load(url, just_header=True)['location']
+        self.baseUrl = re.search(self.__pattern__, url).group(1)
+        self.fileId = re.match(self.__pattern__, url).group('id')
+        self.package = pyfile.package()
+
+    def isOnline(self):
+        """Return False when the page reports the folder is gone."""
+        if "No usable content was found" in self.html:
+            self.logDebug("File not found")
+            return False
+        return True
+
+    def isPasswordProtected(self):
+        """Detect the password form on the loaded page."""
+        if re.search(r'''<form.*?id="passwordForm".*?>''', self.html):
+            self.logDebug("Links are protected")
+            return True
+        return False
+
+    def isCaptchaProtected(self):
+        """Detect the clickable-captcha map on the loaded page."""
+        if '<map id="captchamap"' in self.html:
+            self.logDebug("Links are captcha protected")
+            return True
+        return False
+
+    def unblockServer(self):
+        """Fetch every template image; presumably the server requires
+        these loads before it serves the links (TODO confirm)."""
+        imgs = re.findall("(/template/images/.*?\.gif)", self.html)
+        for img in imgs:
+            self.load(self.baseUrl + img)
+
+    def unlockPasswordProtection(self):
+        """Submit the package password and reload the folder page."""
+        password = self.getPassword()
+        self.logDebug("Submitting password [%s] for protected links" % password)
+        post = {"password": password, 'login': 'Submit form'}
+        url = self.baseUrl + '/' + self.fileId
+        self.html = self.load(url, post=post, decode=True)
+
+    def unlockCaptchaProtection(self):
+        """Solve the positional captcha and follow the matched link."""
+        # Get captcha map
+        captchaMap = self._getCaptchaMap()
+        self.logDebug("Captcha map with [%d] positions" % len(captchaMap.keys()))
+
+        # Request user for captcha coords
+        m = re.search(r'<img src="/captcha.gif\?d=(.*?)&amp;PHPSESSID=(.*?)&amp;legend=1"', self.html)
+        captchaUrl = self.baseUrl + '/captcha.gif?d=%s&PHPSESSID=%s' % (m.group(1), m.group(2))
+        self.logDebug("Waiting user for correct position")
+        coords = self.decryptCaptcha(captchaUrl, forceUser=True, imgtype="gif", result_type='positional')
+        self.logDebug("Captcha resolved, coords [%s]" % str(coords))
+
+        # Resolve captcha
+        href = self._resolveCoords(coords, captchaMap)
+        if href is None:
+            self.logDebug("Invalid captcha resolving, retrying")
+            self.invalidCaptcha()
+            self.setWait(5, False)
+            self.wait()
+            self.retry()
+        url = self.baseUrl + href
+        self.html = self.load(url, decode=True)
+
+    def _getCaptchaMap(self):
+        """Build {(x1, y1, x2, y2): href} from the page's <area> tags."""
+        map = {}
+        for m in re.finditer(r'<area shape="rect" coords="(.*?)" href="(.*?)"', self.html):
+            # NOTE(review): eval() on page-supplied text -- expected to
+            # be "x1,y1,x2,y2" but this executes arbitrary expressions;
+            # consider int()-parsing the split instead.
+            rect = eval('(' + m.group(1) + ')')
+            href = m.group(2)
+            map[rect] = href
+        return map
+
+    def _resolveCoords(self, coords, captchaMap):
+        """Return the href whose rectangle contains *coords*, else None."""
+        x, y = coords
+        for rect, href in captchaMap.items():
+            x1, y1, x2, y2 = rect
+            if (x >= x1 and x <= x2) and (y >= y1 and y <= y2):
+                return href
+
+    def handleErrors(self):
+        """Fail on a wrong password; retry on a wrong captcha answer."""
+        if "The inserted password was wrong" in self.html:
+            self.logDebug("Incorrect password, please set right password on 'Edit package' form and retry")
+            self.fail("Incorrect password, please set right password on 'Edit package' form and retry")
+
+        if self.captcha:
+            if "Your choice was wrong" in self.html:
+                self.logDebug("Invalid captcha, retrying")
+                self.invalidCaptcha()
+                self.setWait(5)
+                self.wait()
+                self.retry()
+            else:
+                self.correctCaptcha()
+
+    def getPackageInfo(self):
+        """Return (name, folder), preferring the page title over the
+        existing package defaults."""
+        name = folder = None
+
+        # Extract from web package header
+        title_re = r'<h2><img.*?/>(.*)</h2>'
+        m = re.search(title_re, self.html, re.DOTALL)
+        if m is not None:
+            title = m.group(1).strip()
+            if 'unnamed' not in title:
+                name = folder = title
+                self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
+
+        # Fallback to defaults
+        if not name or not folder:
+            name = self.package.name
+            folder = self.package.folder
+            self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
+
+        # Return package info
+        return name, folder
+
+    def handleWebLinks(self):
+        """Decrypt each per-link JS redirect via the JS engine."""
+        package_links = []
+        self.logDebug("Handling Web links")
+
+        #@TODO: Gather paginated web links
+        pattern = r"javascript:_get\('(.*?)', \d+, ''\)"
+        ids = re.findall(pattern, self.html)
+        self.logDebug("Decrypting %d Web links" % len(ids))
+        for i, id in enumerate(ids):
+            try:
+                self.logDebug("Decrypting Web link %d, [%s]" % (i + 1, id))
+                dwLink = self.baseUrl + "/get/lnk/" + id
+                response = self.load(dwLink)
+                code = re.search(r'frm/(\d+)', response).group(1)
+                fwLink = self.baseUrl + "/get/frm/" + code
+                response = self.load(fwLink)
+                jscode = re.search(r'<script language="javascript">\s*eval\((.*)\)\s*</script>', response, re.DOTALL).group(1)
+                jscode = self.js.eval("f = %s" % jscode)
+                # Fake the browser objects the obfuscated script writes
+                # to, then read back the redirect target.
+                jslauncher = "window=''; parent={frames:{Main:{location:{href:''}}},location:''}; %s; parent.frames.Main.location.href"
+                dlLink = self.js.eval(jslauncher % jscode)
+                self.logDebug("JsEngine returns value [%s] for redirection link" % dlLink)
+                package_links.append(dlLink)
+            except Exception, detail:
+                self.logDebug("Error decrypting Web link [%s], %s" % (id, detail))
+        return package_links
+
+    def handleContainers(self):
+        """Collect rsdf/ccf/dlc container download links."""
+        package_links = []
+        self.logDebug("Handling Container links")
+
+        pattern = r"javascript:_get\('(.*?)', 0, '(rsdf|ccf|dlc)'\)"
+        containersLinks = re.findall(pattern, self.html)
+        self.logDebug("Decrypting %d Container links" % len(containersLinks))
+        for containerLink in containersLinks:
+            link = "%s/get/%s/%s" % (self.baseUrl, containerLink[1], containerLink[0])
+            package_links.append(link)
+        return package_links
+
+    def handleCNL2(self):
+        """Decrypt Click'n'Load 2 links when the applet is present."""
+        package_links = []
+        self.logDebug("Handling CNL2 links")
+
+        if '/lib/cnl2/ClicknLoad.swf' in self.html:
+            try:
+                (crypted, jk) = self._getCipherParams()
+                package_links.extend(self._getLinks(crypted, jk))
+            except:
+                self.fail("Unable to decrypt CNL2 links")
+        return package_links
+
+    def _getCipherParams(self):
+        """Fetch and base64/reverse-decode the CNL2 (crypted, jk) pair."""
+        # Request CNL2
+        code = re.search(r'ClicknLoad.swf\?code=(.*?)"', self.html).group(1)
+        url = "%s/get/cnl2/%s" % (self.baseUrl, code)
+        response = self.load(url)
+        params = response.split(";;")
+
+        # Get jk (the site serves both fields reversed)
+        strlist = list(base64.standard_b64decode(params[1]))
+        strlist.reverse()
+        jk = ''.join(strlist)
+
+        # Get crypted
+        strlist = list(base64.standard_b64decode(params[2]))
+        strlist.reverse()
+        crypted = ''.join(strlist)
+
+        # Log and return
+        return crypted, jk
+
+    def _getLinks(self, crypted, jk):
+        """AES-decrypt the CNL2 payload and split it into links."""
+        # Get key (the jk JS function returns the hex key)
+        jreturn = self.js.eval("%s f()" % jk)
+        self.logDebug("JsEngine returns value [%s]" % jreturn)
+        key = binascii.unhexlify(jreturn)
+
+        # Decode crypted
+        crypted = base64.standard_b64decode(crypted)
+
+        # Decrypt (CNL2 uses the key as IV as well)
+        Key = key
+        IV = key
+        obj = AES.new(Key, AES.MODE_CBC, IV)
+        text = obj.decrypt(crypted)
+
+        # Extract links (strip zero padding and CRs, one link per line)
+        text = text.replace("\x00", "").replace("\r", "")
+        links = text.split("\n")
+        links = filter(lambda x: x != "", links)
+
+        # Log and return
+        self.logDebug("Block has %d links" % len(links))
+        return links \ No newline at end of file
diff --git a/pyload/plugins/crypter/ShareRapidComFolder.py b/pyload/plugins/crypter/ShareRapidComFolder.py
new file mode 100644
index 000000000..cb7f37525
--- /dev/null
+++ b/pyload/plugins/crypter/ShareRapidComFolder.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
+class ShareRapidComFolder(SimpleCrypter):
+    """Share-Rapid.com folder decrypter.
+
+    Pure SimpleCrypter subclass: all work is done by the base class
+    using the pattern/constant declarations below.
+    """
+    __name__ = "ShareRapidComFolder"
+    __type__ = "crypter"
+    # Matches /slozka/ folder URLs on share-rapid.com and its many
+    # mirror/affiliate domains.
+    __pattern__ = r"http://(?:www\.)?((share(-?rapid\.(biz|com|cz|info|eu|net|org|pl|sk)|-(central|credit|free|net)\.cz|-ms\.net)|(s-?rapid|rapids)\.(cz|sk))|(e-stahuj|mediatack|premium-rapidshare|rapidshare-premium|qiuck)\.cz|kadzet\.com|stahuj-zdarma\.eu|strelci\.net|universal-share\.com)/(slozka/.+)"
+    __version__ = "0.01"
+    __description__ = """Share-Rapid.com Folder Plugin"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    # One file link per row of the folder table.
+    LINK_PATTERN = r'<td class="soubor"[^>]*><a href="([^"]+)">' \ No newline at end of file
diff --git a/pyload/plugins/crypter/SpeedLoadOrgFolder.py b/pyload/plugins/crypter/SpeedLoadOrgFolder.py
new file mode 100644
index 000000000..f85ede6f3
--- /dev/null
+++ b/pyload/plugins/crypter/SpeedLoadOrgFolder.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
+class SpeedLoadOrgFolder(SimpleCrypter):
+    """Speedload.org folder decrypter.
+
+    Pure SimpleCrypter subclass: link extraction and package naming are
+    driven entirely by the patterns declared below.
+    """
+    __name__ = "SpeedLoadOrgFolder"
+    __type__ = "crypter"
+    # Matches both the "<id>~f" short form and /folder/<id>/ URLs.
+    __pattern__ = r"http://(www\.)?speedload\.org/(\d+~f$|folder/\d+/)"
+    __version__ = "0.2"
+    __description__ = """Speedload Crypter Plugin"""
+    __author_name__ = ("stickell")
+    __author_mail__ = ("l.stickell@yahoo.it")
+
+    # One download link per file row; folder title for the package name.
+    LINK_PATTERN = r'<div class="link"><a href="(http://speedload.org/\w+)"'
+    TITLE_PATTERN = r'<title>Files of: (?P<title>[^<]+) folder</title>'
diff --git a/pyload/plugins/crypter/StealthTo.py b/pyload/plugins/crypter/StealthTo.py
new file mode 100644
index 000000000..cf7a79e9b
--- /dev/null
+++ b/pyload/plugins/crypter/StealthTo.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Crypter import Crypter
+
+class StealthTo(Crypter):
+    """Stealth.to folder container.
+
+    Submits each entry's form twice (select, then request) and scrapes
+    the download link from the returned iframe.
+    """
+    __name__ = "StealthTo"
+    __type__ = "container"
+    __pattern__ = r"http://(www\.)?stealth.to/folder/"
+    __version__ = "0.1"
+    __description__ = """Stealth.to Container Plugin"""
+    __author_name__ = ("spoob")
+    __author_mail__ = ("spoob@pyload.org")
+
+    def __init__(self, parent):
+        # Old-style plugin constructor: keep a handle on the parent
+        # pyfile wrapper and defer page loading to proceed().
+        Crypter.__init__(self, parent)
+        self.parent = parent
+        self.html = None  # folder page HTML, filled by proceed()
+
+    def file_exists(self):
+        """ returns True or False
+        """
+        # Always claims existence; the real check happens in proceed().
+        return True
+
+    def proceed(self, url, location):
+        # NOTE(review): the *url* parameter is immediately overwritten
+        # with the parent's URL and *location* is unused -- the
+        # signature is kept only for the old plugin interface.
+        url = self.parent.url
+        self.html = self.req.load(url, cookies=True)
+        temp_links = []
+        ids = []
+        ats = []  # authenticity_token
+        # Collect matching id / authenticity_token pairs from the forms.
+        inputs = re.findall(r"(<(input|form)[^>]+)", self.html)
+        for input in inputs:
+            if re.search(r"name=\"authenticity_token\"", input[0]):
+                ats.append(re.search(r"value=\"([^\"]+)", input[0]).group(1))
+            if re.search(r"name=\"id\"", input[0]):
+                ids.append(re.search(r"value=\"([^\"]+)", input[0]).group(1))
+
+        # First POST selects the entry, second POST returns the page
+        # whose iframe carries the real download link.
+        for i in range(0, len(ids)):
+            self.req.load(url + "/web", post={"authenticity_token": ats[i], "id": str(ids[i]), "link": ("download_" + str(ids[i]))}, cookies=True)
+            new_html = self.req.load(url + "/web", post={"authenticity_token": ats[i], "id": str(ids[i]), "link": "1"}, cookies=True)
+            temp_links.append(re.search(r"iframe src=\"(.*)\" frameborder", new_html).group(1))
+
+        self.links = temp_links
diff --git a/pyload/plugins/crypter/TrailerzoneInfo.py b/pyload/plugins/crypter/TrailerzoneInfo.py
new file mode 100644
index 000000000..43a4fcce5
--- /dev/null
+++ b/pyload/plugins/crypter/TrailerzoneInfo.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
+class TrailerzoneInfo(Crypter):
+    """TrailerZone.info decrypter.
+
+    The page embeds a JS decodeLink() routine; the link payload rides
+    in the URL fragment after '#:::' and is decoded via the JS engine.
+    """
+    __name__ = "TrailerzoneInfo"
+    __type__ = "crypter"
+    __pattern__ = r"http://(www\.)?trailerzone.info/.*?"
+    __version__ = "0.02"
+    __description__ = """TrailerZone.info Crypter Plugin"""
+    __author_name__ = ("godofdream")
+    __author_mail__ = ("soilfiction@gmail.com")
+
+    # Captures the inline script up to (excluding) "var t = window".
+    JS_KEY_PATTERN = r"<script>(.*)var t = window"
+
+    def decrypt(self, pyfile):
+        """Dispatch protect.html / go.html URLs; others are ignored."""
+        protectPattern = re.compile("http://(www\.)?trailerzone.info/protect.html.*?")
+        goPattern = re.compile("http://(www\.)?trailerzone.info/go.html.*?")
+        url = pyfile.url
+        if protectPattern.match(url):
+            self.handleProtect(url)
+        elif goPattern.match(url):
+            self.handleGo(url)
+
+    def handleProtect(self, url):
+        """Rewrite a protect.html URL to go.html, keeping the payload."""
+        self.handleGo("http://trailerzone.info/go.html#:::" + url.split("#:::", 1)[1])
+
+    def handleGo(self, url):
+        """Run the page's decodeLink() on the '#:::' payload and queue
+        the resulting link."""
+        src = self.req.load(str(url))
+        pattern = re.compile(self.JS_KEY_PATTERN, re.DOTALL)
+        found = re.search(pattern, src)
+
+        # Get package info
+        package_links = []
+        try:
+            # Evaluate the site's own script followed by the decode call.
+            result = self.js.eval(found.group(1) + " decodeLink('" + url.split("#:::", 1)[1] + "');")
+            result = str(result)
+            self.logDebug("RESULT: %s" % result)
+            package_links.append(result)
+            self.core.files.addLinks(package_links, self.pyfile.package().id)
+        except Exception, e:
+            self.logDebug(e)
+            self.fail('Could not extract any links by javascript')
diff --git a/pyload/plugins/crypter/UlozToFolder.py b/pyload/plugins/crypter/UlozToFolder.py
new file mode 100644
index 000000000..814d5240d
--- /dev/null
+++ b/pyload/plugins/crypter/UlozToFolder.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
+class UlozToFolder(Crypter):
+    """Uloz.to folder decrypter: walks the paginated folder listing and
+    queues every file link."""
+    __name__ = "UlozToFolder"
+    __type__ = "crypter"
+    __pattern__ = r"http://.*(uloz\.to|ulozto\.(cz|sk|net)|bagruj.cz|zachowajto.pl)/(m|soubory)/.*"
+    __version__ = "0.2"
+    __description__ = """Uloz.to Folder Plugin"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    # File list container, relative file links, and "next page" anchor.
+    FOLDER_PATTERN = r'<ul class="profile_files">(.*?)</ul>'
+    LINK_PATTERN = r'<br /><a href="/([^"]+)">[^<]+</a>'
+    NEXT_PAGE_PATTERN = r'<a class="next " href="/([^"]+)">&nbsp;</a>'
+
+    def decrypt(self, pyfile):
+        """Collect file links across up to 99 folder pages."""
+        html = self.load(self.pyfile.url)
+
+        new_links = []
+        for i in range(1, 100):
+            self.logInfo("Fetching links from page %i" % i)
+            found = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
+            if found is None: self.fail("Parse error (FOLDER)")
+
+            new_links.extend(re.findall(self.LINK_PATTERN, found.group(1)))
+            found = re.search(self.NEXT_PAGE_PATTERN, html)
+            if found:
+                html = self.load("http://ulozto.net/" + found.group(1))
+            else:
+                break
+        else:
+            # for/else: only reached when the loop exhausts all 99
+            # iterations without hitting the break above.
+            self.logInfo("Limit of 99 pages reached, aborting")
+
+        if new_links:
+            self.core.files.addLinks(map(lambda s: "http://ulozto.net/%s" % s, new_links), self.pyfile.package().id)
+        else:
+            self.fail('Could not extract any links') \ No newline at end of file
diff --git a/pyload/plugins/crypter/UploadedToFolder.py b/pyload/plugins/crypter/UploadedToFolder.py
new file mode 100644
index 000000000..88d4e04e8
--- /dev/null
+++ b/pyload/plugins/crypter/UploadedToFolder.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+import re
+
+from module.plugins.internal.SimpleCrypter import SimpleCrypter
+
+
+class UploadedToFolder(SimpleCrypter):
+    """Uploaded.to/net folder decrypter.
+
+    Fetches the folder's plain-text link list (linked from the page)
+    and packages one link per line.
+    """
+    __name__ = "UploadedToFolder"
+    __type__ = "crypter"
+    __pattern__ = r"http://(?:www\.)?(uploaded|ul)\.(to|net)/(f|folder|list)/(?P<id>\w+)"
+    __version__ = "0.3"
+    __description__ = """UploadedTo Crypter Plugin"""
+    __author_name__ = ("stickell")
+    __author_mail__ = ("l.stickell@yahoo.it")
+
+    # Relative URL of the plain-text link list; folder title for naming.
+    PLAIN_PATTERN = r'<small class="date"><a href="(?P<plain>[\w/]+)" onclick='
+    TITLE_PATTERN = r'<title>(?P<title>[^<]+)</title>'
+
+    def decrypt(self, pyfile):
+        """Load the folder page, then its plain link list."""
+        self.html = self.load(pyfile.url)
+
+        package_name, folder_name = self.getPackageNameAndFolder()
+
+        m = re.search(self.PLAIN_PATTERN, self.html)
+        if m:
+            plain_link = 'http://uploaded.net/' + m.group('plain')
+        else:
+            self.fail('Parse error - Unable to find plain url list')
+
+        self.html = self.load(plain_link)
+        # One link per line; the trailing empty line is dropped.
+        package_links = self.html.split('\n')[:-1]
+        self.logDebug('Package has %d links' % len(package_links))
+
+        self.packages = [(package_name, package_links, folder_name)]
diff --git a/pyload/plugins/crypter/WiiReloadedOrg.py b/pyload/plugins/crypter/WiiReloadedOrg.py
new file mode 100644
index 000000000..ba101892d
--- /dev/null
+++ b/pyload/plugins/crypter/WiiReloadedOrg.py
@@ -0,0 +1,52 @@
+
+import re
+
+from module.plugins.Crypter import Crypter
+
+class WiiReloadedOrg(Crypter):
+    """Wii-Reloaded.org protected-folder decrypter.
+
+    Reads the entry ids from the folder page, then resolves each one
+    through the redirect script and collects the Location headers.
+    """
+    __name__ = "WiiReloadedOrg"
+    __type__ = "crypter"
+    __pattern__ = r"http://www\.wii-reloaded\.org/protect/get\.php\?i=.+"
+    __config__ = [("changeName", "bool", "Use Wii-Reloaded.org folder name", "True")]
+    __version__ = "0.1"
+    __description__ = """Wii-Reloaded.org Crypter Plugin"""
+    __author_name__ = ("hzpz")
+    __author_mail__ = ("none")
+
+    def decrypt(self, pyfile):
+        """Resolve all folder entries into direct links, one package."""
+        url = pyfile.url
+        src = self.req.load(str(url))
+
+        # Entry ids are the arguments of the popup_dl() onclick handlers.
+        ids = re.findall(r"onClick=\"popup_dl\((.+)\)\"", src)
+        if len(ids) == 0:
+            self.fail("Unable to decrypt links, this plugin probably needs to be updated")
+
+        packageName = self.pyfile.package().name
+        if self.getConfig("changeName"):
+            packageNameMatch = re.search(r"<div id=\"foldername\">(.+)</div>", src)
+            if not packageNameMatch:
+                self.logWarning("Unable to get folder name, this plugin probably needs to be updated")
+            else:
+                packageName = packageNameMatch.group(1)
+
+        # NOTE(review): the password domain is ".info" while the plugin
+        # pattern is ".org" -- looks intentional (archive password),
+        # but verify.
+        self.pyfile.package().password = "wii-reloaded.info"
+
+        self.logDebug("Processing %d links" % len(ids))
+        links = []
+        for id in ids:
+            # The redirect script checks the Referer, so set lastURL.
+            self.req.lastURL = str(url)
+            header = self.req.load("http://www.wii-reloaded.org/protect/hastesosiehtsaus.php?i=" + id, just_header=True)
+            self.logDebug("Header:\n" + header)
+            redirectLocationMatch = re.search(r"^Location: (.+)$", header, flags=re.MULTILINE)
+            if not redirectLocationMatch:
+                self.offline()
+            redirectLocation = redirectLocationMatch.group(1)
+            self.logDebug(len(redirectLocation))
+            if not redirectLocation.startswith("http"):
+                self.offline()
+            self.logDebug("Decrypted link: %s" % redirectLocation)
+            links.append(redirectLocation)
+
+        self.logDebug("Decrypted %d links" % len(links))
+        self.packages.append((packageName, links, packageName)) \ No newline at end of file
diff --git a/pyload/plugins/crypter/XfilesharingProFolder.py b/pyload/plugins/crypter/XfilesharingProFolder.py
new file mode 100644
index 000000000..90e3044a3
--- /dev/null
+++ b/pyload/plugins/crypter/XfilesharingProFolder.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.Crypter import Crypter, Package
+import re
+
class XfilesharingProFolder(Crypter):
    __name__ = "XfilesharingProFolder"
    __type__ = "crypter"
    __pattern__ = r"http://(?:www\.)?((easybytez|turboupload|uploadville|file4safe|fileband|filebeep|grupload|247upload)\.com|(muchshare|annonhost).net|bzlink.us)/users/.*"
    __version__ = "0.01"
    __description__ = """Generic XfilesharingPro Folder Plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    LINK_PATTERN = r'<div class="link"><a href="([^"]+)" target="_blank">[^<]*</a></div>'
    SUBFOLDER_PATTERN = r'<TD width="1%"><img src="[^"]*/images/folder2.gif"></TD><TD><a href="([^"]+)"><b>(?!\. \.<)([^<]+)</b></a></TD>'

    def decryptURL(self, url):
        """Fetch the folder page at *url* and extract its content links."""
        html = self.load(url, decode=True)
        return self.decryptFile(html)

    def decryptFile(self, html):
        """Collect direct links and sub-folder packages from folder HTML.

        Sub-folders become nested Package entries; their names are prefixed
        with the current package name when one is set. Fails when nothing
        at all could be extracted.
        """
        found = re.findall(self.LINK_PATTERN, html)

        for folder_url, folder_name in re.findall(self.SUBFOLDER_PATTERN, html):
            if self.package:
                folder_name = "%s/%s" % (self.package.name, folder_name)
            found.append(Package(folder_name, [folder_url]))

        if not found:
            self.fail('Could not extract any links')

        return found
diff --git a/pyload/plugins/crypter/YoutubeBatch.py b/pyload/plugins/crypter/YoutubeBatch.py
new file mode 100644
index 000000000..72b72aab7
--- /dev/null
+++ b/pyload/plugins/crypter/YoutubeBatch.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+import json
+
+from module.plugins.Crypter import Crypter
+
+API_KEY = "AIzaSyCKnWLNlkX-L4oD1aEzqqhRw1zczeD6_k0"
+
class YoutubeBatch(Crypter):
    __name__ = "YoutubeBatch"
    __type__ = "container"
    __pattern__ = r"https?://(?:[^/]*?)youtube\.com/(?:(?:view_play_list|playlist|.*?feature=PlayList).*?[?&](?:list|p)=)([a-zA-Z0-9-_]+)"
    __version__ = "0.93"
    __description__ = """Youtube.com Channel Download Plugin"""
    __author_name__ = ("RaNaN", "Spoob", "zoidberg", "roland")
    __author_mail__ = ("RaNaN@pyload.org", "spoob@pyload.org", "zoidberg@mujmail.cz", "roland@enkore.de")

    def get_videos(self, playlist_id, token=None):
        """Yield a watch URL for every video in the playlist.

        Follows the Data API v3 paging via nextPageToken until the last
        page is reached (iterative rewrite of the former recursion).
        """
        base = "https://www.googleapis.com/youtube/v3/playlistItems?playlistId=%s&part=snippet&key=%s&maxResults=50" % (playlist_id, API_KEY)
        while True:
            page_url = base
            if token:
                page_url += "&pageToken=" + token

            page = json.loads(self.load(page_url))

            for entry in page["items"]:
                snippet = entry["snippet"]
                if entry["kind"] == "youtube#playlistItem" and snippet["resourceId"]["kind"] == "youtube#video":
                    yield "http://youtube.com/watch?v=" + snippet["resourceId"]["videoId"]

            if "nextPageToken" in page:
                token = page["nextPageToken"]
            else:
                break

    def decrypt(self, pyfile):
        """Build one package containing every video of the matched playlist."""
        playlist_id = re.match(self.__pattern__, self.pyfile.url).group(1)
        collected = list(self.get_videos(playlist_id))
        self.packages.append((self.pyfile.package().name, collected, self.pyfile.package().name))
diff --git a/pyload/plugins/crypter/__init__.py b/pyload/plugins/crypter/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/crypter/__init__.py
diff --git a/pyload/plugins/hooks/Captcha9kw.py b/pyload/plugins/hooks/Captcha9kw.py
new file mode 100755
index 000000000..cd622b7cd
--- /dev/null
+++ b/pyload/plugins/hooks/Captcha9kw.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay, RaNaN, zoidberg
+"""
+from __future__ import with_statement
+
+from thread import start_new_thread
+from base64 import b64encode
+import cStringIO
+import pycurl
+import time
+
+from module.network.RequestFactory import getURL, getRequest
+from module.network.HTTPRequest import BadHeader
+
+from module.plugins.Hook import Hook
+
class Captcha9kw(Hook):
    """Hook that forwards unsolved captchas to the 9kw.eu solving service."""
    __name__ = "Captcha9kw"
    __version__ = "0.07"
    __description__ = """send captchas to 9kw.eu"""
    __config__ = [("activated", "bool", "Activated", False),
                  ("force", "bool", "Force CT even if client is connected", True),
                  ("https", "bool", "Enable HTTPS", "False"),
                  ("confirm", "bool", "Confirm Captcha (Cost +6)", "False"),
                  ("captchaperhour", "int", "Captcha per hour (max. 9999)", "9999"),
                  ("prio", "int", "Prio 1-10 (Cost +1-10)", "0"),
                  ("timeout", "int", "Timeout (max. 300)", "220"),
                  ("passkey", "password", "API key", ""),]
    __author_name__ = ("RaNaN")
    __author_mail__ = ("RaNaN@pyload.org")

    # scheme-less endpoint; setup() prepends http/https per configuration
    API_URL = "://www.9kw.eu/index.cgi"

    def setup(self):
        # choose transport once, based on the "https" config switch
        self.API_URL = "https"+self.API_URL if self.getConfig("https") else "http"+self.API_URL
        self.info = {}

    def getCredits(self):
        """Query the remaining credits; returns 0 on any API error."""
        response = getURL(self.API_URL, get = { "apikey": self.getConfig("passkey"), "pyload": "1", "source": "pyload", "action": "usercaptchaguthaben" })

        # the API answers with a plain integer on success
        if response.isdigit():
            self.logInfo(_("%s credits left") % response)
            self.info["credits"] = credits = int(response)
            return credits
        else:
            self.logError(response)
            return 0

    def processCaptcha(self, task):
        """Upload the captcha image and poll for the solution.

        Runs in a worker thread (see newCaptchaTask). The image is sent
        base64-encoded; the returned captcha id is then polled every 3s
        (up to ~100 attempts) until a non-empty answer arrives.
        Returns False on upload failure.
        """
        result = None

        with open(task.captchaFile, 'rb') as f:
            data = f.read()
        data = b64encode(data)
        self.logDebug("%s : %s" % (task.captchaFile, data))
        # positional captchas ask the solver for click coordinates
        if task.isPositional():
            mouse = 1
        else:
            mouse = 0

        response = getURL(self.API_URL, post = {
            "apikey": self.getConfig("passkey"),
            "prio": self.getConfig("prio"),
            "confirm": self.getConfig("confirm"),
            "captchaperhour": self.getConfig("captchaperhour"),
            "maxtimeout": self.getConfig("timeout"),
            "pyload": "1",
            "source": "pyload",
            "base64": "1",
            "mouse": mouse,
            "file-upload-01": data,
            "action": "usercaptchaupload" })

        # a purely numeric response is the new captcha id
        if response.isdigit():
            self.logInfo(_("New CaptchaID from upload: %s : %s") % (response,task.captchaFile))

            for i in range(1, 100, 1):
                response2 = getURL(self.API_URL, get = { "apikey": self.getConfig("passkey"), "id": response,"pyload": "1","source": "pyload", "action": "usercaptchacorrectdata" })

                # empty answer means "not solved yet"; keep polling
                if(response2 != ""):
                    break;

                time.sleep(3)

            # NOTE: if all polls stay empty, the last (empty) answer is used
            result = response2
            task.data["ticket"] = response
            self.logInfo("result %s : %s" % (response, result))
            task.setResult(result)
        else:
            self.logError("Bad upload: %s" % response)
            return False

    def newCaptchaTask(self, task):
        """Claim *task* and solve it in a background thread if we can.

        Skips tasks we cannot handle, missing API key, a connected client
        (unless "force" is set), or insufficient credits.
        """
        if not task.isTextual() and not task.isPositional():
            return False

        if not self.getConfig("passkey"):
            return False

        if self.core.isClientConnected() and not self.getConfig("force"):
            return False

        if self.getCredits() > 0:
            task.handler.append(self)
            task.setWaiting(self.getConfig("timeout"))
            # solve asynchronously so the hook thread is not blocked
            start_new_thread(self.processCaptcha, (task,))

        else:
            self.logError(_("Your Captcha 9kw.eu Account has not enough credits"))

    def captchaCorrect(self, task):
        """Report a correctly solved captcha back to 9kw.eu (feedback API)."""
        if "ticket" in task.data:

            try:
                response = getURL(self.API_URL,
                                  post={ "action": "usercaptchacorrectback",
                                         "apikey": self.getConfig("passkey"),
                                         "api_key": self.getConfig("passkey"),
                                         "correct": "1",
                                         "pyload": "1",
                                         "source": "pyload",
                                         "id": task.data["ticket"] }
                )
                self.logInfo("Request correct: %s" % response)

            except BadHeader, e:
                self.logError("Could not send correct request.", str(e))
        else:
            self.logError("No CaptchaID for correct request (task %s) found." % task)

    def captchaInvalid(self, task):
        """Report a wrong solution to 9kw.eu ("correct": "2" requests a refund)."""
        if "ticket" in task.data:

            try:
                response = getURL(self.API_URL,
                                  post={ "action": "usercaptchacorrectback",
                                         "apikey": self.getConfig("passkey"),
                                         "api_key": self.getConfig("passkey"),
                                         "correct": "2",
                                         "pyload": "1",
                                         "source": "pyload",
                                         "id": task.data["ticket"] }
                )
                self.logInfo("Request refund: %s" % response)

            except BadHeader, e:
                self.logError("Could not send refund request.", str(e))
        else:
            self.logError("No CaptchaID for not correct request (task %s) found." % task)
diff --git a/pyload/plugins/hooks/ReloadCc.py b/pyload/plugins/hooks/ReloadCc.py
new file mode 100644
index 000000000..dbd9d659b
--- /dev/null
+++ b/pyload/plugins/hooks/ReloadCc.py
@@ -0,0 +1,65 @@
+from module.plugins.internal.MultiHoster import MultiHoster
+
+from module.common.json_layer import json_loads
+from module.network.RequestFactory import getURL
+
class ReloadCc(MultiHoster):
    __name__ = "ReloadCc"
    __version__ = "0.3"
    __type__ = "hook"
    __description__ = """Reload.cc hook plugin"""

    __config__ = [("activated", "bool", "Activated", "False"),
                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported):", "all"),
                  ("hosterList", "str", "Hoster list (comma separated)", "")]

    __author_name__ = ("Reload Team")
    __author_mail__ = ("hello@reload.cc")

    interval = 0 # Disable periodic calls

    def getHoster(self):
        """Return the hoster names supported by the configured reload.cc account.

        Queries the reload.cc JSON API v1; returns an empty list when no
        usable account exists or the login is rejected.
        """
        # If no accounts are available there will be no hosters available.
        # Fixed: report through the hook logger instead of bare print.
        if not self.account or not self.account.canUse():
            self.logDebug("ReloadCc: No accounts available")
            return []

        # Get account data
        (user, data) = self.account.selectAccount()

        # Get supported hosters list from reload.cc using the json API v1
        query_params = dict(
            via='pyload',
            v=1,
            get_supported='true',
            get_traffic='true',
            user=user
        )

        # prefer the stored password hash; fall back to the plain password
        try:
            query_params.update(dict(hash=self.account.infos[user]['pwdhash']))
        except Exception:
            query_params.update(dict(pwd=data['password']))

        answer = getURL("http://api.reload.cc/login", get=query_params)
        data = json_loads(answer)


        # If the account is not valid there are no hosters available
        if data['status'] != "ok":
            self.logWarning("ReloadCc: Status is not ok: %s" % data['status'])
            return []

        # Extract hosters from json file
        return data['msg']['supportedHosters']

    def coreReady(self):
        """Validate the reload.cc account, then enable the multihoster hook."""
        # Get account plugin and check if there is a valid account available
        self.account = self.core.accountManager.getAccountPlugin("ReloadCc")
        if not self.account.canUse():
            self.account = None
            self.logError("Please add a valid reload.cc account first and restart pyLoad.")
            return

        # Run the overwritten coreReady which actually enables the multihoster hook
        return MultiHoster.coreReady(self)
diff --git a/pyload/plugins/hoster/ARD.py b/pyload/plugins/hoster/ARD.py
new file mode 100644
index 000000000..cda783091
--- /dev/null
+++ b/pyload/plugins/hoster/ARD.py
@@ -0,0 +1,80 @@
+
+import subprocess
+import re
+import os.path
+import os
+
+from module.utils import save_join, save_path
+from module.plugins.Hoster import Hoster
+
+# Requires rtmpdump
+# by Roland Beermann
+
class RTMP:
    # TODO: Port to some RTMP-library like rtmpy or similar
    # TODO?: Integrate properly into the API of pyLoad

    # name of the external binary invoked for every download
    command = "rtmpdump"

    @classmethod
    def download_rtmp_stream(cls, url, output_file, playpath=None):
        """Fetch the RTMP stream at *url* into *output_file* via rtmpdump."""
        arguments = ["-r", url, "-o", output_file]
        if playpath:
            arguments += ["--playpath", playpath]

        cls._invoke_rtmpdump(arguments)

    @classmethod
    def _invoke_rtmpdump(cls, opts):
        """Run rtmpdump with *opts*; raises CalledProcessError on failure."""
        return subprocess.check_call([cls.command] + list(opts))
+
+class ARD(Hoster):
+ __name__ = "ARD Mediathek"
+ __version__ = "0.11"
+ __pattern__ = r"http://www\.ardmediathek\.de/.*"
+ __config__ = []
+
+ def process(self, pyfile):
+ site = self.load(pyfile.url)
+
+ avail_videos = re.findall(r"""mediaCollection.addMediaStream\(0, ([0-9]*), "([^\"]*)", "([^\"]*)", "[^\"]*"\);""", site)
+ avail_videos.sort(key=lambda videodesc: int(videodesc[0]), reverse=True) # The higher the number, the better the quality
+
+ quality, url, playpath = avail_videos[0]
+
+ pyfile.name = re.search(r"<h1>([^<]*)</h1>", site).group(1)
+
+ if url.startswith("http"):
+ # Best quality is available over HTTP. Very rare.
+ self.download(url)
+ else:
+ pyfile.setStatus("downloading")
+
+ download_folder = self.config['general']['download_folder']
+
+ location = save_join(download_folder, pyfile.package().folder)
+
+ if not os.path.exists(location):
+ os.makedirs(location, int(self.core.config["permission"]["folder"], 8))
+
+ if self.core.config["permission"]["change_dl"] and os.name != "nt":
+ try:
+ uid = getpwnam(self.config["permission"]["user"])[2]
+ gid = getgrnam(self.config["permission"]["group"])[2]
+
+ chown(location, uid, gid)
+ except Exception, e:
+ self.logWarning(_("Setting User and Group failed: %s") % str(e))
+
+ output_file = save_join(location, save_path(pyfile.name)) + os.path.splitext(playpath)[1]
+
+ RTMP.download_rtmp_stream(url, playpath=playpath, output_file=output_file)
diff --git a/pyload/plugins/hoster/AlldebridCom.py b/pyload/plugins/hoster/AlldebridCom.py
new file mode 100644
index 000000000..cdb5ccc08
--- /dev/null
+++ b/pyload/plugins/hoster/AlldebridCom.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+import re
+from urllib import unquote
+from random import randrange
+from module.plugins.Hoster import Hoster
+from module.common.json_layer import json_loads
+from module.utils import parseFileSize
+
+
class AlldebridCom(Hoster):
    __name__ = "AlldebridCom"
    __version__ = "0.33"
    __type__ = "hoster"

    __pattern__ = r"https?://.*alldebrid\..*"
    __description__ = """Alldebrid.com hoster plugin"""
    __author_name__ = ("Andy, Voigt")
    __author_mail__ = ("spamsales@online.de")

    def getFilename(self, url):
        """Derive a file name from the last path segment of *url*.

        Falls back to a placeholder (with random suffix) when the URL has
        no usable path segment or the name is truncated with "...".
        """
        try:
            name = unquote(url.rsplit("/", 1)[1])
        except IndexError:
            name = "Unknown_Filename..."
        if name.endswith("..."): #incomplete filename, append random stuff
            name += "%s.tmp" % randrange(100, 999)
        return name

    def init(self):
        # retry counter (currently informational only)
        self.tries = 0
        self.chunkLimit = 3
        self.resumeDownload = True

    def process(self, pyfile):
        """Translate *pyfile.url* via the alldebrid.com API and download it.

        Requires an AllDebrid account; links already pointing at alldebrid
        are downloaded directly without API translation.
        """
        if not self.account:
            self.logError(_("Please enter your %s account or deactivate this plugin") % "AllDebrid")
            self.fail("No AllDebrid account provided")

        self.logDebug("AllDebrid: Old URL: %s" % pyfile.url)
        if re.match(self.__pattern__, pyfile.url):
            new_url = pyfile.url
        else:
            # only the first line of the package password box is used as link password
            password = self.getPassword().splitlines()
            password = "" if not password else password[0]

            url = "http://www.alldebrid.com/service.php?link=%s&json=true&pw=%s" % (pyfile.url, password)
            page = self.load(url)
            data = json_loads(page)

            self.logDebug("Json data: %s" % str(data))

            if data["error"]:
                if data["error"] == "This link isn't available on the hoster website.":
                    self.offline()
                else:
                    self.logWarning(data["error"])
                    self.tempOffline()
            else:
                # keep a name that was already resolved unless it is a temp placeholder
                if self.pyfile.name and not self.pyfile.name.endswith('.tmp'):
                    self.pyfile.name = data["filename"]
                self.pyfile.size = parseFileSize(data["filesize"])
                new_url = data["link"]

        # NOTE(review): "https" is not declared in a __config__ of this plugin —
        # presumably provided by account/plugin settings elsewhere; verify.
        if self.getConfig("https"):
            new_url = new_url.replace("http://", "https://")
        else:
            new_url = new_url.replace("https://", "http://")

        self.logDebug("AllDebrid: New URL: %s" % new_url)

        if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown"):
            #only use when name wasnt already set
            pyfile.name = self.getFilename(new_url)

        self.download(new_url, disposition=True)

        # detect the known error page and empty downloads, then retry
        check = self.checkDownload({"error": "<title>An error occured while processing your request</title>",
                                    "empty": re.compile(r"^$")})

        if check == "error":
            self.retry(reason="An error occured while generating link.", wait_time=60)
        elif check == "empty":
            self.retry(reason="Downloaded File was empty.", wait_time=60)
diff --git a/pyload/plugins/hoster/BasePlugin.py b/pyload/plugins/hoster/BasePlugin.py
new file mode 100644
index 000000000..552e7bc73
--- /dev/null
+++ b/pyload/plugins/hoster/BasePlugin.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from urlparse import urlparse
+from re import search
+from urllib import unquote
+
+from module.network.HTTPRequest import BadHeader
+from module.plugins.Hoster import Hoster
+from module.utils import html_unescape, remove_chars
+
class BasePlugin(Hoster):
    """Fallback hoster used for plain HTTP(S) URLs no other plugin matched."""
    __name__ = "BasePlugin"
    __type__ = "hoster"
    __pattern__ = r"^unmatchable$"
    __version__ = "0.17"
    __description__ = """Base Plugin when any other didn't match"""
    __author_name__ = ("RaNaN")
    __author_mail__ = ("RaNaN@pyload.org")

    def setup(self):
        # unlimited chunks, resume allowed for plain HTTP downloads
        self.chunkLimit = -1
        self.resumeDownload = True

    def process(self, pyfile):
        """main function"""

        #debug part, for api exerciser
        if pyfile.url.startswith("DEBUG_API"):
            self.multiDL = False
            return

        #TODO: remove debug
        if pyfile.url.lower().startswith("debug"):
            self.decryptCaptcha("http://forum.pyload.org/lib/tpl/pyload/images/pyload-logo-edited3.5-new-font-small.png", imgtype="png")
            self.download("http://download.pyload.org/random100.bin")
            return
#
#        if pyfile.url == "79":
#            self.core.api.addPackage("test", [str(i) for i in range(80)], 1)
#
#            return
        if pyfile.url.startswith("http"):

            try:
                self.downloadFile(pyfile)
            except BadHeader, e:
                # 401/403: the server wants HTTP auth; try stored credentials
                if e.code in (401, 403):
                    self.logDebug("Auth required")

                    account = self.core.accountManager.getAccountPlugin('Http')
                    servers = [ x['login'] for x in account.getAllAccounts() ]
                    server = urlparse(pyfile.url).netloc

                    if server in servers:
                        # a matching "Http" account exists for this host
                        self.logDebug("Logging on to %s" % server)
                        self.req.addAuth(account.accounts[server]["password"])
                    else:
                        # otherwise accept "user:password" lines from the package password
                        for pwd in pyfile.package().password.splitlines():
                            if ":" in pwd:
                                self.req.addAuth(pwd.strip())
                                break
                        else:
                            self.fail(_("Authorization required (username:password)"))

                    # retry once with authentication attached
                    self.downloadFile(pyfile)
                else:
                    raise

        else:
            self.fail("No Plugin matched and not a downloadable url.")


    def downloadFile(self, pyfile):
        """Follow up to 5 redirects, derive a file name, then download *pyfile*."""
        url = pyfile.url

        for i in range(5):
            header = self.load(url, just_header = True)

            # self.load does not raise a BadHeader on 404 responses, do it here
            if header.has_key('code') and header['code'] == 404:
                raise BadHeader(404)

            if 'location' in header:
                self.logDebug("Location: " + header['location'])
                url = unquote(header['location'])
            else:
                break

        # default name: last path segment of the final URL
        name = html_unescape(unquote(urlparse(url).path.split("/")[-1]))

        if 'content-disposition' in header:
            self.logDebug("Content-Disposition: " + header['content-disposition'])
            m = search("filename(?P<type>=|\*=(?P<enc>.+)'')(?P<name>.*)", header['content-disposition'])
            if m:
                disp = m.groupdict()
                self.logDebug(disp)
                # RFC 5987 extended syntax ("filename*=enc''...") carries an encoding
                if not disp['enc']: disp['enc'] = 'utf-8'
                name = remove_chars(disp['name'], "\"';/").strip()
                name = unicode(unquote(name), disp['enc'])

        if not name: name = url
        pyfile.name = name
        self.logDebug("Filename: %s" % pyfile.name)
        self.download(url, disposition=True)
diff --git a/pyload/plugins/hoster/BayfilesCom.py b/pyload/plugins/hoster/BayfilesCom.py
new file mode 100644
index 000000000..8473468ba
--- /dev/null
+++ b/pyload/plugins/hoster/BayfilesCom.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.common.json_layer import json_loads
+from time import time
+
class BayfilesCom(SimpleHoster):
    __name__ = "BayfilesCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?bayfiles\.com/file/\w+/\w+/.*"
    __version__ = "0.04"
    __description__ = """Bayfiles.com plugin - free only"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    FILE_INFO_PATTERN = r'<p title="(?P<N>[^"]+)">[^<]*<strong>(?P<S>[0-9., ]+)(?P<U>[kKMG])i?B</strong></p>'
    FILE_OFFLINE_PATTERN = r'(<p>The requested file could not be found.</p>|<title>404 Not Found</title>)'

    WAIT_PATTERN = r'>Your IP [0-9.]* has recently downloaded a file\. Upgrade to premium or wait (\d+) minutes\.<'
    VARS_PATTERN = r'var vfid = (\d+);\s*var delay = (\d+);'
    LINK_PATTERN = r"javascript:window.location.href = '([^']+)';"
    PREMIUM_LINK_PATTERN = r'(?:<a class="highlighted-btn" href="|(?=http://s\d+\.baycdn\.com/dl/))(.*?)"'

    def handleFree(self):
        """Free download: honor the IP wait, fetch a token, wait, get the link."""
        found = re.search(self.WAIT_PATTERN, self.html)
        if found:
            # per-IP limit: wait the advertised number of minutes, then retry
            self.setWait(int(found.group(1)) * 60)
            self.wait()
            self.retry()

        # Get download token
        found = re.search(self.VARS_PATTERN, self.html)
        if not found: self.parseError('VARS')
        vfid, delay = found.groups()

        # "_" is a cache-busting timestamp (milliseconds)
        response = json_loads(self.load('http://bayfiles.com/ajax_download', get = {
            "_": time() * 1000,
            "action": "startTimer",
            "vfid": vfid}, decode = True))

        if not "token" in response or not response['token']:
            self.fail('No token')

        # server-mandated delay before the link may be requested
        self.setWait(int(delay))
        self.wait()

        self.html = self.load('http://bayfiles.com/ajax_download', get = {
            "token": response['token'],
            "action": "getLink",
            "vfid": vfid})

        # Get final link and download
        found = re.search(self.LINK_PATTERN, self.html)
        if not found: self.parseError("Free link")
        self.startDownload(found.group(1))

    def handlePremium(self):
        """Premium download: the direct link is embedded in the page itself."""
        found = re.search(self.PREMIUM_LINK_PATTERN, self.html)
        if not found: self.parseError("Premium link")
        self.startDownload(found.group(1))

    def startDownload(self, url):
        """Download *url* and retry on the known error pages."""
        self.logDebug("%s URL: %s" % ("Premium" if self.premium else "Free", url))
        self.download(url)
        # check download
        check = self.checkDownload({
            "waitforfreeslots": re.compile(r"<title>BayFiles</title>"),
            "notfound": re.compile(r"<title>404 Not Found</title>")
        })
        if check == "waitforfreeslots":
            self.retry(60, 300, "Wait for free slot")
        elif check == "notfound":
            self.retry(60, 300, "404 Not found")

# module-level info getter used by the plugin framework
getInfo = create_getInfo(BayfilesCom)
diff --git a/pyload/plugins/hoster/BezvadataCz.py b/pyload/plugins/hoster/BezvadataCz.py
new file mode 100644
index 000000000..49299d463
--- /dev/null
+++ b/pyload/plugins/hoster/BezvadataCz.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
class BezvadataCz(SimpleHoster):
    __name__ = "BezvadataCz"
    __type__ = "hoster"
    __pattern__ = r"http://(\w*\.)*bezvadata.cz/stahnout/.*"
    __version__ = "0.24"
    __description__ = """BezvaData.cz"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    FILE_NAME_PATTERN = r'<p><b>Soubor: (?P<N>[^<]+)</b></p>'
    FILE_SIZE_PATTERN = r'<li><strong>Velikost:</strong> (?P<S>[^<]+)</li>'
    FILE_OFFLINE_PATTERN = r'<title>BezvaData \| Soubor nenalezen</title>'

    def setup(self):
        self.multiDL = self.resumeDownload = True

    def handleFree(self):
        """Free download: solve the inline base64 captcha, wait out the countdown.

        Fixed: the form must be submitted *inside* the retry loop — the
        original code checked the pre-submission page (which always still
        contains the base64 captcha image it just matched) and only posted
        the form after the loop, so the loop could never succeed.
        """
        #download button
        found = re.search(r'<a class="stahnoutSoubor".*?href="(.*?)"', self.html)
        if not found: self.parseError("page1 URL")
        url = "http://bezvadata.cz%s" % found.group(1)

        #captcha form
        self.html = self.load(url)
        self.checkErrors()
        for i in range(5):
            action, inputs = self.parseHtmlForm('frm-stahnoutFreeForm')
            if not inputs: self.parseError("FreeForm")

            found = re.search(r'<img src="data:image/png;base64,(.*?)"', self.html)
            if not found: self.parseError("captcha img")

            #captcha image is contained in html page as base64encoded data but decryptCaptcha() expects image url
            # temporarily swap self.load for a base64 decoder so decryptCaptcha()
            # receives the raw PNG bytes instead of fetching a URL
            self.load, proper_load = self.loadcaptcha, self.load
            try:
                inputs['captcha'] = self.decryptCaptcha(found.group(1), imgtype='png')
            finally:
                self.load = proper_load

            # submit the captcha form; only the response tells whether it was accepted
            self.html = self.load("http://bezvadata.cz%s" % action, post=inputs)
            self.checkErrors()

            if '<img src="data:image/png;base64' in self.html:
                # response still shows a captcha -> wrong code, try again
                self.invalidCaptcha()
            else:
                self.correctCaptcha()
                break
        else:
            self.fail("No valid captcha code entered")

        #download url
        found = re.search(r'<a class="stahnoutSoubor2" href="(.*?)">', self.html)
        if not found: self.parseError("page2 URL")
        url = "http://bezvadata.cz%s" % found.group(1)
        self.logDebug("DL URL %s" % url)

        #countdown
        found = re.search(r'id="countdown">(\d\d):(\d\d)<', self.html)
        wait_time = (int(found.group(1)) * 60 + int(found.group(2)) + 1) if found else 120
        self.setWait(wait_time, False)
        self.wait()

        self.download(url)

    def checkErrors(self):
        """Translate known error markers on the current page into plugin states."""
        if 'images/button-download-disable.png' in self.html:
            self.longWait(300, 24) #parallel dl limit
        elif '<div class="infobox' in self.html:
            self.tempOffline()

    def loadcaptcha(self, data, *args, **kwargs):
        # stands in for self.load while decrypting the inline base64 captcha
        return data.decode("base64")

getInfo = create_getInfo(BezvadataCz)
diff --git a/pyload/plugins/hoster/BillionuploadsCom.py b/pyload/plugins/hoster/BillionuploadsCom.py
new file mode 100644
index 000000000..5b053d547
--- /dev/null
+++ b/pyload/plugins/hoster/BillionuploadsCom.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
class BillionuploadsCom(XFileSharingPro):
    """billionuploads.com hoster: XFileSharingPro with site-specific patterns."""
    __name__ = "BillionuploadsCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*?billionuploads.com/\w{12}"
    __version__ = "0.01"
    __description__ = """billionuploads.com hoster plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # patterns feeding the generic XFileSharingPro name/size extraction
    FILE_NAME_PATTERN = r'<b>Filename:</b>(?P<N>.*?)<br>'
    FILE_SIZE_PATTERN = r'<b>Size:</b>(?P<S>.*?)<br>'
    HOSTER_NAME = "billionuploads.com"

# module-level info getter used by the plugin framework
getInfo = create_getInfo(BillionuploadsCom)
diff --git a/pyload/plugins/hoster/BitshareCom.py b/pyload/plugins/hoster/BitshareCom.py
new file mode 100644
index 000000000..5e117ff45
--- /dev/null
+++ b/pyload/plugins/hoster/BitshareCom.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+from __future__ import with_statement
+
+import re
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+
class BitshareCom(SimpleHoster):
    __name__ = "BitshareCom"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?bitshare\.com/(files/(?P<id1>[a-zA-Z0-9]+)(/(?P<name>.*?)\.html)?|\?f=(?P<id2>[a-zA-Z0-9]+))"
    __version__ = "0.48"
    __description__ = """Bitshare.Com File Download Hoster"""
    __author_name__ = ("paulking", "fragonib")
    __author_mail__ = (None, "fragonib[AT]yahoo[DOT]es")

    HOSTER_DOMAIN = "bitshare.com"
    FILE_OFFLINE_PATTERN = r'''(>We are sorry, but the requested file was not found in our database|>Error - File not available<|The file was deleted either by the uploader, inactivity or due to copyright claim)'''
    FILE_NAME_PATTERN = r'Download:</td>\s*<td><input type="text" value="http://bitshare\.com/files/\w+/(?P<N>[^"]+)\.html"'
    FILE_SIZE_PATTERN = r'- (?P<S>[\d.]+) (?P<U>\w+)</h1>'
    FILE_AJAXID_PATTERN = r'var ajaxdl = "(.*?)";'
    CAPTCHA_KEY_PATTERN = r"http://api\.recaptcha\.net/challenge\?k=(.*?) "
    TRAFFIC_USED_UP = r"Your Traffic is used up for today. Upgrade to premium to continue!"

    def setup(self):
        # parallel downloads only for premium users; one chunk per file
        self.multiDL = self.premium
        self.chunkLimit = 1

    def process(self, pyfile):
        """Resolve the bitshare file id, handle traffic limits, then download."""
        if self.premium:
            self.account.relogin(self.user)

        self.pyfile = pyfile

        # File id: the URL carries it in one of two alternative groups
        m = re.match(self.__pattern__, self.pyfile.url)
        self.file_id = max(m.group('id1'), m.group('id2'))
        self.logDebug("File id is [%s]" % self.file_id)

        # Load main page
        self.req.cj.setCookie(self.HOSTER_DOMAIN, "language_selection", "EN")
        self.html = self.load(self.pyfile.url, ref=False, decode=True)

        # Check offline
        if re.search(self.FILE_OFFLINE_PATTERN, self.html) is not None:
            self.offline()

        # Check Traffic used up
        if re.search(BitshareCom.TRAFFIC_USED_UP, self.html) is not None:
            self.logInfo("Your Traffic is used up for today. Wait 1800 seconds or reconnect!")
            self.logDebug("Waiting %d seconds." % 1800)
            self.setWait(1800, True)
            self.wantReconnect = True
            self.wait()
            self.retry()

        # File name
        m = re.search(BitshareCom.__pattern__, self.pyfile.url)
        name1 = m.group('name') if m is not None else None
        # NOTE(review): FILE_INFO_PATTERN is not defined in this class —
        # presumably inherited from SimpleHoster, and it must expose a
        # 'name' group for m.group('name') below to work; verify.
        m = re.search(BitshareCom.FILE_INFO_PATTERN, self.html)
        name2 = m.group('name') if m is not None else None
        self.pyfile.name = max(name1, name2)

        # Ajax file id
        self.ajaxid = re.search(BitshareCom.FILE_AJAXID_PATTERN, self.html).group(1)
        self.logDebug("File ajax id is [%s]" % self.ajaxid)

        # This may either download our file or forward us to an error page
        url = self.getDownloadUrl()
        self.logDebug("Downloading file with url [%s]" % url)
        self.download(url)


    def getDownloadUrl(self):
        """Run the ajax handshake (timer, optional captcha) and return the link."""
        # Return location if direct download is active
        if self.premium:
            header = self.load(self.pyfile.url, cookies = True, just_header = True)
            if 'location' in header:
                return header['location']

        # Get download info
        self.logDebug("Getting download info")
        response = self.load("http://bitshare.com/files-ajax/" + self.file_id + "/request.html",
                             post={"request" : "generateID", "ajaxid" : self.ajaxid})
        self.handleErrors(response, ':')
        # response format: "<filetype>:<wait seconds>:<captcha flag>"
        parts = response.split(":")
        filetype = parts[0]
        wait = int(parts[1])
        captcha = int(parts[2])
        self.logDebug("Download info [type: '%s', waiting: %d, captcha: %d]" % (filetype, wait, captcha))

        # Waiting
        if wait > 0:
            self.logDebug("Waiting %d seconds." % wait)
            if wait < 120:
                self.setWait(wait, False)
                self.wait()
            else:
                # long waits: allow a reconnect and restart the handshake
                self.setWait(wait - 55, True)
                self.wait()
                self.retry()

        # Resolve captcha
        if captcha == 1:
            self.logDebug("File is captcha protected")
            id = re.search(BitshareCom.CAPTCHA_KEY_PATTERN, self.html).group(1)
            # Try up to 3 times
            for i in range(3):
                self.logDebug("Resolving ReCaptcha with key [%s], round %d" % (id, i+1))
                recaptcha = ReCaptcha(self)
                challenge, code = recaptcha.challenge(id)
                response = self.load("http://bitshare.com/files-ajax/" + self.file_id + "/request.html",
                                     post={"request" : "validateCaptcha", "ajaxid" : self.ajaxid, "recaptcha_challenge_field" : challenge, "recaptcha_response_field" : code})
                if self.handleCaptchaErrors(response):
                    break


        # Get download URL
        self.logDebug("Getting download url")
        response = self.load("http://bitshare.com/files-ajax/" + self.file_id + "/request.html",
                             post={"request" : "getDownloadURL", "ajaxid" : self.ajaxid})
        self.handleErrors(response, '#')
        url = response.split("#")[-1]

        return url

    def handleErrors(self, response, separator):
        """Retry on session timeouts; fail with the server-supplied message otherwise."""
        self.logDebug("Checking response [%s]" % response)
        if "ERROR:Session timed out" in response:
            self.retry()
        elif "ERROR" in response:
            msg = response.split(separator)[-1]
            self.fail(msg)

    def handleCaptchaErrors(self, response):
        """Return True when the captcha was accepted; retry/mark invalid otherwise."""
        self.logDebug("Result of captcha resolving [%s]" % response)
        if "SUCCESS" in response:
            self.correctCaptcha()
            return True
        elif "ERROR:SESSION ERROR" in response:
            self.retry()
        self.logDebug("Wrong captcha")
        self.invalidCaptcha()

# module-level info getter used by the plugin framework
getInfo = create_getInfo(BitshareCom)
diff --git a/pyload/plugins/hoster/BoltsharingCom.py b/pyload/plugins/hoster/BoltsharingCom.py
new file mode 100644
index 000000000..2f42c8b23
--- /dev/null
+++ b/pyload/plugins/hoster/BoltsharingCom.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+class BoltsharingCom(XFileSharingPro):
+    # Thin XFileSharingPro subclass: boltsharing.com runs a stock XFS
+    # script, so only the URL pattern and hoster name are specialized.
+    __name__ = "BoltsharingCom"
+    __type__ = "hoster"
+    __pattern__ = r"http://(?:\w*\.)*?boltsharing.com/\w{12}"
+    __version__ = "0.01"
+    __description__ = """Boltsharing.com hoster plugin"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    HOSTER_NAME = "boltsharing.com"
+
+# Module-level online-check hook built from the plugin's patterns.
+getInfo = create_getInfo(BoltsharingCom)
diff --git a/pyload/plugins/hoster/CatShareNet.py b/pyload/plugins/hoster/CatShareNet.py
new file mode 100644
index 000000000..3289ef72c
--- /dev/null
+++ b/pyload/plugins/hoster/CatShareNet.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+
+class CatShareNet(SimpleHoster):
+ __name__ = "CatShareNet"
+ __type__ = "hoster"
+ __pattern__ = r"http://(www\.)?catshare.net/\w{16}.*"
+ __version__ = "0.01"
+ __description__ = """CatShare.net Download Hoster"""
+ __author_name__ = ("z00nx")
+ __author_mail__ = ("z00nx0@gmail.com")
+
+ FILE_INFO_PATTERN = r'<h3 class="pull-left"[^>]+>(?P<N>.*)</h3>\s+<h3 class="pull-right"[^>]+>(?P<S>.*)</h3>'
+ FILE_OFFLINE_PATTERN = r'Podany plik zosta'
+ SECONDS_PATTERN = 'var\s+count\s+=\s+(\d+);'
+ RECAPTCHA_KEY = "6Lfln9kSAAAAANZ9JtHSOgxUPB9qfDFeLUI_QMEy"
+
+ def handleFree(self):
+ found = re.search(self.SECONDS_PATTERN, self.html)
+ seconds = int(found.group(1))
+ self.logDebug("Seconds found", seconds)
+ self.setWait(seconds + 1)
+ self.wait()
+ recaptcha = ReCaptcha(self)
+ challenge, code = recaptcha.challenge(self.RECAPTCHA_KEY)
+ post_data = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": code}
+ self.download(self.pyfile.url, post=post_data)
+ check = self.checkDownload({"html": re.compile("\A<!DOCTYPE html PUBLIC")})
+ if check == "html":
+ self.logDebug("Wrong captcha entered")
+ self.invalidCaptcha()
+ self.retry()
+
+getInfo = create_getInfo(CatShareNet)
diff --git a/pyload/plugins/hoster/ChipDe.py b/pyload/plugins/hoster/ChipDe.py
new file mode 100644
index 000000000..fcb84a300
--- /dev/null
+++ b/pyload/plugins/hoster/ChipDe.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
+class ChipDe(Crypter):
+ __name__ = "ChipDe"
+ __type__ = "container"
+ __pattern__ = r"http://(?:www\.)?chip.de/video/.*\.html"
+ __version__ = "0.1"
+ __description__ = """Chip.de Container Plugin"""
+ __author_name__ = ('4Christopher')
+ __author_mail__ = ('4Christopher@gmx.de')
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url)
+ try:
+ url = re.search(r'"(http://video.chip.de/\d+?/.*)"', self.html).group(1)
+ self.logDebug('The file URL is %s' % url)
+ except:
+ self.fail('Failed to find the URL')
+
+ self.packages.append((self.pyfile.package().name, [ url ], self.pyfile.package().folder))
diff --git a/pyload/plugins/hoster/CloudzerNet.py b/pyload/plugins/hoster/CloudzerNet.py
new file mode 100644
index 000000000..6c02203ca
--- /dev/null
+++ b/pyload/plugins/hoster/CloudzerNet.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster
+from module.common.json_layer import json_loads
+from module.plugins.internal.CaptchaService import ReCaptcha
+from module.network.RequestFactory import getURL
+from module.utils import parseFileSize
+
+
+def getInfo(urls):
+    # Online-check generator used by the core: yields one
+    # (name, size, status, url) tuple per input URL.
+    for url in urls:
+        header = getURL(url, just_header=True)
+        if 'Location: http://cloudzer.net/404' in header:
+            # Redirected to the 404 page -> file offline (status 1);
+            # the URL itself stands in for the unknown name.
+            file_info = (url, 0, 1, url)
+        else:
+            # The '/status' endpoint returns name and size on two lines.
+            if url.endswith('/'):
+                api_data = getURL(url + 'status')
+            else:
+                api_data = getURL(url + '/status')
+            name, size = api_data.splitlines()
+            size = parseFileSize(size)
+            # Status 2 = online.
+            file_info = (name, size, 2, url)
+        yield file_info
+
+
+class CloudzerNet(SimpleHoster):
+    # cloudzer.net free-download plugin: wait, claim a download slot via the
+    # ticket API, solve a ReCaptcha, then fetch the returned download URL.
+    __name__ = "CloudzerNet"
+    __type__ = "hoster"
+    __pattern__ = r"http://(www\.)?(cloudzer\.net/file/|clz\.to/(file/)?)(?P<ID>\w+).*"
+    __version__ = "0.03"
+    __description__ = """Cloudzer.net hoster plugin"""
+    __author_name__ = ("gs", "z00nx", "stickell")
+    __author_mail__ = ("I-_-I-_-I@web.de", "z00nx0@gmail.com", "l.stickell@yahoo.it")
+
+    FILE_SIZE_PATTERN = '<span class="size">(?P<S>[^<]+)</span>'
+    WAIT_PATTERN = '<meta name="wait" content="(\d+)">'
+    FILE_OFFLINE_PATTERN = r'Please check the URL for typing errors, respectively'
+    CAPTCHA_KEY = '6Lcqz78SAAAAAPgsTYF3UlGf2QFQCNuPMenuyHF3'
+
+    def handleFree(self):
+        # Forced wait is embedded in a <meta name="wait"> tag.
+        # NOTE(review): no None guard — a missing tag raises AttributeError.
+        found = re.search(self.WAIT_PATTERN, self.html)
+        seconds = int(found.group(1))
+        self.logDebug("Found wait", seconds)
+        self.setWait(seconds + 1)
+        self.wait()
+        # Claim a download slot; the API answers with JSON {"succ": ...}.
+        response = self.load('http://cloudzer.net/io/ticket/slot/%s' % self.file_info['ID'], post=' ', cookies=True)
+        self.logDebug("Download slot request response", response)
+        response = json_loads(response)
+        if response["succ"] is not True:
+            self.fail("Unable to get a download slot")
+
+        # Submit the ReCaptcha solution to the captcha ticket endpoint.
+        recaptcha = ReCaptcha(self)
+        challenge, response = recaptcha.challenge(self.CAPTCHA_KEY)
+        post_data = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": response}
+        response = json_loads(self.load('http://cloudzer.net/io/ticket/captcha/%s' % self.file_info['ID'], post=post_data, cookies=True))
+        self.logDebug("Captcha check response", response)
+        self.logDebug("First check")
+
+        if "err" in response:
+            if response["err"] == "captcha":
+                self.logDebug("Wrong captcha")
+                self.invalidCaptcha()
+                self.retry()
+            # The limit message is localized (German/English), so test both.
+            elif "Sie haben die max" in response["err"] or "You have reached the max" in response["err"]:
+                self.logDebug("Download limit reached, waiting an hour")
+                self.setWait(3600, True)
+                self.wait()
+        if "type" in response:
+            if response["type"] == "download":
+                url = response["url"]
+                self.logDebug("Download link", url)
+                self.download(url, disposition=True)
diff --git a/pyload/plugins/hoster/CramitIn.py b/pyload/plugins/hoster/CramitIn.py
new file mode 100644
index 000000000..171fba0ff
--- /dev/null
+++ b/pyload/plugins/hoster/CramitIn.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+class CramitIn(XFileSharingPro):
+    # cramit.in XFS-based plugin; overrides the info/link patterns and
+    # restricts resume/parallel downloads to premium accounts.
+    __name__ = "CramitIn"
+    __type__ = "hoster"
+    __pattern__ = r"http://(?:\w*\.)*cramit.in/\w{12}"
+    __version__ = "0.04"
+    __description__ = """Cramit.in hoster plugin"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    FILE_INFO_PATTERN = r'<span class=t2>\s*(?P<N>.*?)</span>.*?<small>\s*\((?P<S>.*?)\)'
+    DIRECT_LINK_PATTERN = r'href="(http://cramit.in/file_download/.*?)"'
+    HOSTER_NAME = "cramit.in"
+
+    def setup(self):
+        # Resume and parallel downloads only for premium users.
+        self.resumeDownload = self.multiDL = self.premium
+
+# Module-level online-check hook built from the plugin's patterns.
+getInfo = create_getInfo(CramitIn)
\ No newline at end of file
diff --git a/pyload/plugins/hoster/CrockoCom.py b/pyload/plugins/hoster/CrockoCom.py
new file mode 100644
index 000000000..f075d073b
--- /dev/null
+++ b/pyload/plugins/hoster/CrockoCom.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha
+import re
+
+class CrockoCom(SimpleHoster):
+    # crocko.com / easy-share.com free-download plugin: follow up to five
+    # intermediate wait pages, then submit a ReCaptcha-protected form
+    # (retried up to five times) to start the download.
+    __name__ = "CrockoCom"
+    __type__ = "hoster"
+    __pattern__ = r"http://(www\.)?(crocko|easy-share).com/.*"
+    __version__ = "0.13"
+    __description__ = """Crocko Download Hoster"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    FILE_NAME_PATTERN = r'<span class="fz24">Download:\s*<strong>(?P<N>.*)'
+    FILE_SIZE_PATTERN = r'<span class="tip1"><span class="inner">(?P<S>[^<]+)</span></span>'
+    FILE_OFFLINE_PATTERN = r"<h1>Sorry,<br />the page you're looking for <br />isn't here.</h1>"
+    DOWNLOAD_URL_PATTERN = r"window.location ='([^']+)';"
+    # Pre-compiled: reused on every wait-page iteration and in checkDownload.
+    CAPTCHA_URL_PATTERN = re.compile(r"u='(/file_contents/captcha/\w+)';\s*w='(\d+)';")
+    CAPTCHA_KEY_PATTERN = re.compile(r'Recaptcha.create\("([^"]+)"')
+
+    FORM_PATTERN = r'<form method="post" action="([^"]+)">(.*?)</form>'
+    FORM_INPUT_PATTERN = r'<input[^>]* name="?([^" ]+)"? value="?([^" ]+)"?[^>]*>'
+
+    # Strip HTML tags that leak into the captured file name.
+    FILE_NAME_REPLACEMENTS = [(r'<[^>]*>', '')]
+
+    def handleFree(self):
+        if "You need Premium membership to download this file." in self.html:
+            self.fail("You need Premium membership to download this file.")
+
+        # Follow up to 5 chained wait pages until no captcha URL remains.
+        url = False
+        for i in range(5):
+            found = re.search(self.CAPTCHA_URL_PATTERN, self.html)
+            if found:
+                url, wait_time = 'http://crocko.com' + found.group(1), found.group(2)
+                self.setWait(wait_time)
+                self.wait()
+                self.html = self.load(url)
+            else:
+                break
+
+        found = re.search(self.CAPTCHA_KEY_PATTERN, self.html)
+        if not found: self.parseError('Captcha KEY')
+        captcha_key = found.group(1)
+
+        found = re.search(self.FORM_PATTERN, self.html, re.DOTALL)
+        if not found: self.parseError('ACTION')
+        action, form = found.groups()
+        inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+
+        recaptcha = ReCaptcha(self)
+
+        # Up to 5 captcha attempts; for/else: fail only if all attempts
+        # were rejected (the downloaded content still contained a captcha).
+        for i in range(5):
+            inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(captcha_key)
+            self.download(action, post = inputs)
+
+            check = self.checkDownload({
+                "captcha_err": self.CAPTCHA_KEY_PATTERN
+            })
+
+            if check == "captcha_err":
+                self.invalidCaptcha()
+            else:
+                break
+        else:
+            self.fail('No valid captcha solution received')
+
+# Module-level online-check hook built from the plugin's patterns.
+getInfo = create_getInfo(CrockoCom)
+ \ No newline at end of file
diff --git a/pyload/plugins/hoster/CyberlockerCh.py b/pyload/plugins/hoster/CyberlockerCh.py
new file mode 100644
index 000000000..57dd26787
--- /dev/null
+++ b/pyload/plugins/hoster/CyberlockerCh.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+class CyberlockerCh(XFileSharingPro):
+    # Thin XFileSharingPro subclass: cyberlocker.ch runs a stock XFS
+    # script, so only the URL pattern and hoster name are specialized.
+    __name__ = "CyberlockerCh"
+    __type__ = "hoster"
+    __pattern__ = r"http://(www\.)?cyberlocker\.ch/\w{12}"
+    __version__ = "0.01"
+    __description__ = """Cyberlocker.ch hoster plugin"""
+    __author_name__ = ("stickell")
+    __author_mail__ = ("l.stickell@yahoo.it")
+
+    HOSTER_NAME = "cyberlocker.ch"
+
+# Module-level online-check hook built from the plugin's patterns.
+getInfo = create_getInfo(CyberlockerCh)
diff --git a/pyload/plugins/hoster/CzshareCom.py b/pyload/plugins/hoster/CzshareCom.py
new file mode 100644
index 000000000..347427586
--- /dev/null
+++ b/pyload/plugins/hoster/CzshareCom.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+# Test links (random.bin):
+# http://czshare.com/5278880/random.bin
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, PluginParseError
+from module.utils import parseFileSize
+
+class CzshareCom(SimpleHoster):
+    # czshare.com plugin with both premium (credit-based) and free
+    # (captcha + wait) download paths, plus post-download validation.
+    __name__ = "CzshareCom"
+    __type__ = "hoster"
+    __pattern__ = r"http://(\w*\.)*czshare\.(com|cz)/(\d+/|download.php\?).*"
+    __version__ = "0.93"
+    __description__ = """CZshare.com"""
+    __author_name__ = ("zoidberg")
+
+    FILE_NAME_PATTERN = r'<div class="tab" id="parameters">\s*<p>\s*Cel. n.zev: <a href=[^>]*>(?P<N>[^<]+)</a>'
+    FILE_SIZE_PATTERN = r'<div class="tab" id="category">(?:\s*<p>[^\n]*</p>)*\s*Velikost:\s*(?P<S>[0-9., ]+)(?P<U>[kKMG])i?B\s*</div>'
+    FILE_OFFLINE_PATTERN = r'<div class="header clearfix">\s*<h2 class="red">'
+
+    # Sizes on the page use a space as thousands separator.
+    FILE_SIZE_REPLACEMENTS = [(' ', '')]
+    FILE_URL_REPLACEMENTS = [(r'http://[^/]*/download.php\?.*?id=(\w+).*', r'http://czshare.com/\1/x/')]
+    # Ask SimpleHoster to call checkTrafficLeft() before premium downloads.
+    SH_CHECK_TRAFFIC = True
+
+    FREE_URL_PATTERN = r'<a href="([^"]+)" class="page-download">[^>]*alt="([^"]+)" /></a>'
+    FREE_FORM_PATTERN = r'<form action="download.php" method="post">\s*<img src="captcha.php" id="captcha" />(.*?)</form>'
+    PREMIUM_FORM_PATTERN = r'<form action="/profi_down.php" method="post">(.*?)</form>'
+    FORM_INPUT_PATTERN = r'<input[^>]* name="([^"]+)" value="([^"]+)"[^>]*/>'
+    MULTIDL_PATTERN = r"<p><font color='red'>Z[^<]*PROFI.</font></p>"
+    USER_CREDIT_PATTERN = r'<div class="credit">\s*kredit: <strong>([0-9., ]+)([kKMG]i?B)</strong>\s*</div><!-- .credit -->'
+
+    def setup(self):
+        # Resume/parallel downloads are premium-only; single chunk always.
+        self.multiDL = self.resumeDownload = True if self.premium else False
+        self.chunkLimit = 1
+
+    def checkTrafficLeft(self):
+        # Returns True when the logged-in account has enough credit for
+        # this file; False forces a fallback (e.g. to the free path).
+        # check if user logged in
+        found = re.search(self.USER_CREDIT_PATTERN, self.html)
+        if not found:
+            # Credit box missing -> session probably expired; relogin once.
+            self.account.relogin(self.user)
+            self.html = self.load(self.pyfile.url, cookies=True, decode=True)
+            found = re.search(self.USER_CREDIT_PATTERN, self.html)
+            if not found: return False
+
+        # check user credit
+        try:
+            credit = parseFileSize(found.group(1).replace(' ',''), found.group(2))
+            self.logInfo("Premium download for %i KiB of Credit" % (self.pyfile.size / 1024))
+            self.logInfo("User %s has %i KiB left" % (self.user, credit / 1024))
+            if credit < self.pyfile.size:
+                self.logInfo("Not enough credit to download file %s" % self.pyfile.name)
+                return False
+        except Exception, e:
+            # Best-effort: a credit-parse failure is logged but does not
+            # block the download attempt.
+            # let's continue and see what happens...
+            self.logError('Parse error (CREDIT): %s' % e)
+
+        return True
+
+    def handlePremium(self):
+        # Premium path: re-submit the hidden profi_down form fields.
+        # parse download link
+        try:
+            form = re.search(self.PREMIUM_FORM_PATTERN, self.html, re.DOTALL).group(1)
+            inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+        except Exception, e:
+            self.logError("Parse error (FORM): %s" % e)
+            self.resetAccount()
+
+        # download the file, destination is determined by pyLoad
+        self.download("http://czshare.com/profi_down.php", post=inputs, disposition=True)
+        self.checkDownloadedFile()
+
+    def handleFree(self):
+        # Free path: follow the free-download link, solve an image captcha
+        # (up to 5 tries), wait out the countdown, then download.
+        # get free url
+        found = re.search(self.FREE_URL_PATTERN, self.html)
+        if found is None:
+            raise PluginParseError('Free URL')
+        parsed_url = "http://czshare.com" + found.group(1)
+        self.logDebug("PARSED_URL:" + parsed_url)
+
+        # get download ticket and parse html
+        self.html = self.load(parsed_url, cookies=True, decode=True)
+        if re.search(self.MULTIDL_PATTERN, self.html):
+            # Another download already running for this IP -> long wait.
+            self.longWait(300, 12)
+
+        try:
+            form = re.search(self.FREE_FORM_PATTERN, self.html, re.DOTALL).group(1)
+            inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+            self.pyfile.size = int(inputs['size'])
+        except Exception, e:
+            self.logError(e)
+            raise PluginParseError('Form')
+
+        # get and decrypt captcha; for/else fails only when all 5 tries
+        # were rejected.
+        captcha_url = 'http://czshare.com/captcha.php'
+        for i in range(5):
+            inputs['captchastring2'] = self.decryptCaptcha(captcha_url)
+            self.html = self.load(parsed_url, cookies=True, post=inputs, decode=True)
+            if u"<li>ZadanÃœ ověřovací kód nesouhlasí!</li>" in self.html:
+                self.invalidCaptcha()
+            elif re.search(self.MULTIDL_PATTERN, self.html):
+                self.longWait(300, 12)
+            else:
+                self.correctCaptcha()
+                break
+        else:
+            self.fail("No valid captcha code entered")
+
+        # Countdown length comes from the page; default to 50s if absent.
+        found = re.search("countdown_number = (\d+);", self.html)
+        self.setWait(int(found.group(1)) if found else 50)
+
+        # download the file, destination is determined by pyLoad
+        self.logDebug("WAIT URL", self.req.lastEffectiveURL)
+        found = re.search("free_wait.php\?server=(.*?)&(.*)", self.req.lastEffectiveURL)
+        if not found:
+            raise PluginParseError('Download URL')
+
+        url = "http://%s/download.php?%s" % (found.group(1), found.group(2))
+
+        self.wait()
+        self.multiDL = True
+        self.download(url)
+        self.checkDownloadedFile()
+
+    def checkDownloadedFile(self):
+        # Validate the downloaded payload against known error pages
+        # (Czech messages; patterns deliberately loose about diacritics).
+        # check download
+        check = self.checkDownload({
+            "tempoffline": re.compile(r"^Soubor je do.*asn.* nedostupn.*$"),
+            "credit": re.compile(r"^Nem.*te dostate.*n.* kredit.$"),
+            "multi_dl": re.compile(self.MULTIDL_PATTERN),
+            "captcha_err": "<li>ZadanÃœ ověřovací kód nesouhlasí!</li>"
+        })
+
+        if check == "tempoffline":
+            self.fail("File not available - try later")
+        if check == "credit":
+            self.resetAccount()
+        elif check == "multi_dl":
+            self.longWait(300, 12)
+        elif check == "captcha_err":
+            self.invalidCaptcha()
+            self.retry()
+
+# Module-level online-check hook built from the plugin's patterns.
+getInfo = create_getInfo(CzshareCom)
diff --git a/pyload/plugins/hoster/DailymotionCom.py b/pyload/plugins/hoster/DailymotionCom.py
new file mode 100644
index 000000000..1b411393d
--- /dev/null
+++ b/pyload/plugins/hoster/DailymotionCom.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from urllib import unquote
+from module.plugins.Hoster import Hoster
+
+class DailymotionCom(Hoster):
+    # Dailymotion video downloader: scrapes the title for the file name,
+    # decodes the "sequence" JSON blob, and picks the best available quality.
+    __name__ = 'DailymotionCom'
+    __type__ = 'hoster'
+    __pattern__ = r'http://www.dailymotion.com/.*'
+    __version__ = '0.1'
+    __description__ = """Dailymotion Video Download Hoster"""
+    __author_name__ = ("Peekayy")
+    __author_mail__ = ("peekayy.dev@gmail.com")
+
+    def process(self, pyfile):
+        html = self.load(pyfile.url, decode=True)
+
+        # Try several title markups; for/else fails only if none matched.
+        for pattern in (r'name="title" content="Dailymotion \\-(.*?)\\- ein Film',
+                        r'class="title" title="(.*?)"',
+                        r'<span class="title foreground" title="(.*?)">',
+                        r'"(?:vs_videotitle|videoTitle|dm_title|ss_mediaTitle)": "(.*?)"'):
+            filename = re.search(pattern, html)
+            if filename is not None: break
+        else:
+            self.fail("Unable to find file name")
+
+        pyfile.name = filename.group(1)+'.mp4'
+        self.logDebug('Filename='+pyfile.name)
+        # The URL-encoded "sequence" blob holds the per-quality links.
+        # NOTE(review): no None guard — a missing blob raises AttributeError.
+        allLinksInfo = re.search(r'"sequence":"(.*?)"', html)
+        self.logDebug(allLinksInfo.groups())
+        allLinksInfo = unquote(allLinksInfo.group(1))
+
+        # Highest quality first; '' matches any remaining URL key.
+        for quality in ('hd720URL', 'hqURL', 'sdURL', 'ldURL', ''):
+            dlLink = self.getQuality(quality, allLinksInfo)
+            if dlLink is not None: break
+        else:
+            self.fail(r'Unable to find video URL')
+
+        self.logDebug(dlLink)
+        self.download(dlLink)
+
+    def getQuality(self, quality, data):
+        # Return the un-escaped http link for the given quality key,
+        # or None (implicitly) when that quality is absent.
+        link = re.search('"' + quality + '":"(http:[^<>"\']+)"', data)
+        if link is not None:
+            return link.group(1).replace('\\','')
\ No newline at end of file
diff --git a/pyload/plugins/hoster/DataHu.py b/pyload/plugins/hoster/DataHu.py
new file mode 100644
index 000000000..7abd93d1f
--- /dev/null
+++ b/pyload/plugins/hoster/DataHu.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+# Test links (random.bin):
+# http://data.hu/get/6381232/random.bin
+
+import re
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
+class DataHu(SimpleHoster):
+    # data.hu free-download plugin: the direct link is present in the
+    # page HTML, so handleFree just extracts and downloads it.
+    __name__ = "DataHu"
+    __type__ = "hoster"
+    __pattern__ = r"http://(www\.)?data.hu/get/\w+"
+    __version__ = "0.01"
+    __description__ = """Data.hu Download Hoster"""
+    __author_name__ = ("crash", "stickell")
+    __author_mail__ = ("l.stickell@yahoo.it")
+
+    # Hungarian page strings, escaped as unicode literals.
+    FILE_INFO_PATTERN = ur'<title>(?P<N>.*) \((?P<S>[^)]+)\) let\xf6lt\xe9se</title>'
+    FILE_OFFLINE_PATTERN = ur'Az adott f\xe1jl nem l\xe9tezik'
+    DIRECT_LINK_PATTERN = r'<div class="download_box_button"><a href="([^"]+)">'
+
+    def handleFree(self):
+        self.resumeDownload = True
+        self.html = self.load(self.pyfile.url, decode=True)
+
+        m = re.search(self.DIRECT_LINK_PATTERN, self.html)
+        if m:
+            url = m.group(1)
+            self.logDebug('Direct link: ' + url)
+        else:
+            # parseError raises, so `url` is always bound below.
+            self.parseError('Unable to get direct link')
+
+        self.download(url, disposition=True)
+
+
+# Module-level online-check hook built from the plugin's patterns.
+getInfo = create_getInfo(DataHu)
diff --git a/pyload/plugins/hoster/DataportCz.py b/pyload/plugins/hoster/DataportCz.py
new file mode 100644
index 000000000..3dc581bf1
--- /dev/null
+++ b/pyload/plugins/hoster/DataportCz.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, PluginParseError
+from pycurl import FOLLOWLOCATION
+
+class DataportCz(SimpleHoster):
+    # dataport.cz free-only plugin: submits the free-download form with a
+    # captcha answer looked up from a small static table (the site reuses
+    # five fixed captcha images), retrying while no free slot is available.
+    __name__ = "DataportCz"
+    __type__ = "hoster"
+    __pattern__ = r"http://(?:.*?\.)?dataport.cz/file/(.*)"
+    __version__ = "0.37"
+    __description__ = """Dataport.cz plugin - free only"""
+    __author_name__ = ("zoidberg")
+
+    FILE_NAME_PATTERN = r'<span itemprop="name">(?P<N>[^<]+)</span>'
+    FILE_SIZE_PATTERN = r'<td class="fil">Velikost</td>\s*<td>(?P<S>[^<]+)</td>'
+    FILE_OFFLINE_PATTERN = r'<h2>Soubor nebyl nalezen</h2>'
+    FILE_URL_REPLACEMENTS = [(__pattern__, r'http://www.dataport.cz/file/\1')]
+
+    CAPTCHA_URL_PATTERN = r'<section id="captcha_bg">\s*<img src="(.*?)"'
+    FREE_SLOTS_PATTERN = ur'Počet volnÜch slotů: <span class="darkblue">(\d+)</span><br />'
+
+    def handleFree(self):
+        # Known captchaId -> answer table; the site serves only these five.
+        captchas = {"1": "jkeG", "2": "hMJQ", "3": "vmEK", "4": "ePQM", "5": "blBd"}
+
+        # Up to 60 attempts: retry every 60s while no free slot is available.
+        for i in range(60):
+            action, inputs = self.parseHtmlForm('free_download_form')
+            self.logDebug(action, inputs)
+            if not action or not inputs:
+                raise PluginParseError('free_download_form')
+
+            if "captchaId" in inputs and inputs["captchaId"] in captchas:
+                inputs['captchaCode'] = captchas[inputs["captchaId"]]
+            else:
+                # Unknown captchaId -> the static table is out of date.
+                raise PluginParseError('captcha')
+
+            # NOTE(review): download() returns the saved file's path, not
+            # HTML — checkDownload below inspects the downloaded file itself.
+            self.html = self.download("http://www.dataport.cz%s" % action, post = inputs)
+
+            check = self.checkDownload({"captcha": 'alert("\u0160patn\u011b opsan\u00fd k\u00f3d z obr\u00e1zu");',
+                                        "slot": 'alert("Je n\u00e1m l\u00edto, ale moment\u00e1ln\u011b nejsou'})
+            if check == "captcha":
+                raise PluginParseError('invalid captcha')
+            elif check == "slot":
+                self.logDebug("No free slots - wait 60s and retry")
+                self.setWait(60, False)
+                self.wait()
+                self.html = self.load(self.pyfile.url, decode = True)
+                continue
+            else:
+                break
+
+create_getInfo(DataportCz) \ No newline at end of file
diff --git a/pyload/plugins/hoster/DateiTo.py b/pyload/plugins/hoster/DateiTo.py
new file mode 100644
index 000000000..061881e4b
--- /dev/null
+++ b/pyload/plugins/hoster/DateiTo.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+class DateiTo(SimpleHoster):
+    # datei.to free-only plugin: drives the site's multi-step ajax state
+    # machine (download.php -> wait -> recaptcha.php -> final URL), where
+    # each response embeds the next request's url/data pair.
+    __name__ = "DateiTo"
+    __type__ = "hoster"
+    __pattern__ = r"http://(?:www\.)?datei\.to/datei/(?P<ID>\w+)\.html"
+    __version__ = "0.02"
+    __description__ = """Datei.to plugin - free only"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    FILE_NAME_PATTERN = r'Dateiname:</td>\s*<td colspan="2"><strong>(?P<N>.*?)</'
+    FILE_SIZE_PATTERN = r'Dateigr&ouml;&szlig;e:</td>\s*<td colspan="2">(?P<S>.*?)</'
+    FILE_OFFLINE_PATTERN = r'>Datei wurde nicht gefunden<|>Bitte wÀhle deine Datei aus... <'
+    PARALELL_PATTERN = r'>Du lÀdst bereits eine Datei herunter<'
+
+    WAIT_PATTERN = r'countdown\({seconds: (\d+)'
+    # Next ajax step: url and urlencoded POST payload.
+    DATA_PATTERN = r'url: "(.*?)", data: "(.*?)",'
+    RECAPTCHA_KEY_PATTERN = r'Recaptcha.create\("(.*?)"'
+
+    def handleFree(self):
+        # Initial step: phase 'I' against the download endpoint.
+        url = 'http://datei.to/ajax/download.php'
+        data = {'P': 'I', 'ID': self.file_info['ID']}
+
+        recaptcha = ReCaptcha(self)
+
+        # Walk at most 10 ajax steps; for/else fails if phase 'IV'
+        # (final step) is never reached.
+        for i in range(10):
+            self.logDebug("URL", url, "POST", data)
+            self.html = self.load(url, post = data)
+            self.checkErrors()
+
+            if url.endswith('download.php') and 'P' in data:
+                if data['P'] == 'I':
+                    # Phase I requires sitting out the countdown first.
+                    self.doWait()
+
+                elif data['P'] == 'IV':
+                    # Final phase: the response body IS the download URL.
+                    break
+
+            # Extract the next step's endpoint and payload.
+            found = re.search(self.DATA_PATTERN, self.html)
+            if not found: self.parseError('data')
+            url = 'http://datei.to/' + found.group(1)
+            data = dict(x.split('=') for x in found.group(2).split('&'))
+
+            if url.endswith('recaptcha.php'):
+                # Site key from the page; hardcoded fallback if absent.
+                found = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
+                recaptcha_key = found.group(1) if found else "6LdBbL8SAAAAAI0vKUo58XRwDd5Tu_Ze1DA7qTao"
+
+                data['recaptcha_challenge_field'], data['recaptcha_response_field'] = recaptcha.challenge(recaptcha_key)
+
+        else:
+            self.fail('Too bad...')
+
+        download_url = self.html
+        self.logDebug('Download URL', download_url)
+        self.download(download_url)
+
+    def checkErrors(self):
+        # A parallel download from the same IP forces a wait-and-retry.
+        found = re.search(self.PARALELL_PATTERN, self.html)
+        if found:
+            found = re.search(self.WAIT_PATTERN, self.html)
+            wait_time = int(found.group(1)) if found else 30
+            self.setWait(wait_time + 1, False)
+            self.wait(300)
+            self.retry()
+
+    def doWait(self):
+        # Sit out the page countdown; ping the 'Ads' phase first, which the
+        # site expects during the waiting period.
+        found = re.search(self.WAIT_PATTERN, self.html)
+        wait_time = int(found.group(1)) if found else 30
+        self.setWait(wait_time + 1, False)
+
+        self.load('http://datei.to/ajax/download.php', post = {'P': 'Ads'})
+        self.wait()
+
+# Module-level online-check hook built from the plugin's patterns.
+getInfo = create_getInfo(DateiTo)
diff --git a/pyload/plugins/hoster/DdlstorageCom.py b/pyload/plugins/hoster/DdlstorageCom.py
new file mode 100644
index 000000000..5eaebf1d1
--- /dev/null
+++ b/pyload/plugins/hoster/DdlstorageCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
+class DdlstorageCom(XFileSharingPro):
+    # XFileSharingPro subclass for ddlstorage.com; only the combined
+    # name/size pattern and hoster name are specialized.
+    __name__ = "DdlstorageCom"
+    __type__ = "hoster"
+    __pattern__ = r"http://(?:\w*\.)*?ddlstorage.com/\w{12}"
+    __version__ = "0.07"
+    __description__ = """DDLStorage.com hoster plugin"""
+    __author_name__ = ("zoidberg", "stickell")
+    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+    FILE_INFO_PATTERN = r'<p class="sub_title"[^>]*>(?P<N>.+) \((?P<S>[^)]+)\)</p>'
+    HOSTER_NAME = "ddlstorage.com"
+
+
+# Module-level online-check hook built from the plugin's patterns.
+getInfo = create_getInfo(DdlstorageCom)
\ No newline at end of file
diff --git a/pyload/plugins/hoster/DebridItaliaCom.py b/pyload/plugins/hoster/DebridItaliaCom.py
new file mode 100644
index 000000000..1c2f4246b
--- /dev/null
+++ b/pyload/plugins/hoster/DebridItaliaCom.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+import re
+
+from module.plugins.Hoster import Hoster
+
+
+class DebridItaliaCom(Hoster):
+    # Multi-hoster (debrid) plugin: converts a foreign hoster link into a
+    # direct debriditalia.com link via their xajax endpoint, then downloads.
+    __name__ = "DebridItaliaCom"
+    __version__ = "0.04"
+    __type__ = "hoster"
+    __pattern__ = r"https?://.*debriditalia\.com"
+    __description__ = """Debriditalia.com hoster plugin"""
+    __author_name__ = ("stickell")
+    __author_mail__ = ("l.stickell@yahoo.it")
+
+    def init(self):
+        # Unlimited chunks; resume supported.
+        self.chunkLimit = -1
+        self.resumeDownload = True
+
+    def process(self, pyfile):
+        if not self.account:
+            self.logError(_("Please enter your %s account or deactivate this plugin") % "DebridItalia")
+            self.fail("No DebridItalia account provided")
+
+        self.logDebug("Old URL: %s" % pyfile.url)
+        if re.match(self.__pattern__, pyfile.url):
+            # Already a debriditalia link; use it as-is.
+            new_url = pyfile.url
+        else:
+            # Ask the xajax link generator to convert the foreign URL.
+            url = "http://debriditalia.com/linkgen2.php?xjxfun=convertiLink&xjxargs[]=S<![CDATA[%s]]>" % pyfile.url
+            page = self.load(url)
+            self.logDebug("XML data: %s" % page)
+
+            if 'File not available' in page:
+                self.fail('File not available')
+            else:
+                # NOTE(review): no None guard — an unexpected response
+                # raises AttributeError here.
+                new_url = re.search(r'<a href="(?:[^"]+)">(?P<direct>[^<]+)</a>', page).group('direct')
+
+        self.logDebug("New URL: %s" % new_url)
+
+        self.download(new_url, disposition=True)
+
+        # An empty file means the generated link was bad; retry later.
+        check = self.checkDownload({"empty": re.compile(r"^$")})
+
+        if check == "empty":
+            self.retry(5, 120, 'Empty file downloaded')
diff --git a/pyload/plugins/hoster/DepositfilesCom.py b/pyload/plugins/hoster/DepositfilesCom.py
new file mode 100644
index 000000000..ee5fdf6af
--- /dev/null
+++ b/pyload/plugins/hoster/DepositfilesCom.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from urllib import unquote
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.network.RequestFactory import getURL
+from module.plugins.internal.CaptchaService import ReCaptcha
+
class DepositfilesCom(SimpleHoster):
    """Hoster plugin for depositfiles.com / dfiles.eu.

    Free downloads go through wait/limit checks plus a ReCaptcha loop in
    handleFree(); premium accounts get a direct link in handlePremium().
    """

    __name__ = "DepositfilesCom"
    __type__ = "hoster"
    __pattern__ = r"http://[\w\.]*?(depositfiles\.com|dfiles\.eu)(/\w{1,3})?/files/[\w]+"
    __version__ = "0.44"
    __description__ = """Depositfiles.com Download Hoster"""
    __author_name__ = ("spoob", "zoidberg")
    __author_mail__ = ("spoob@pyload.org", "zoidberg@mujmail.cz")

    # Patterns consumed by SimpleHoster to scrape name/size/offline state.
    FILE_NAME_PATTERN = r'File name: <b title="(?P<N>[^"]+)'
    FILE_SIZE_PATTERN = r'File size: <b>(?P<S>[0-9.]+)&nbsp;(?P<U>[kKMG])i?B</b>'
    FILE_INFO_PATTERN = r'<script type="text/javascript">eval\( unescape\(\'(?P<N>.*?)\''
    FILE_OFFLINE_PATTERN = r'<span class="html_download_api-not_exists"></span>'
    # Normalize any language prefix to /en/ and drop a trailing ".html".
    FILE_URL_REPLACEMENTS = [(r"\.com(/.*?)?/files", ".com/en/files"), (r"\.html$", "")]
    # Decode %uXXXX escapes and strip the surrounding <b title="..."> markup.
    FILE_NAME_REPLACEMENTS = [(r'\%u([0-9A-Fa-f]{4})', lambda m: unichr(int(m.group(1), 16))), (r'.*<b title="(?P<N>[^"]+).*', "\g<N>" )]

    RECAPTCHA_PATTERN = r"Recaptcha.create\('([^']+)'"
    DOWNLOAD_LINK_PATTERN = r'<form id="downloader_file_form" action="(http://.+?\.(dfiles\.eu|depositfiles\.com)/.+?)" method="post"'

    def setup(self):
        # Single connection only; resuming works for premium accounts.
        self.multiDL = False
        self.resumeDownload = self.premium

    def handleFree(self):
        self.html = self.load(self.pyfile.url, post={"gateway_result":"1"}, cookies = True)
        if re.search(self.FILE_OFFLINE_PATTERN, self.html): self.offline()

        # Server-side virus check in progress: wait a minute, then retry.
        if re.search(r'File is checked, please try again in a minute.', self.html) is not None:
            self.logInfo("DepositFiles.com: The file is being checked. Waiting 1 minute.")
            self.setWait(61)
            self.wait()
            self.retry()

        # Per-IP traffic limit: wait the advertised interval (reconnect helps).
        wait = re.search(r'html_download_api-limit_interval\">(\d+)</span>', self.html)
        if wait:
            wait_time = int(wait.group(1))
            self.logInfo( "%s: Traffic used up. Waiting %d seconds." % (self.__name__, wait_time) )
            self.setWait(wait_time)
            self.wantReconnect = True
            self.wait()
            self.retry()

        # No free slots: queue the advertised wait (in minutes).
        wait = re.search(r'>Try in (\d+) minutes or use GOLD account', self.html)
        if wait:
            wait_time = int(wait.group(1))
            self.logInfo( "%s: All free slots occupied. Waiting %d minutes." % (self.__name__, wait_time) )
            self.setWait(wait_time * 60, False)

        # Regular pre-download countdown.
        wait = re.search(r'Please wait (\d+) sec', self.html)
        if wait:
            self.setWait(int(wait.group(1)))

        found = re.search(r"var fid = '(\w+)';", self.html)
        if not found: self.retry(wait_time=5)
        params = {'fid' : found.group(1)}
        self.logDebug ("FID: %s" % params['fid'])

        # Fall back to a known site-wide key when the page embeds none.
        captcha_key = '6LdRTL8SAAAAAE9UOdWZ4d0Ky-aeA7XfSqyWDM2m'
        found = re.search(self.RECAPTCHA_PATTERN, self.html)
        if found: captcha_key = found.group(1)
        self.logDebug ("CAPTCHA_KEY: %s" % captcha_key)

        self.wait()
        recaptcha = ReCaptcha(self)

        # Up to 5 captcha attempts before giving up (for/else: fail on exhaust).
        for i in range(5):
            self.html = self.load("http://depositfiles.com/get_file.php", get = params)

            if '<input type=button value="Continue" onclick="check_recaptcha' in self.html:
                if not captcha_key: self.parseError('Captcha key')
                # A 'response' already present means the previous answer failed.
                if 'response' in params: self.invalidCaptcha()
                params['challenge'], params['response'] = recaptcha.challenge(captcha_key)
                self.logDebug(params)
                continue

            found = re.search(self.DOWNLOAD_LINK_PATTERN, self.html)
            if found:
                if 'response' in params: self.correctCaptcha()
                link = unquote(found.group(1))
                self.logDebug ("LINK: %s" % link)
                break
            else:
                self.parseError('Download link')
        else:
            self.fail('No valid captcha response received')

        try:
            self.download(link, disposition = True)
        except:
            # NOTE(review): bare except also swallows plugin-control
            # exceptions; kept as-is to preserve the retry behavior.
            self.retry(wait_time = 60)

    def handlePremium(self):
        if '<span class="html_download_api-gold_traffic_limit">' in self.html:
            self.logWarning("Download limit reached")
            self.retry(25, 3600, "Download limit reached")
        elif 'onClick="show_gold_offer' in self.html:
            # Gold-offer interstitial implies a stale session: refresh login.
            self.account.relogin(self.user)
            self.retry()
        link = unquote(re.search('<div id="download_url">\s*<a href="(http://.+?\.depositfiles.com/.+?)"', self.html).group(1))
        self.multiDL = True
        self.download(link, disposition = True)


getInfo = create_getInfo(DepositfilesCom)
diff --git a/pyload/plugins/hoster/DlFreeFr.py b/pyload/plugins/hoster/DlFreeFr.py
new file mode 100644
index 000000000..67c2d6c17
--- /dev/null
+++ b/pyload/plugins/hoster/DlFreeFr.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns
+from module.common.json_layer import json_loads
+
+import pycurl
+from module.network.Browser import Browser
+from module.network.CookieJar import CookieJar
+
class CustomBrowser(Browser):
    """Browser that pins the HTTP verb explicitly per request.

    POST requests additionally disable automatic redirect following so the
    caller can inspect the 302 response (cookie + location) itself.
    """

    def __init__(self, bucket=None, options={}):
        Browser.__init__(self, bucket, options)

    def load(self, *args, **kwargs):
        # "post" may arrive as a keyword or as the third positional argument.
        post = kwargs.get("post")
        if post is None and len(args) > 2:
            post = args[2]

        setopt = self.http.c.setopt
        if post:
            # POST: force the verb, do not chase redirects.
            setopt(pycurl.FOLLOWLOCATION, 0)
            setopt(pycurl.POST, 1)
            setopt(pycurl.CUSTOMREQUEST, "POST")
        else:
            # GET: follow redirects as usual.
            setopt(pycurl.FOLLOWLOCATION, 1)
            setopt(pycurl.POST, 0)
            setopt(pycurl.CUSTOMREQUEST, "GET")

        return Browser.load(self, *args, **kwargs)
+
+"""
+Class to support adyoulike captcha service
+"""
class AdYouLike():
    """Client for the AdYouLike captcha service used by dl.free.fr.

    challenge() extracts the service configuration from the hoster page and
    fetches a challenge; result() derives the expected answer from the
    challenge's visual instructions.
    """

    # JS call embedding the AdYouLike configuration JSON.
    ADYOULIKE_INPUT_PATTERN = r'Adyoulike.create\((.*?)\);'
    # Fixed JSONP callback name sent with the challenge request.
    ADYOULIKE_CALLBACK = r'Adyoulike.g._jsonp_5579316662423138'
    ADYOULIKE_CHALLENGE_PATTERN = ADYOULIKE_CALLBACK + r'\((.*?)\)'

    def __init__(self, plugin, engine = "adyoulike"):
        # plugin: hoster plugin instance, used for load() and fail().
        self.plugin = plugin
        self.engine = engine

    def challenge(self, html):
        """Parse the AdYouLike config out of *html* and fetch a challenge.

        Returns (ayl_data, challenge_data) as parsed JSON structures.
        Fails the plugin if either step cannot be parsed.
        """
        adyoulike_data_string = None
        found = re.search(self.ADYOULIKE_INPUT_PATTERN, html)
        if found:
            adyoulike_data_string = found.group(1)
        else:
            self.plugin.fail("Can't read AdYouLike input data")

        ayl_data = json_loads(adyoulike_data_string) #{"adyoulike":{"key":"P~zQ~O0zV0WTiAzC-iw0navWQpCLoYEP"},"all":{"element_id":"ayl_private_cap_92300","lang":"fr","env":"prod"}}

        res = self.plugin.load(r'http://api-ayl.appspot.com/challenge?key=%(ayl_key)s&env=%(ayl_env)s&callback=%(callback)s' % {"ayl_key": ayl_data[self.engine]["key"], "ayl_env": ayl_data["all"]["env"], "callback": self.ADYOULIKE_CALLBACK})

        found = re.search(self.ADYOULIKE_CHALLENGE_PATTERN, res)
        challenge_string = None
        if found:
            challenge_string = found.group(1)
        else:
            self.plugin.fail("Invalid AdYouLike challenge")
        challenge_data = json_loads(challenge_string)

        return ayl_data, challenge_data

    def result(self, ayl, challenge):
        """Derive the captcha answer from the challenge's visual instructions.

        Example challenge payload (abridged):
        Adyoulike.g._jsonp_5579316662423138({"translations":{"fr":{"instructions_visual":"Recopiez « Soonnight » ci-dessous :"}},"token":"...","tid":"..."})

        Returns the dict of form fields expected by the getfile.pl POST.
        """
        response = None
        try:
            instructions_visual = challenge["translations"][ayl["all"]["lang"]]["instructions_visual"]
            # The expected answer is the word quoted between « » guillemets.
            found = re.search(u".*«(.*)».*", instructions_visual)
            if found:
                response = found.group(1).strip()
            else:
                self.plugin.fail("Can't parse instructions visual")
        except KeyError:
            self.plugin.fail("No instructions visual")

        #TODO: Supports captcha

        if not response:
            self.plugin.fail("AdYouLike result failed")

        return {"_ayl_captcha_engine" : self.engine,
                "_ayl_env" : ayl["all"]["env"],
                "_ayl_tid" : challenge["tid"],
                "_ayl_token_challenge" : challenge["token"],
                "_ayl_response": response }
+
class DlFreeFr(SimpleHoster):
    """Hoster plugin for dl.free.fr.

    Free users get the file through a captcha page solved via the AdYouLike
    service; free.fr subscribers are served the file directly.
    """

    __name__ = "DlFreeFr"
    __type__ = "hoster"
    __pattern__ = r"http://dl\.free\.fr/([a-zA-Z0-9]+|getfile\.pl\?file=/[a-zA-Z0-9]+)"
    __version__ = "0.24"
    __description__ = """dl.free.fr download hoster"""
    __author_name__ = ("the-razer", "zoidberg", "Toilal")
    __author_mail__ = ("daniel_ AT gmx DOT net", "zoidberg@mujmail.cz", "toilal.dev@gmail.com")

    FILE_NAME_PATTERN = r"Fichier:</td>\s*<td[^>]*>(?P<N>[^>]*)</td>"
    FILE_SIZE_PATTERN = r"Taille:</td>\s*<td[^>]*>(?P<S>[\d.]+[KMG])o"
    FILE_OFFLINE_PATTERN = r"Erreur 404 - Document non trouv|Fichier inexistant|Le fichier demand&eacute; n'a pas &eacute;t&eacute; trouv&eacute;"
    #FILE_URL_PATTERN = r'href="(?P<url>http://.*?)">T&eacute;l&eacute;charger ce fichier'

    def setup(self):
        self.limitDL = 5
        self.resumeDownload = True
        self.chunkLimit = 1

    def init(self):
        # Use CustomBrowser so POSTs do not auto-follow redirects; the 302
        # from getfile.pl must be inspected manually (see handleFree).
        factory = self.core.requestFactory
        self.req = CustomBrowser(factory.bucket, factory.getOptions())

    def process(self, pyfile):
        # Start from a clean cookie state for every download attempt.
        self.req.setCookieJar(None)

        pyfile.url = replace_patterns(pyfile.url, self.FILE_URL_REPLACEMENTS)
        valid_url = pyfile.url
        headers = self.load(valid_url, just_header = True)

        self.html = None
        # Follow a single redirect manually to learn the real target URL.
        if headers.get('code') == 302:
            valid_url = headers.get('location')
            headers = self.load(valid_url, just_header = True)

        if headers.get('code') == 200:
            content_type = headers.get('content-type')
            if content_type and content_type.startswith("text/html"):
                # Indirect access: an HTML page (captcha) guards the file.
                self.html = self.load(valid_url)
                self.handleFree()
            else:
                # Direct access to requested file for users using free.fr as Internet Service Provider.
                self.download(valid_url, disposition=True)
        elif headers.get('code') == 404:
            self.offline()
        else:
            self.fail("Invalid return code: " + str(headers.get('code')))

    def handleFree(self):
        """Solve the AdYouLike captcha and follow the getfile.pl redirect."""
        action, inputs = self.parseHtmlForm('action="getfile.pl"')

        adyoulike = AdYouLike(self)
        ayl, challenge = adyoulike.challenge(self.html)
        result = adyoulike.result(ayl, challenge)
        inputs.update(result)

        self.load("http://dl.free.fr/getfile.pl", post = inputs)
        headers = self.getLastHeaders()
        # Success is a 302 carrying both the session cookie and the location.
        if headers.get("code") == 302 and headers.has_key("set-cookie") and headers.has_key("location"):
            found = re.search("(.*?)=(.*?); path=(.*?); domain=(.*?)", headers.get("set-cookie"))
            cj = CookieJar(__name__)
            if found:
                cj.setCookie(found.group(4), found.group(1), found.group(2), found.group(3))
            else:
                self.fail("Cookie error")
            location = headers.get("location")
            # The download must present the cookie issued above.
            self.req.setCookieJar(cj)
            self.download(location, disposition=True);
        else:
            self.fail("Invalid response")

    def getLastHeaders(self):
        """Parse the last raw HTTP response headers into a dict.

        The numeric status code is stored under "code"; repeated header
        names are collected into a list.
        """
        #parse header
        header = {"code": self.req.code}
        for line in self.req.http.header.splitlines():
            line = line.strip()
            if not line or ":" not in line: continue

            key, none, value = line.partition(":")
            key = key.lower().strip()
            value = value.strip()

            if key in header:
                if type(header[key]) == list:
                    header[key].append(value)
                else:
                    header[key] = [header[key], value]
            else:
                header[key] = value
        return header


getInfo = create_getInfo(DlFreeFr)
diff --git a/pyload/plugins/hoster/EasybytezCom.py b/pyload/plugins/hoster/EasybytezCom.py
new file mode 100644
index 000000000..98691a641
--- /dev/null
+++ b/pyload/plugins/hoster/EasybytezCom.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
class EasybytezCom(XFileSharingPro):
    """easybytez.com hoster, built on the XFileSharingPro base plugin."""

    __name__ = "EasybytezCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)?easybytez.com/(\w+).*"
    __version__ = "0.17"
    __description__ = """easybytez.com"""
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    # Scrape patterns consumed by the XFileSharingPro base class.
    FILE_INFO_PATTERN = r'<span class="name">(?P<N>.+)</span><br>\s*<span class="size">(?P<S>[^<]+)</span>'
    FILE_OFFLINE_PATTERN = r'<h1>File not available</h1>'

    DIRECT_LINK_PATTERN = r'(http://(\w+\.(easyload|easybytez|zingload)\.(com|to)|\d+\.\d+\.\d+\.\d+)/files/\d+/\w+/[^"<]+)'
    OVR_DOWNLOAD_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)'
    OVR_KILL_LINK_PATTERN = r'<h2>Delete Link</h2>\s*<textarea[^>]*>([^<]+)'
    ERROR_PATTERN = r'(?:class=["\']err["\'][^>]*>|<Center><b>)(.*?)</'

    HOSTER_NAME = "easybytez.com"

    def setup(self):
        # Resume and parallel downloads are premium-only features here.
        is_premium = self.premium
        self.resumeDownload = is_premium
        self.multiDL = is_premium


getInfo = create_getInfo(EasybytezCom)
diff --git a/pyload/plugins/hoster/EdiskCz.py b/pyload/plugins/hoster/EdiskCz.py
new file mode 100644
index 000000000..a253be0d9
--- /dev/null
+++ b/pyload/plugins/hoster/EdiskCz.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
class EdiskCz(SimpleHoster):
    """Hoster plugin for edisk.cz / edisk.sk / edisk.eu free downloads."""

    __name__ = "EdiskCz"
    __type__ = "hoster"
    __pattern__ = r"http://(\w*\.)?edisk.(cz|sk|eu)/(stahni|sk/stahni|en/download)/.*"
    __version__ = "0.21"
    __description__ = """Edisk.cz"""
    __author_name__ = ("zoidberg")

    URL_PATTERN = r'<form name = "formular" action = "([^"]+)" method = "post">'
    FILE_INFO_PATTERN = r'<span class="fl" title="(?P<N>[^"]+)">\s*.*?\((?P<S>[0-9.]*) (?P<U>[kKMG])i?B\)</h1></span>'
    ACTION_PATTERN = r'/en/download/(\d+/.*\.html)'
    DLLINK_PATTERN = r'http://.*edisk.cz.*\.html'
    FILE_OFFLINE_PATTERN = r'<h3>This file does not exist due to one of the following:</h3><ul><li>'

    def setup(self):
        self.multiDL = False

    def process(self, pyfile):
        # Normalize localized URLs to the English download path.
        url = re.sub("/(stahni|sk/stahni)/", "/en/download/", pyfile.url)
        self.logDebug('URL:' + url)

        match = re.search(self.ACTION_PATTERN, url)
        if match is None:
            self.parseError("ACTION")
        action = match.group(1)

        # Load the file page and let SimpleHoster scrape name/size.
        self.html = self.load(url, decode=True)
        self.getFileInfo()

        # Visit the slow-download page, then request the actual link.
        self.html = self.load(re.sub("/en/download/", "/en/download-slow/", url))
        url = self.load(re.sub("/en/download/", "/x-download/", url), post={"action": action})

        # The x-download endpoint must answer with a plain download URL.
        if not re.match(self.DLLINK_PATTERN, url):
            self.fail("Unexpected server response")

        self.download(url)


getInfo = create_getInfo(EdiskCz)
diff --git a/pyload/plugins/hoster/EgoFilesCom.py b/pyload/plugins/hoster/EgoFilesCom.py
new file mode 100644
index 000000000..274286cf1
--- /dev/null
+++ b/pyload/plugins/hoster/EgoFilesCom.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+# Test link (random.bin):
+# http://egofiles.com/mOZfMI1WLZ6HBkGG/random.bin
+
+import re
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+
class EgoFilesCom(SimpleHoster):
    """Hoster plugin for egofiles.com (free downloads guarded by ReCaptcha)."""

    __name__ = "EgoFilesCom"
    __type__ = "hoster"
    __pattern__ = r"https?://(www\.)?egofiles.com/(\w+)"
    __version__ = "0.13"
    __description__ = """Egofiles.com Download Hoster"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    FILE_INFO_PATTERN = r'<div class="down-file">\s+(?P<N>[^\t]+)\s+<div class="file-properties">\s+(File size|Rozmiar): (?P<S>[\w.]+) (?P<U>\w+) \|'
    FILE_OFFLINE_PATTERN = r'(File size|Rozmiar): 0 KB'
    WAIT_TIME_PATTERN = r'For next free download you have to wait <strong>((?P<m>\d*)m)? ?((?P<s>\d+)s)?</strong>'
    DIRECT_LINK_PATTERN = r'<a href="(?P<link>[^"]+)">Download ></a>'
    RECAPTCHA_KEY = '6LeXatQSAAAAAHezcjXyWAni-4t302TeYe7_gfvX'

    def init(self):
        self.file_info = {}
        # Set English language
        self.load("https://egofiles.com/ajax/lang.php?lang=en", just_header=True)

    def process(self, pyfile):
        # Premium path only when traffic checking passes (or is disabled).
        if self.premium and (not self.SH_CHECK_TRAFFIC or self.checkTrafficLeft()):
            self.handlePremium()
        else:
            self.handleFree()

    def handleFree(self):
        self.html = self.load(self.pyfile.url, decode=True)
        self.getFileInfo()

        # Wait time between free downloads
        if 'For next free download you have to wait' in self.html:
            m = re.search(self.WAIT_TIME_PATTERN, self.html)
            if m is None:
                # Fix: an unparsable wait notice previously raised
                # AttributeError ('NoneType' has no 'groupdict') instead of
                # reporting a parse error.
                self.parseError('Wait time')
            times = m.groupdict('0')
            waittime = int(times['m']) * 60 + int(times['s'])
            self.setWait(waittime, True)
            self.wait()

        downloadURL = ''
        recaptcha = ReCaptcha(self)
        # Up to 5 captcha attempts before giving up.
        for i in xrange(5):
            challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
            post_data = {'recaptcha_challenge_field': challenge,
                         'recaptcha_response_field': response}
            self.html = self.load(self.pyfile.url, post=post_data, decode=True)
            m = re.search(self.DIRECT_LINK_PATTERN, self.html)
            if not m:
                self.logInfo('Wrong captcha')
                self.invalidCaptcha()
            else:
                # Fix: the old "elif hasattr(m, 'group')" was redundant (a
                # truthy match object always has .group), which made its
                # trailing "Unknown error" else-branch unreachable dead code.
                downloadURL = m.group('link')
                self.correctCaptcha()
                break

        if not downloadURL:
            self.fail("No Download url retrieved/all captcha attempts failed")

        self.download(downloadURL, disposition=True)

    def handlePremium(self):
        header = self.load(self.pyfile.url, just_header=True)
        # 'in' test instead of the deprecated dict.has_key().
        if 'location' in header:
            self.logDebug('DIRECT LINK from header: ' + header['location'])
            self.download(header['location'])
        else:
            self.html = self.load(self.pyfile.url, decode=True)
            self.getFileInfo()
            # Reuse the class pattern (identical to the old inline regex).
            m = re.search(self.DIRECT_LINK_PATTERN, self.html)
            if not m:
                self.parseError('Unable to detect direct download url')
            else:
                self.logDebug('DIRECT URL from html: ' + m.group('link'))
                self.download(m.group('link'), disposition=True)


getInfo = create_getInfo(EgoFilesCom)
diff --git a/pyload/plugins/hoster/EuroshareEu.py b/pyload/plugins/hoster/EuroshareEu.py
new file mode 100644
index 000000000..5224dfd9f
--- /dev/null
+++ b/pyload/plugins/hoster/EuroshareEu.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
class EuroshareEu(SimpleHoster):
    """Hoster plugin for euroshare.eu (and its .sk/.cz/.hu/.pl mirrors)."""

    __name__ = "EuroshareEu"
    __type__ = "hoster"
    __pattern__ = r"http://(\w*\.)?euroshare.(eu|sk|cz|hu|pl)/file/.*"
    __version__ = "0.25"
    __description__ = """Euroshare.eu"""
    __author_name__ = ("zoidberg")

    FILE_INFO_PATTERN = r'<span style="float: left;"><strong>(?P<N>.+?)</strong> \((?P<S>.+?)\)</span>'
    FILE_OFFLINE_PATTERN = ur'<h2>S.bor sa nena.iel</h2>|Poşadovaná stránka neexistuje!'

    FREE_URL_PATTERN = r'<a href="(/file/\d+/[^/]*/download/)"><div class="downloadButton"'
    # "Download in progress" page: only one free download per IP at a time.
    ERR_PARDL_PATTERN = r'<h2>Prebieha s.ahovanie</h2>|<p>Naraz je z jednej IP adresy mo.n. s.ahova. iba jeden s.bor'
    ERR_NOT_LOGGED_IN_PATTERN = r'href="/customer-zone/login/"'

    # All national domains are aliases of the .eu site.
    FILE_URL_REPLACEMENTS = [(r"(http://[^/]*\.)(sk|cz|hu|pl)/", r"\1eu/")]

    def setup(self):
        # Parallel/resumable downloads are premium-only.
        self.multiDL = self.resumeDownload = self.premium
        self.req.setOption("timeout", 120)

    def handlePremium(self):
        # Session may have expired; log in again and retry.
        if self.ERR_NOT_LOGGED_IN_PATTERN in self.html:
            self.account.relogin(self.user)
            self.retry(reason="User not logged in")

        self.download(self.pyfile.url.rstrip('/') + "/download/")

        # The downloaded payload may actually be an error page.
        check = self.checkDownload({"login": re.compile(self.ERR_NOT_LOGGED_IN_PATTERN),
                                    "json": re.compile(r'\{"status":"error".*?"message":"(.*?)"')
                                    })
        if check == "login" or (check == "json" and self.lastCheck.group(1) == "Access token expired"):
            self.account.relogin(self.user)
            self.retry(reason="Access token expired")
        elif check == "json":
            self.fail(self.lastCheck.group(1))

    def handleFree(self):
        # Another free download already running from this IP: long back-off.
        if re.search(self.ERR_PARDL_PATTERN, self.html) is not None:
            self.longWait(300, 12)

        found = re.search(self.FREE_URL_PATTERN, self.html)
        if found is None:
            self.parseError("Parse error (URL)")
        parsed_url = "http://euroshare.eu%s" % found.group(1)
        self.logDebug("URL", parsed_url)
        self.download(parsed_url, disposition=True)

        # The slot may have been taken between page load and download start.
        check = self.checkDownload({"multi_dl": re.compile(self.ERR_PARDL_PATTERN)})
        if check == "multi_dl":
            self.longWait(300, 12)

getInfo = create_getInfo(EuroshareEu)
diff --git a/pyload/plugins/hoster/ExtabitCom.py b/pyload/plugins/hoster/ExtabitCom.py
new file mode 100644
index 000000000..41a43fab5
--- /dev/null
+++ b/pyload/plugins/hoster/ExtabitCom.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha
+from module.common.json_layer import json_loads
+
+
class ExtabitCom(SimpleHoster):
    """Hoster plugin for extabit.com free downloads (wait + ReCaptcha)."""

    __name__ = "ExtabitCom"
    __type__ = "hoster"
    __pattern__ = r"http://(\w+\.)*extabit\.com/(file|go|fid)/(?P<ID>\w+)"
    __version__ = "0.3"
    __description__ = """Extabit.com"""
    __author_name__ = ("zoidberg")

    FILE_NAME_PATTERN = r'<th>File:</th>\s*<td class="col-fileinfo">\s*<div title="(?P<N>[^"]+)">'
    FILE_SIZE_PATTERN = r'<th>Size:</th>\s*<td class="col-fileinfo">(?P<S>[^<]+)</td>'
    FILE_OFFLINE_PATTERN = r'<h1>File not found</h1>'
    TEMP_OFFLINE_PATTERN = r">(File is temporary unavailable|No download mirror)<"

    DOWNLOAD_LINK_PATTERN = r'"(http://guest\d+\.extabit\.com/[a-z0-9]+/.*?)"'

    def handleFree(self):
        if r">Only premium users can download this file" in self.html:
            self.fail("Only premium users can download this file")

        # Honor per-IP wait / daily-limit throttles announced in the page.
        m = re.search(r"Next free download from your ip will be available in <b>(\d+)\s*minutes", self.html)
        if m:
            self.setWait(int(m.group(1)) * 60, True)
            self.wait()
        elif "The daily downloads limit from your IP is exceeded" in self.html:
            self.setWait(3600, True)
            self.wait()

        self.logDebug("URL: " + self.req.http.lastEffectiveURL)
        m = re.match(self.__pattern__, self.req.http.lastEffectiveURL)
        # Fix: file_info is a dict, so the fallback must use item access;
        # the old self.file_info('ID') raised "TypeError: 'dict' object is
        # not callable" whenever the pattern failed to match.
        fileID = m.group('ID') if m else self.file_info['ID']

        m = re.search(r'recaptcha/api/challenge\?k=(\w+)', self.html)
        if m:
            recaptcha = ReCaptcha(self)
            captcha_key = m.group(1)

            # Up to 5 captcha attempts; for/else fails when all are rejected.
            for i in range(5):
                get_data = {"type": "recaptcha"}
                # NOTE(review): the answer is sent under "capture" (sic) —
                # looks like a typo for the captcha response field, but it is
                # kept as-is; confirm against the live extabit API first.
                get_data["challenge"], get_data["capture"] = recaptcha.challenge(captcha_key)
                response = json_loads(self.load("http://extabit.com/file/%s/" % fileID, get=get_data))
                if "ok" in response:
                    self.correctCaptcha()
                    break
                else:
                    self.invalidCaptcha()
            else:
                self.fail("Invalid captcha")
        else:
            self.parseError('Captcha')

        if "href" not in response: self.parseError('JSON')

        self.html = self.load("http://extabit.com/file/%s%s" % (fileID, response['href']))
        m = re.search(self.DOWNLOAD_LINK_PATTERN, self.html)
        if not m:
            self.parseError('Download URL')
        url = m.group(1)
        self.logDebug("Download URL: " + url)
        self.download(url)


getInfo = create_getInfo(ExtabitCom)
diff --git a/pyload/plugins/hoster/FastshareCz.py b/pyload/plugins/hoster/FastshareCz.py
new file mode 100644
index 000000000..1dbf9fe8f
--- /dev/null
+++ b/pyload/plugins/hoster/FastshareCz.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+# Test links (random.bin):
+# http://www.fastshare.cz/2141189/random.bin
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns
+
+
class FastshareCz(SimpleHoster):
    """Hoster plugin for fastshare.cz (captcha-gated free and premium)."""

    __name__ = "FastshareCz"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)?fastshare.cz/\d+/.+"
    __version__ = "0.15"
    __description__ = """FastShare.cz"""
    __author_name__ = ("zoidberg", "stickell")

    FILE_INFO_PATTERN = r'<h1 class="dwp">(?P<N>[^<]+)</h1>\s*<div class="fileinfo">\s*(?:Velikost|Size)\s*: (?P<S>[^,]+),'
    FILE_OFFLINE_PATTERN = ur'<td align=center>Tento soubor byl smazán'
    # Strip URL fragments before processing.
    FILE_URL_REPLACEMENTS = [('#.*','')]

    FREE_URL_PATTERN = ur'<form method=post action=(/free/.*?)><b>Stáhnout FREE.*?<img src="([^"]*)">'
    PREMIUM_URL_PATTERN = r'(http://data\d+\.fastshare\.cz/download\.php\?id=\d+\&[^\s\"\'<>]+)'
    # Czech "not enough credit" message; dots stand in for accented letters.
    NOT_ENOUGH_CREDIC_PATTERN = "Nem.te dostate.n. kredit pro sta.en. tohoto souboru"

    def process(self, pyfile):
        pyfile.url = replace_patterns(pyfile.url, self.FILE_URL_REPLACEMENTS)
        self.req.setOption("timeout", 120)
        # Premium path only when traffic checking passes (or is disabled).
        if self.premium and (not self.SH_CHECK_TRAFFIC or self.checkTrafficLeft()):
            self.handlePremium()
        else:
            self.html = self.load(pyfile.url, decode = not self.SH_BROKEN_ENCODING, cookies = self.SH_COOKIES)
            self.getFileInfo()
            self.handleFree()

    def handleFree(self):
        # All free slots taken: short wait, then retry.
        if u">100% FREE slotů je plnÜch.<" in self.html:
            self.setWait(60, False)
            self.wait()
            self.retry(120, "No free slots")

        found = re.search(self.FREE_URL_PATTERN, self.html)
        if not found: self.parseError("Free URL")
        action, captcha_src = found.groups()
        # Solve the image captcha and submit it with the download form.
        captcha = self.decryptCaptcha("http://www.fastshare.cz/" + captcha_src)
        self.download("http://www.fastshare.cz/" + action, post = {"code": captcha, "submit": u"stáhnout"})

        # Landing back on the title page means another free download is
        # already running from this IP.
        check = self.checkDownload({
            "paralell_dl": "<title>FastShare.cz</title>|<script>alert\('Pres FREE muzete stahovat jen jeden soubor najednou.'\)"
        })
        self.logDebug(self.req.lastEffectiveURL, self.req.lastURL, self.req.code)

        if check == "paralell_dl":
            self.setWait(600, True)
            self.wait()
            self.retry(6, "Paralell download")

    def handlePremium(self):
        header = self.load(self.pyfile.url, just_header=True)
        if 'location' in header:
            # Premium accounts are redirected straight to the file.
            url = header['location']
        else:
            self.html = self.load(self.pyfile.url)
            self.getFileInfo()
            if self.NOT_ENOUGH_CREDIC_PATTERN in self.html:
                self.logWarning('Not enough traffic left')
                self.resetAccount()

            found = re.search(self.PREMIUM_URL_PATTERN, self.html)
            if not found: self.parseError("Premium URL")
            url = found.group(1)

        self.logDebug("PREMIUM URL: %s" % url)
        self.download(url)

        # The payload itself may still be the "not enough credit" page.
        check = self.checkDownload({"credit": re.compile(self.NOT_ENOUGH_CREDIC_PATTERN)})
        if check == "credit":
            self.resetAccount()

getInfo = create_getInfo(FastshareCz)
diff --git a/pyload/plugins/hoster/FileApeCom.py b/pyload/plugins/hoster/FileApeCom.py
new file mode 100644
index 000000000..f5182a788
--- /dev/null
+++ b/pyload/plugins/hoster/FileApeCom.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.Hoster import Hoster
+
class FileApeCom(Hoster):
    """Hoster plugin for fileape.com free downloads."""

    __name__ = "FileApeCom"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?fileape\.com/(index\.php\?act=download\&id=|dl/)\w+"
    __version__ = "0.11"
    __description__ = """FileApe Download Hoster"""
    __author_name__ = ("espes")

    def setup(self):
        self.multiDL = False
        self.html = None

    def process(self, pyfile):
        self.pyfile = pyfile

        self.html = self.load(self.pyfile.url)
        if "This file is either temporarily unavailable or does not exist" in self.html:
            self.offline()

        # Request the intermediate countdown page.
        self.html = self.load(self.pyfile.url + "&g=1")

        # The continue link appears in either of two known markups.
        found = re.search(r"window\.location = '(http://.*?)'", self.html)
        if not found:
            found = re.search(r"'(http://fileape\.com/\?act=download&t=[A-Za-z0-9_-]+)'", self.html)
        if not found:
            self.fail("Plugin Defect")
        continuePage = found.group(1)

        # Honor the advertised countdown (default 60s) plus a small margin.
        wait = 60
        found = re.search("id=\"waitnumber\" style=\"font-size:2em; text-align:center; width:33px; height:33px;\">(\\d+)</span>", self.html)
        if found:
            wait = int(found.group(1))
        self.setWait(wait + 3)
        self.wait()

        self.html = self.load(continuePage)

        # Same fallback strategy for the final download link.
        found = re.search(r"<div style=\"text-align:center; font-size: 30px;\"><a href=\"(http://.*?)\"", self.html)
        if not found:
            found = re.search(r"\"(http://tx\d+\.fileape\.com/[a-z]+/.*?)\"", self.html)
        if not found:
            self.fail("Plugin Defect")
        link = found.group(1)

        # Derive the file name from the final URL segment.
        pyfile.name = link.rpartition('/')[2]

        self.download(link)

        check = self.checkDownload({"exp": "Download ticket expired"})
        if check == "exp":
            self.logInfo("Ticket expired, retrying...")
            self.retry()
diff --git a/pyload/plugins/hoster/FilebeerInfo.py b/pyload/plugins/hoster/FilebeerInfo.py
new file mode 100644
index 000000000..216ecfbca
--- /dev/null
+++ b/pyload/plugins/hoster/FilebeerInfo.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
+
+
+# Dead-hoster stub: filebeer.info is defunct; DeadHoster marks all
+# matching links as offline without touching the network.
+class FilebeerInfo(DeadHoster):
+ __name__ = "FilebeerInfo"
+ __type__ = "hoster"
+ __pattern__ = r"http://(?:www\.)?filebeer\.info/(?!\d*~f)(?P<ID>\w+).*"
+ __version__ = "0.03"
+ __description__ = """Filebeer.info plugin"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+
+# Module-level link-check entry point expected by pyLoad.
+getInfo = create_getInfo(FilebeerInfo) \ No newline at end of file
diff --git a/pyload/plugins/hoster/FilecloudIo.py b/pyload/plugins/hoster/FilecloudIo.py
new file mode 100644
index 000000000..71af3e9dd
--- /dev/null
+++ b/pyload/plugins/hoster/FilecloudIo.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, PluginParseError
+from module.common.json_layer import json_loads
+from module.plugins.internal.CaptchaService import ReCaptcha
+from module.network.RequestFactory import getURL
+
+# Hoster plugin for filecloud.io (ex ifile.it): drives the site's JSON
+# download-request API, solving ReCaptcha challenges when demanded.
+class FilecloudIo(SimpleHoster):
+ __name__ = "FilecloudIo"
+ __type__ = "hoster"
+ __pattern__ = r"http://(?:\w*\.)*(?:filecloud\.io|ifile\.it|mihd\.net)/(?P<ID>\w+).*"
+ __version__ = "0.01"
+ __description__ = """Filecloud.io (formerly Ifile.it) plugin - free account only"""
+ __author_name__ = ("zoidberg")
+
+ FILE_SIZE_PATTERN = r'{var __ab1 = (?P<S>\d+);}'
+ FILE_NAME_PATTERN = r'id="aliasSpan">(?P<N>.*?)&nbsp;&nbsp;<'
+ FILE_OFFLINE_PATTERN = r'l10n.(FILES__DOESNT_EXIST|REMOVED)'
+ TEMP_OFFLINE_PATTERN = r'l10n.FILES__WARNING'
+
+ UKEY_PATTERN = r"'ukey'\s*:'(\w+)',"
+ AB1_PATTERN = r"if\( __ab1 == '(\w+)' \)"
+ ERROR_MSG_PATTERN = r"var __error_msg\s*=\s*l10n\.(.*?);"
+ DOWNLOAD_LINK_PATTERN = r'"(http://s\d+.filecloud.io/%s/\d+/.*?)"'
+ RECAPTCHA_KEY_PATTERN = r"var __recaptcha_public\s*=\s*'([^']+)';"
+ # Fallback public key, used when the page does not expose its own.
+ RECAPTCHA_KEY = '6Lf5OdISAAAAAEZObLcx5Wlv4daMaASRov1ysDB1'
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True
+ self.chunkLimit = 1
+
+ def handleFree(self):
+ data = {"ukey": self.file_info['ID']}
+
+ # "__ab1" is an anti-bot token scraped from the page JavaScript.
+ found = re.search(self.AB1_PATTERN, self.html)
+ if not found:
+ raise PluginParseError("__AB1")
+ data["__ab1"] = found.group(1)
+
+ # A (free) account is mandatory; re-login with a solved captcha
+ # if the session is not active, then retry the whole download.
+ if not self.account:
+ self.fail("User not logged in")
+ elif not self.account.logged_in:
+ recaptcha = ReCaptcha(self)
+ captcha_challenge, captcha_response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ self.account.form_data = {"recaptcha_challenge_field" : captcha_challenge,
+ "recaptcha_response_field" : captcha_response}
+ self.account.relogin(self.user)
+ self.retry(max_tries = 2)
+
+ json_url = "http://filecloud.io/download-request.json"
+ response = self.load(json_url, post = data)
+ self.logDebug(response)
+ response = json_loads(response)
+
+ if "error" in response and response["error"]:
+ self.fail(response)
+
+ self.logDebug(response)
+ if response["captcha"]:
+ # Server demands a captcha: prefer the key embedded in the page,
+ # fall back to the hard-coded site key.
+ recaptcha = ReCaptcha(self)
+ found = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
+ captcha_key = found.group(1) if found else self.RECAPTCHA_KEY
+ data["ctype"] = "recaptcha"
+
+ # Up to 5 captcha attempts before giving up.
+ for i in range(5):
+ data["recaptcha_challenge"], data["recaptcha_response"] = recaptcha.challenge(captcha_key)
+
+ json_url = "http://filecloud.io/download-request.json"
+ response = self.load(json_url, post = data)
+ self.logDebug(response)
+ response = json_loads(response)
+
+ if "retry" in response and response["retry"]:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail("Incorrect captcha")
+
+ if response["dl"]:
+ # Download page now carries the tokenized direct link.
+ self.html = self.load('http://filecloud.io/download.html')
+ found = re.search(self.DOWNLOAD_LINK_PATTERN % self.file_info['ID'], self.html)
+ if not found:
+ raise PluginParseError("Download URL")
+ download_url = found.group(1)
+ self.logDebug("Download URL: %s" % download_url)
+
+ if "size" in self.file_info and self.file_info['size']:
+ self.check_data = {"size": int(self.file_info['size'])}
+ self.download(download_url)
+ else:
+ self.fail("Unexpected server response")
+
+getInfo = create_getInfo(FilecloudIo) \ No newline at end of file
diff --git a/pyload/plugins/hoster/FilefactoryCom.py b/pyload/plugins/hoster/FilefactoryCom.py
new file mode 100644
index 000000000..fdde1f9d7
--- /dev/null
+++ b/pyload/plugins/hoster/FilefactoryCom.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+# Test links (random.bin):
+# http://www.filefactory.com/file/ymxkmdud2o3/n/random.bin
+
+import re
+
+from module.plugins.internal.SimpleHoster import SimpleHoster
+from module.network.RequestFactory import getURL
+from module.utils import parseFileSize
+
+
+# Bulk link checker: posts all URLs to filefactory's link-check tool and
+# parses online/offline status out of the response HTML.
+# Returns tuples of (name, size, status, url); status 1=offline, 2=online.
+def getInfo(urls):
+ file_info = list()
+ list_ids = dict()
+
+ # Create a dict id:url. Will be used to retrieve original url
+ for url in urls:
+ m = re.search(FilefactoryCom.__pattern__, url)
+ list_ids[m.group('id')] = url
+
+ # WARN: There could be a limit of urls for request
+ post_data = {'func': 'links', 'links': '\n'.join(urls)}
+ rep = getURL('http://www.filefactory.com/tool/links.php', post=post_data, decode=True)
+
+ # Online links
+ for m in re.finditer(
+ r'innerText">\s*<h1 class="name">(?P<N>.+) \((?P<S>[\w.]+) (?P<U>\w+)\)</h1>\s*<p>http://www.filefactory.com/file/(?P<ID>\w+).*</p>\s*<p class="hidden size">',
+ rep):
+ file_info.append((m.group('N'), parseFileSize(m.group('S'), m.group('U')), 2, list_ids[m.group('ID')]))
+
+ # Offline links
+ for m in re.finditer(
+ r'innerText">\s*<h1>(http://www.filefactory.com/file/(?P<ID>\w+)/)</h1>\s*<p>\1</p>\s*<p class="errorResponse">Error: file not found</p>',
+ rep):
+ file_info.append((list_ids[m.group('ID')], 0, 1, list_ids[m.group('ID')]))
+
+ return file_info
+
+
+# Hoster plugin for filefactory.com with separate free and premium flows.
+class FilefactoryCom(SimpleHoster):
+ __name__ = "FilefactoryCom"
+ __type__ = "hoster"
+ __pattern__ = r"https?://(?:www\.)?filefactory\.com/file/(?P<id>[a-zA-Z0-9]+)"
+ __version__ = "0.41"
+ __description__ = """Filefactory.Com File Download Hoster"""
+ __author_name__ = ("stickell")
+ __author_mail__ = ("l.stickell@yahoo.it")
+
+ DIRECT_LINK_PATTERN = r'<section id="downloadLink">\s*<p class="textAlignCenter">\s*<a href="([^"]+)">[^<]+</a>\s*</p>\s*</section>'
+
+ def process(self, pyfile):
+ # Normalize short URLs to the canonical /file/<id>/n/<name> form
+ # by following the server redirect.
+ if not re.match(self.__pattern__ + r'/n/.+', pyfile.url): # Not in standard format
+ header = self.load(pyfile.url, just_header=True)
+ if 'location' in header:
+ self.pyfile.url = 'http://www.filefactory.com' + header['location']
+
+ if self.premium and (not self.SH_CHECK_TRAFFIC or self.checkTrafficLeft()):
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+ if "Currently only Premium Members can download files larger than" in self.html:
+ self.fail("File too large for free download")
+ elif "All free download slots on this server are currently in use" in self.html:
+ self.retry(50, 900, "All free slots are busy")
+
+ # Load the page that contains the direct link
+ url = re.search(r"document\.location\.host \+\s*'(.+)';", self.html)
+ if not url:
+ self.parseError('Unable to detect free link')
+ url = 'http://www.filefactory.com' + url.group(1)
+ self.html = self.load(url, decode=True)
+
+ # Free downloads wait time
+ waittime = re.search(r'id="startWait" value="(\d+)"', self.html)
+ if not waittime:
+ self.parseError('Unable to detect wait time')
+ self.setWait(int(waittime.group(1)))
+ self.wait()
+
+ # Parse the direct link and download it
+ direct = re.search(r'data-href-direct="(.*)" class="button', self.html)
+ if not direct:
+ self.parseError('Unable to detect free direct link')
+ direct = direct.group(1)
+ self.logDebug('DIRECT LINK: ' + direct)
+ self.download(direct, disposition=True)
+
+ check = self.checkDownload({"multiple": "You are currently downloading too many files at once.",
+ "error": '<div id="errorMessage">'})
+
+ if check == "multiple":
+ self.logDebug("Parallel downloads detected; waiting 15 minutes")
+ self.retry(wait_time=15 * 60, reason='Parallel downloads')
+ elif check == "error":
+ self.fail("Unknown error")
+
+ def handlePremium(self):
+ # Premium: try direct-download redirect first, then the page link.
+ header = self.load(self.pyfile.url, just_header=True)
+ if 'location' in header:
+ url = header['location'].strip()
+ if not url.startswith("http://"):
+ url = "http://www.filefactory.com" + url
+ elif 'content-disposition' in header:
+ # Response is already the file itself.
+ url = self.pyfile.url
+ else:
+ html = self.load(self.pyfile.url)
+ found = re.search(self.DIRECT_LINK_PATTERN, html)
+ if found:
+ url = found.group(1)
+ else:
+ self.parseError('Unable to detect premium direct link')
+
+ self.logDebug('DIRECT PREMIUM LINK: ' + url)
+ self.download(url, disposition=True)
diff --git a/pyload/plugins/hoster/FilejungleCom.py b/pyload/plugins/hoster/FilejungleCom.py
new file mode 100644
index 000000000..fd833eef2
--- /dev/null
+++ b/pyload/plugins/hoster/FilejungleCom.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+from module.plugins.hoster.FileserveCom import FileserveCom, checkFile
+from module.plugins.Plugin import chunks
+
+# Filejungle shares its site engine with fileserve.com, so this plugin
+# only overrides the site-specific URLs and scrape patterns.
+class FilejungleCom(FileserveCom):
+ __name__ = "FilejungleCom"
+ __type__ = "hoster"
+ __pattern__ = r"http://(?:www\.)?filejungle\.com/f/(?P<id>[^/]+).*"
+ __version__ = "0.51"
+ __description__ = """Filejungle.com plugin - free only"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ # [file page base, link-checker endpoint, recaptcha-check endpoint]
+ URLS = ['http://www.filejungle.com/f/', 'http://www.filejungle.com/check_links.php', 'http://www.filejungle.com/checkReCaptcha.php']
+ LINKCHECK_TR = r'<li>\s*(<div class="col1">.*?)</li>'
+ LINKCHECK_TD = r'<div class="(?:col )?col\d">(?:<[^>]*>|&nbsp;)*([^<]*)'
+
+ LONG_WAIT_PATTERN = r'<h1>Please wait for (\d+) (\w+)\s*to download the next file\.</h1>'
+
+# Bulk link checker; delegates to the shared FileserveCom checkFile.
+def getInfo(urls):
+ for chunk in chunks(urls, 100): yield checkFile(FilejungleCom, chunk) \ No newline at end of file
diff --git a/pyload/plugins/hoster/FilepostCom.py b/pyload/plugins/hoster/FilepostCom.py
new file mode 100644
index 000000000..8e9df3f17
--- /dev/null
+++ b/pyload/plugins/hoster/FilepostCom.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+
+ changelog:
+ 0.27 - 2012-08-12 - hgg
+ fix "global name 'js_answer' is not defined" bug
+ fix captcha bug #1 (failed on non-english "captcha wrong" errors)
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha
+from module.common.json_layer import json_loads
+from time import time
+
+# Hoster plugin for filepost.com free downloads: token + wait time via the
+# site's JSON endpoint, with password-protected link and ReCaptcha support.
+class FilepostCom(SimpleHoster):
+ __name__ = "FilepostCom"
+ __type__ = "hoster"
+ __pattern__ = r"https?://(?:www\.)?(?:filepost\.com/files|fp.io)/([^/]+).*"
+ __version__ = "0.27"
+ __description__ = """Filepost.com plugin - free only"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ FILE_INFO_PATTERN = r'<input type="text" id="url" value=\'<a href[^>]*>(?P<N>[^>]+?) - (?P<S>[0-9\.]+ [kKMG]i?B)</a>\' class="inp_text"/>'
+ #FILE_INFO_PATTERN = r'<h1>(?P<N>[^<]+)</h1>\s*<div class="ul">\s*<ul>\s*<li><span>Size:</span> (?P<S>[0-9.]+) (?P<U>[kKMG])i?B</li>'
+ FILE_OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>|<div class="file_info file_info_deleted">'
+ RECAPTCHA_KEY_PATTERN = r"Captcha.init\({\s*key:\s*'([^']+)'"
+ FLP_TOKEN_PATTERN = r"set_store_options\({token: '([^']+)'"
+
+ def handleFree(self):
+ # Find token and captcha key
+ file_id = re.search(self.__pattern__, self.pyfile.url).group(1)
+
+ found = re.search(self.FLP_TOKEN_PATTERN, self.html)
+ if not found: self.parseError("Token")
+ flp_token = found.group(1)
+
+ found = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
+ if not found: self.parseError("Captcha key")
+ captcha_key = found.group(1)
+
+ # Get wait time
+ # JsHttpRequest is a timestamp-based request id the endpoint expects.
+ get_dict = {'SID' : self.req.cj.getCookie('SID'), 'JsHttpRequest' : str(int(time()*10000)) + '-xml'}
+ post_dict = {'action' : 'set_download', 'token' : flp_token, 'code' : file_id}
+ wait_time = int(self.getJsonResponse(get_dict, post_dict, 'wait_time'))
+
+ if wait_time > 0:
+ self.setWait(wait_time)
+ self.wait()
+
+ post_dict = {"token" : flp_token, "code" : file_id, "file_pass" : ''}
+
+ if 'var is_pass_exists = true;' in self.html:
+ # Solve password
+ # Try each stored package password until one yields a link.
+ for file_pass in self.getPassword().splitlines():
+ get_dict['JsHttpRequest'] = str(int(time()*10000)) + '-xml'
+ post_dict['file_pass'] = file_pass
+ self.logInfo("Password protected link, trying " + file_pass)
+
+ download_url = self.getJsonResponse(get_dict, post_dict, 'link')
+ if download_url:
+ break
+
+ else: self.fail("No or incorrect password")
+
+ else:
+ # Solve recaptcha
+ recaptcha = ReCaptcha(self)
+
+ # First pass is attempted without a captcha answer (pokus == 0).
+ for pokus in range(5):
+ get_dict['JsHttpRequest'] = str(int(time()*10000)) + '-xml'
+ if pokus:
+ post_dict["recaptcha_challenge_field"], post_dict["recaptcha_response_field"] = recaptcha.challenge(captcha_key)
+ self.logDebug(u"RECAPTCHA: %s : %s : %s" % (captcha_key, post_dict["recaptcha_challenge_field"], post_dict["recaptcha_response_field"]))
+
+ download_url = self.getJsonResponse(get_dict, post_dict, 'link')
+ if download_url:
+ if pokus: self.correctCaptcha()
+ break
+ elif pokus:
+ self.invalidCaptcha()
+
+ else: self.fail("Invalid captcha")
+
+ # Download
+ self.download(download_url)
+
+ # Calls the site JSON endpoint and extracts js.answer[field].
+ # Returns None for wrong password / wrong captcha so callers can retry.
+ def getJsonResponse(self, get_dict, post_dict, field):
+ json_response = json_loads(self.load('https://filepost.com/files/get/', get = get_dict, post = post_dict))
+ self.logDebug(json_response)
+
+ if not 'js' in json_response: self.parseError('JSON %s 1' % field)
+
+ # i changed js_answer to json_response['js'] since js_answer is nowhere set.
+ # i don't know the JSON-HTTP specs in detail, but the previous author
+ # accessed json_response['js']['error'] as well as js_answer['error'].
+ # see the two lines commented out with "# ~?".
+ if 'error' in json_response['js']:
+ if json_response['js']['error'] == 'download_delay':
+ self.retry(json_response['js']['params']['next_download'])
+ # ~? self.retry(js_answer['params']['next_download'])
+ elif 'Wrong file password' in json_response['js']['error']:
+ return None
+ elif 'You entered a wrong CAPTCHA code' in json_response['js']['error']:
+ return None
+ elif 'CAPTCHA Code nicht korrekt' in json_response['js']['error']:
+ return None
+ elif 'CAPTCHA' in json_response['js']['error']:
+ self.logDebug('error response is unknown, but mentions CAPTCHA -> return None')
+ return None
+ else:
+ self.fail(json_response['js']['error'])
+ # ~? self.fail(js_answer['error'])
+
+ if not 'answer' in json_response['js'] or not field in json_response['js']['answer']:
+ self.parseError('JSON %s 2' % field)
+
+ return json_response['js']['answer'][field]
+
+getInfo = create_getInfo(FilepostCom)
diff --git a/pyload/plugins/hoster/FilerNet.py b/pyload/plugins/hoster/FilerNet.py
new file mode 100644
index 000000000..8e8cee526
--- /dev/null
+++ b/pyload/plugins/hoster/FilerNet.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+# Test links (random.bin):
+# http://filer.net/get/ivgf5ztw53et3ogd
+# http://filer.net/get/hgo14gzcng3scbvv
+
+import pycurl
+import re
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+
+# Hoster plugin for filer.net: token/hash form walk plus ReCaptcha for
+# free mode, direct or scraped link for premium.
+class FilerNet(SimpleHoster):
+ __name__ = "FilerNet"
+ __type__ = "hoster"
+ __pattern__ = r"https?://(www\.)?filer\.net/get/(\w+)"
+ __version__ = "0.02"
+ __description__ = """Filer.net Download Hoster"""
+ __author_name__ = ("stickell")
+ __author_mail__ = ("l.stickell@yahoo.it")
+
+ FILE_INFO_PATTERN = r'<h1 class="page-header">Free Download (?P<N>\S+) <small>(?P<S>[\w.]+) (?P<U>\w+)</small></h1>'
+ FILE_OFFLINE_PATTERN = r'Nicht gefunden'
+ RECAPTCHA_KEY = '6LcFctISAAAAAAgaeHgyqhNecGJJRnxV1m_vAz3V'
+ DIRECT_LINK_PATTERN = r'href="([^"]+)">Get download</a>'
+
+ def process(self, pyfile):
+ if self.premium and (not self.SH_CHECK_TRAFFIC or self.checkTrafficLeft()):
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def handleFree(self):
+ self.req.setOption("timeout", 120)
+ self.html = self.load(self.pyfile.url, decode=not self.SH_BROKEN_ENCODING, cookies=self.SH_COOKIES)
+
+ # Wait between downloads
+ m = re.search(r'musst du <span id="time">(\d+)</span> Sekunden warten', self.html)
+ if m:
+ waittime = int(m.group(1))
+ self.retry(3, waittime, 'Wait between free downloads')
+
+ self.getFileInfo()
+
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ # Step 1: submit the hidden "token" form field.
+ inputs = self.parseHtmlForm(input_names='token')[1]
+ if 'token' not in inputs:
+ self.parseError('Unable to detect token')
+ token = inputs['token']
+ self.logDebug('Token: ' + token)
+
+ self.html = self.load(self.pyfile.url, post={'token': token}, decode=True)
+
+ # Step 2: the follow-up form carries a "hash" field needed below.
+ inputs = self.parseHtmlForm(input_names='hash')[1]
+ if 'hash' not in inputs:
+ self.parseError('Unable to detect hash')
+ hash_data = inputs['hash']
+ self.logDebug('Hash: ' + hash_data)
+
+ downloadURL = ''
+ recaptcha = ReCaptcha(self)
+ # Up to 5 captcha attempts; success is signalled by a redirect.
+ for i in xrange(5):
+ challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
+ post_data = {'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': response,
+ 'hash': hash_data}
+
+ # Workaround for 0.4.9 just_header issue. In 0.5 clean the code using just_header
+ self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 0)
+ self.load(self.pyfile.url, post=post_data)
+ self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 1)
+
+ if 'location' in self.req.http.header:
+ location = re.search(r'location: (\S+)', self.req.http.header).group(1)
+ downloadURL = 'http://filer.net' + location
+ self.correctCaptcha()
+ break
+ else:
+ self.logInfo('Wrong captcha')
+ self.invalidCaptcha()
+
+ if not downloadURL:
+ self.fail("No Download url retrieved/all captcha attempts failed")
+
+ self.download(downloadURL, disposition=True)
+
+ def handlePremium(self):
+ header = self.load(self.pyfile.url, just_header=True)
+ if 'location' in header: # Direct Download ON
+ dl = self.pyfile.url
+ else: # Direct Download OFF
+ html = self.load(self.pyfile.url)
+ m = re.search(self.DIRECT_LINK_PATTERN, html)
+ if not m:
+ self.parseError("Unable to detect direct link, try to enable 'Direct download' in your user settings")
+ dl = 'http://filer.net' + m.group(1)
+
+ self.logDebug('Direct link: ' + dl)
+ self.download(dl, disposition=True)
+
+
+getInfo = create_getInfo(FilerNet)
diff --git a/pyload/plugins/hoster/FilerioCom.py b/pyload/plugins/hoster/FilerioCom.py
new file mode 100644
index 000000000..7be0fa4f6
--- /dev/null
+++ b/pyload/plugins/hoster/FilerioCom.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+# Thin XFileSharingPro subclass for filerio.in (and aliases); only the
+# host-specific patterns and URL normalization differ from the base.
+class FilerioCom(XFileSharingPro):
+ __name__ = "FilerioCom"
+ __type__ = "hoster"
+ __pattern__ = r"http://(?:\w*\.)*(filerio\.(in|com)|filekeen\.com)/\w{12}"
+ __version__ = "0.02"
+ __description__ = """FileRio.in hoster plugin"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ FILE_OFFLINE_PATTERN = '<b>&quot;File Not Found&quot;</b>|File has been removed due to Copyright Claim'
+ HOSTER_NAME = "filerio.in"
+ # Rewrite any mirror/alias URL to the canonical filerio.in host.
+ FILE_URL_REPLACEMENTS = [(r'http://.*?/','http://filerio.in/')]
+
+ def setup(self):
+ # Resume and parallel downloads only for premium accounts.
+ self.resumeDownload = self.multiDL = self.premium
+
+getInfo = create_getInfo(FilerioCom) \ No newline at end of file
diff --git a/pyload/plugins/hoster/FilesMailRu.py b/pyload/plugins/hoster/FilesMailRu.py
new file mode 100644
index 000000000..c7232f5dd
--- /dev/null
+++ b/pyload/plugins/hoster/FilesMailRu.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster, chunks
+from module.network.RequestFactory import getURL
+
+# Bulk link checker for files.mail.ru: fetches each page and scrapes the
+# file name; yields (name, size, status, url) tuples per chunk.
+def getInfo(urls):
+ result = []
+ for chunk in chunks(urls, 10):
+ for url in chunk:
+ src = getURL(url)
+ if r'<div class="errorMessage mb10">' in src:
+ result.append((url, 0, 1, url))
+ elif r'Page cannot be displayed' in src:
+ result.append((url, 0, 1, url))
+ else:
+ try:
+ url_pattern = '<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
+ file_name = re.search(url_pattern, src).group(0).split(', event)">')[1].split('</a>')[0]
+ result.append((file_name, 0, 2, url))
+ except:
+ # NOTE(review): a parse failure silently drops the URL from
+ # the result set — confirm this is intentional.
+ pass
+
+
+ # status 1=OFFLINE, 2=OK, 3=UNKNOWN
+ # result.append((#name,#size,#status,#url))
+ yield result
+
+# Hoster plugin for files.mail.ru; re-downloads when the server returns a
+# small HTML page instead of the file (redirect glitch).
+class FilesMailRu(Hoster):
+ __name__ = "FilesMailRu"
+ __type__ = "hoster"
+ __pattern__ = r"http://files\.mail\.ru/.*"
+ __version__ = "0.31"
+ __description__ = """Files.Mail.Ru One-Klick Hoster"""
+ __author_name__ = ("oZiRiz")
+ __author_mail__ = ("ich@oziriz.de")
+
+
+ def setup(self):
+ # Anonymous users get a single connection, one chunk.
+ if not self.account:
+ self.multiDL = False
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url)
+ # Anchor that holds both the direct link and the display name.
+ self.url_pattern = '<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
+
+ #marks the file as "offline" when the pattern was found on the html-page'''
+ if r'<div class="errorMessage mb10">' in self.html:
+ self.offline()
+
+ elif r'Page cannot be displayed' in self.html:
+ self.offline()
+
+ #the filename that will be showed in the list (e.g. test.part1.rar)'''
+ pyfile.name = self.getFileName()
+
+ #prepare and download'''
+ if not self.account:
+ self.prepare()
+ self.download(self.getFileUrl())
+ self.myPostProcess()
+ else:
+ self.download(self.getFileUrl())
+ self.myPostProcess()
+
+ def prepare(self):
+ '''You have to wait some seconds. Otherwise you will get a 40Byte HTML Page instead of the file you expected'''
+ self.setWait(10)
+ self.wait()
+ return True
+
+ def getFileUrl(self):
+ '''gives you the URL to the file. Extracted from the Files.mail.ru HTML-page stored in self.html'''
+ file_url = re.search(self.url_pattern, self.html).group(0).split('<a href="')[1].split('" onclick="return Act')[0]
+ return file_url
+
+
+ def getFileName(self):
+ '''gives you the Name for each file. Also extracted from the HTML-Page'''
+ file_name = re.search(self.url_pattern, self.html).group(0).split(', event)">')[1].split('</a>')[0]
+ return file_name
+
+ def myPostProcess(self):
+ # searches the file for HTMl-Code. Sometimes the Redirect
+ # doesn't work (maybe a curl Problem) and you get only a small
+ # HTML file and the Download is marked as "finished"
+ # then the download will be restarted. It's only bad for these
+ # who want download a HTML-File (it's one in a million ;-) )
+ #
+ # The maximum UploadSize allowed on files.mail.ru at the moment is 100MB
+ # so i set it to check every download because sometimes there are downloads
+ # that contain the HTML-Text and 60MB ZEROs after that in a xyzfile.part1.rar file
+ # (Loading 100MB in to ram is not an option)
+ check = self.checkDownload({"html": "<meta name="}, read_size=50000)
+ if check == "html":
+ self.logInfo(_("There was HTML Code in the Downloaded File(%s)...redirect error? The Download will be restarted." % self.pyfile.name))
+ self.retry()
diff --git a/pyload/plugins/hoster/FileserveCom.py b/pyload/plugins/hoster/FileserveCom.py
new file mode 100644
index 000000000..c6f414b0e
--- /dev/null
+++ b/pyload/plugins/hoster/FileserveCom.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+"""
+
+import re
+from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+from module.plugins.internal.CaptchaService import ReCaptcha
+from module.common.json_layer import json_loads
+from module.utils import parseFileSize
+from module.plugins.Plugin import chunks
+
+# Shared bulk link checker used by FileserveCom and its sister plugins
+# (Filejungle, Uploadstation): posts URLs to the plugin's link-checker
+# endpoint and parses one (name, size, status, url) tuple per row.
+def checkFile(plugin, urls):
+ html = getURL(plugin.URLS[1], post = {"urls": "\n".join(urls)}, decode=True)
+
+ file_info = []
+ for li in re.finditer(plugin.LINKCHECK_TR, html, re.DOTALL):
+ try:
+ cols = re.findall(plugin.LINKCHECK_TD, li.group(1))
+ if cols:
+ file_info.append((
+ cols[1] if cols[1] != '--' else cols[0],
+ parseFileSize(cols[2]) if cols[2] != '--' else 0,
+ 2 if cols[3].startswith('Available') else 1,
+ cols[0]))
+ except Exception, e:
+ # Skip rows that do not match the expected column layout.
+ continue
+
+ return file_info
+
+# Hoster plugin for fileserve.com. Free mode walks the site's AJAX
+# check/timer/captcha protocol; premium mode uses the official API.
+# Subclassed by FilejungleCom and UploadstationCom (shared flow).
+class FileserveCom(Hoster):
+ __name__ = "FileserveCom"
+ __type__ = "hoster"
+ __pattern__ = r"http://(?:www\.)?fileserve\.com/file/(?P<id>[^/]+).*"
+ __version__ = "0.51"
+ __description__ = """Fileserve.Com File Download Hoster"""
+ __author_name__ = ("jeix", "mkaay", "paul king", "zoidberg")
+ __author_mail__ = ("jeix@hasnomail.de", "mkaay@mkaay.de", "", "zoidberg@mujmail.cz")
+
+ # [file page base, link-checker endpoint, recaptcha-check endpoint]
+ URLS = ['http://www.fileserve.com/file/', 'http://www.fileserve.com/link-checker.php', 'http://www.fileserve.com/checkReCaptcha.php']
+ LINKCHECK_TR = r'<tr>\s*(<td>http://www.fileserve\.com/file/.*?)</tr>'
+ LINKCHECK_TD = r'<td>(?:<[^>]*>|&nbsp;)*([^<]*)'
+
+ CAPTCHA_KEY_PATTERN = r"var reCAPTCHA_publickey='(?P<key>[^']+)'"
+ LONG_WAIT_PATTERN = r'<li class="title">You need to wait (\d+) (\w+) to start another download\.</li>'
+ LINK_EXPIRED_PATTERN = "Your download link has expired"
+ DAILY_LIMIT_PATTERN = "Your daily download limit has been reached"
+ NOT_LOGGED_IN_PATTERN = '<form (name="loginDialogBoxForm"|id="login_form")|<li><a href="/login.php">Login</a></li>'
+
+ # shares code with FilejungleCom and UploadstationCom
+
+ def setup(self):
+ self.resumeDownload = self.multiDL = True if self.premium else False
+
+ self.file_id = re.search(self.__pattern__, self.pyfile.url).group('id')
+ self.url = "%s%s" % (self.URLS[0], self.file_id)
+ self.logDebug("File ID: %s URL: %s" % (self.file_id, self.url))
+
+ def process(self, pyfile):
+ # Resolve name/size/status via the link checker before downloading.
+ pyfile.name, pyfile.size, status, self.url = checkFile(self, [self.url])[0]
+ if status != 2: self.offline()
+ self.logDebug("File Name: %s Size: %d" % (pyfile.name, pyfile.size))
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def handleFree(self):
+ self.html = self.load(self.url)
+ # "checkDownload" returns a JSON verdict on whether we may start.
+ action = self.load(self.url, post={"checkDownload": "check"}, decode=True)
+ action = json_loads(action)
+ self.logDebug(action)
+
+ if "fail" in action:
+ if action["fail"] == "timeLimit":
+ self.html = self.load(self.url,
+ post={"checkDownload": "showError",
+ "errorType": "timeLimit"},
+ decode=True)
+
+ self.doLongWait(re.search(self.LONG_WAIT_PATTERN, self.html))
+
+ elif action["fail"] == "parallelDownload":
+ self.logWarning(_("Parallel download error, now waiting 60s."))
+ self.retry(wait_time=60, reason="parallelDownload")
+
+ else:
+ self.fail("Download check returned %s" % action["fail"])
+
+ elif "success" in action:
+ if action["success"] == "showCaptcha":
+ self.doCaptcha()
+ self.doTimmer()
+ elif action["success"] == "showTimmer":
+ self.doTimmer()
+
+ else:
+ self.fail("Unknown server response")
+
+ # show download link
+ response = self.load(self.url, post={"downloadLink": "show"}, decode=True)
+ self.logDebug("show downloadLink response : %s" % response)
+ if "fail" in response:
+ self.fail("Couldn't retrieve download url")
+
+ # this may either download our file or forward us to an error page
+ self.download(self.url, post = {"download": "normal"})
+ self.logDebug(self.req.http.lastEffectiveURL)
+
+ check = self.checkDownload({"expired": self.LINK_EXPIRED_PATTERN,
+ "wait": re.compile(self.LONG_WAIT_PATTERN),
+ "limit": self.DAILY_LIMIT_PATTERN})
+
+ if check == "expired":
+ self.logDebug("Download link was expired")
+ self.retry()
+ elif check == "wait":
+ self.doLongWait(self.lastCheck)
+ elif check == "limit":
+ #download limited reached for today (not a exact time known)
+ self.setWait(180 * 60, True) # wait 3 hours
+ self.wait()
+ self.retry(max_tries=0)
+
+ self.thread.m.reconnecting.wait(3) # Ease issue with later downloads appearing to be in parallel
+
+ def doTimmer(self):
+ # Ask the server for the mandated countdown, then sit it out.
+ response = self.load(self.url,
+ post={"downloadLink": "wait"},
+ decode=True)
+ self.logDebug("wait response : %s" % response[:80])
+
+ if "fail" in response:
+ self.fail("Failed getting wait time")
+
+ if self.__name__ == "FilejungleCom":
+ found = re.search(r'"waitTime":(\d+)', response)
+ if not found: self.fail("Cannot get wait time")
+ wait_time = int(found.group(1))
+ else:
+ # Fileserve returns the raw number of seconds; pad slightly.
+ wait_time = int(response) + 3
+
+ self.setWait(wait_time)
+ self.wait()
+
+ def doCaptcha(self):
+ # Solve ReCaptcha, verifying via the site's check endpoint; up to
+ # 5 attempts before failing the download.
+ captcha_key = re.search(self.CAPTCHA_KEY_PATTERN, self.html).group("key")
+ recaptcha = ReCaptcha(self)
+
+ for i in range(5):
+ challenge, code = recaptcha.challenge(captcha_key)
+
+ response = json_loads(self.load(self.URLS[2],
+ post={'recaptcha_challenge_field': challenge,
+ 'recaptcha_response_field': code,
+ 'recaptcha_shortencode_field': self.file_id}))
+ self.logDebug("reCaptcha response : %s" % response)
+ if not response["success"]:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else: self.fail("Invalid captcha")
+
+ def doLongWait(self, m):
+ # m is a LONG_WAIT_PATTERN match ("N seconds/minutes/hours") or
+ # None, in which case a 720s default wait is used.
+ wait_time = (int(m.group(1)) * {'seconds':1, 'minutes':60, 'hours':3600}[m.group(2)]) if m else 720
+ self.setWait(wait_time, True)
+ self.wait()
+ self.retry()
+
+ def handlePremium(self):
+ premium_url = None
+ if self.__name__ == "FileserveCom":
+ #try api download
+ response = self.load("http://app.fileserve.com/api/download/premium/",
+ post = {"username": self.user,
+ "password": self.account.getAccountData(self.user)["password"],
+ "shorten": self.file_id},
+ decode = True)
+ if response:
+ response = json_loads(response)
+ # API error codes map onto plugin states; 302 carries the link.
+ if response['error_code'] == "302": premium_url = response['next']
+ elif response['error_code'] in ["305", "500"]: self.tempOffline()
+ elif response['error_code'] in ["403", "605"]: self.resetAccount()
+ elif response['error_code'] in ["606", "607", "608"]: self.offline()
+ else: self.logError(response['error_code'], response['error_message'])
+
+ self.download(premium_url or self.pyfile.url)
+
+ if not premium_url:
+ check = self.checkDownload({"login": re.compile(self.NOT_LOGGED_IN_PATTERN)})
+
+ if check == "login":
+ self.account.relogin(self.user)
+ self.retry(reason=_("Not logged in."))
+
+# Module-level bulk link checker expected by pyLoad; batches of 100 URLs.
+def getInfo(urls):
+ for chunk in chunks(urls, 100): yield checkFile(FileserveCom, chunk) \ No newline at end of file
diff --git a/pyload/plugins/hoster/FileshareInUa.py b/pyload/plugins/hoster/FileshareInUa.py
new file mode 100644
index 000000000..9700b2d0a
--- /dev/null
+++ b/pyload/plugins/hoster/FileshareInUa.py
@@ -0,0 +1,78 @@
+from urllib import urlencode
+import re
+from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+from module.utils import parseFileSize
+
class FileshareInUa(Hoster):
    """fileshare.in.ua free hoster plugin."""
    __name__ = "FileshareInUa"
    __type__ = "hoster"
    # Dots escaped so e.g. "fileshareXinYua" no longer matches by accident.
    __pattern__ = r"http://(?:\w*\.)*?fileshare\.in\.ua/[A-Za-z0-9]+"
    __version__ = "0.02"
    __description__ = """fileshare.in.ua hoster plugin"""
    __author_name__ = ("fwannmacher")
    __author_mail__ = ("felipe@warhammerproject.com")

    HOSTER_NAME = "fileshare.in.ua"
    PATTERN_FILENAME = r'<h3 class="b-filename">(.*?)</h3>'
    PATTERN_FILESIZE = r'<b class="b-filesize">(.*?)</b>'
    PATTERN_OFFLINE = "This file doesn't exist, or has been removed."

    def setup(self):
        self.resumeDownload = True
        self.multiDL = True

    def process(self, pyfile):
        """Entry point: fetch the page, resolve name and link, download."""
        self.pyfile = pyfile
        self.html = self.load(pyfile.url, decode=True)

        if not self._checkOnline():
            self.offline()

        self.pyfile.name = self._getName()

        self.link = self._getLink()

        # The page serves a relative "/get/..." link.
        if not self.link.startswith('http://'):
            self.link = "http://fileshare.in.ua" + self.link

        self.download(self.link)

    def _checkOnline(self):
        # False when the page shows the "file removed" notice.
        return re.search(self.PATTERN_OFFLINE, self.html) is None

    def _getName(self):
        name = re.search(self.PATTERN_FILENAME, self.html)
        if name is None:
            self.fail("%s: Plugin broken." % self.__name__)

        return name.group(1)

    def _getLink(self):
        # Fail cleanly on site layout changes instead of AttributeError on None.
        link = re.search("<a href=\"(/get/.+)\" class=\"b-button m-blue m-big\" >", self.html)
        if link is None:
            self.fail("%s: Plugin broken." % self.__name__)

        return link.group(1)
+
def getInfo(urls):
    """Check fileshare.in.ua links.

    Yields one list of (name, size, status, url) tuples; status 1 = offline,
    3 = online.
    """
    result = []

    for url in urls:
        html = getURL(url)

        if re.search(FileshareInUa.PATTERN_OFFLINE, html):
            result.append((url, 0, 1, url))
        else:
            name = re.search(FileshareInUa.PATTERN_FILENAME, html)

            if name is None:
                result.append((url, 0, 1, url))
                continue

            size = re.search(FileshareInUa.PATTERN_FILESIZE, html)
            # BUGFIX: tolerate a missing size tag instead of crashing on
            # None.group(1).
            size = parseFileSize(size.group(1)) if size else 0

            result.append((name.group(1), size, 3, url))

    yield result
diff --git a/pyload/plugins/hoster/FilezyNet.py b/pyload/plugins/hoster/FilezyNet.py
new file mode 100644
index 000000000..7c5729c2d
--- /dev/null
+++ b/pyload/plugins/hoster/FilezyNet.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+import re
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
class FilezyNet(XFileSharingPro):
    """filezy.net hoster plugin (XFileSharingPro based)."""
    __name__ = "FilezyNet"
    __type__ = "hoster"
    __version__ = "0.1"
    __pattern__ = r"http://filezy.net/.*/.*.html"
    __description__ = """filezy.net hoster plugin"""

    HOSTER_NAME = "filezy.net"

    FILE_SIZE_PATTERN = r'<span class="plansize">(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</span>'
    WAIT_PATTERN = r'<div id="countdown_str" class="seconds">\n<!--Wait--> <span id=".*?">(\d+)</span>'
    DOWNLOAD_JS_PATTERN = r"<script type='text/javascript'>eval(.*)"

    def setup(self):
        self.resumeDownload = True
        # Parallel downloads only for premium users.
        self.multiDL = self.premium

    def getDownloadLink(self):
        """Submit the download form and extract the link from obfuscated JS."""
        self.logDebug("Getting download link")

        data = self.getPostParameters()
        self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)

        obfuscated_js = re.search(self.DOWNLOAD_JS_PATTERN, self.html)
        # BUGFIX: report a parse error instead of AttributeError on a
        # regex miss (site layout change).
        if obfuscated_js is None:
            self.parseError("obfuscated download JS")
        dl_file_now = self.js.eval(obfuscated_js.group(1))

        link = re.search(self.DIRECT_LINK_PATTERN, dl_file_now)
        if link is None:
            self.parseError("direct download link")
        return link.group(1)
+
+getInfo = create_getInfo(FilezyNet)
diff --git a/pyload/plugins/hoster/FlyFilesNet.py b/pyload/plugins/hoster/FlyFilesNet.py
new file mode 100644
index 000000000..0ffb76191
--- /dev/null
+++ b/pyload/plugins/hoster/FlyFilesNet.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+import urllib
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.network.RequestFactory import getURL
+
class FlyFilesNet(SimpleHoster):
    """flyfiles.net hoster plugin."""
    __name__ = "FlyFilesNet"
    __version__ = "0.1"
    __type__ = "hoster"
    __pattern__ = r'http://flyfiles\.net/.*'

    SESSION_PATTERN = r'flyfiles\.net/(.*)/.*'
    FILE_NAME_PATTERN = r'flyfiles\.net/.*/(.*)'

    def process(self, pyfile):
        """Resolve the real download URL via the site's POST API and fetch it."""
        # File name is the last URL path component, URL-decoded.
        raw_name = re.search(self.FILE_NAME_PATTERN, pyfile.url).group(1)
        pyfile.name = urllib.unquote_plus(raw_name)

        session = re.search(self.SESSION_PATTERN, pyfile.url).group(1)

        # get download URL
        parsed_url = getURL("http://flyfiles.net", post={"getDownLink": session}, cookies=True)
        self.logDebug("Parsed URL: %s" % parsed_url)

        if parsed_url in ('#downlink|', "#downlink|#"):
            self.logWarning("Could not get the download URL. Please wait 10 minutes.")
            self.setWait(600, True) # wait 10 minutes
            self.wait()
            self.retry()

        download_url = parsed_url.replace('#downlink|', '')

        self.logDebug("Download URL: %s" % download_url)
        self.download(download_url)
diff --git a/pyload/plugins/hoster/FourSharedCom.py b/pyload/plugins/hoster/FourSharedCom.py
new file mode 100644
index 000000000..518ae2ae6
--- /dev/null
+++ b/pyload/plugins/hoster/FourSharedCom.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+import re
+
class FourSharedCom(SimpleHoster):
    """4Shared.com download hoster (free mode, account required)."""
    __name__ = "FourSharedCom"
    __type__ = "hoster"
    __pattern__ = r"https?://(www\.)?4shared(\-china)?\.com/(account/)?(download|get|file|document|photo|video|audio|mp3|office|rar|zip|archive|music)/.+?/.*"
    __version__ = "0.29"
    __description__ = """4Shared Download Hoster"""
    __author_name__ = ("jeix", "zoidberg")
    __author_mail__ = ("jeix@hasnomail.de", "zoidberg@mujmail.cz")

    FILE_NAME_PATTERN = r'<meta name="title" content="(?P<N>.+?)"'
    FILE_SIZE_PATTERN = '<span title="Size: (?P<S>[0-9,.]+) (?P<U>[kKMG])i?B">'
    FILE_OFFLINE_PATTERN = 'The file link that you requested is not valid\.|This file was deleted.'
    # Decode HTML numeric entities in names; drop thousands separators in sizes.
    FILE_NAME_REPLACEMENTS = [(r"&#(\d+).", lambda m: unichr(int(m.group(1))))]
    FILE_SIZE_REPLACEMENTS = [(",", "")]

    DOWNLOAD_BUTTON_PATTERN = 'id="btnLink" href="(.*?)"'
    FID_PATTERN = 'name="d3fid" value="(.*?)"'
    DOWNLOAD_URL_PATTERN = r'name="d3link" value="(.*?)"'

    def handleFree(self):
        """Resolve the direct link from the /get/ page, wait 20 s, download."""
        if not self.account:
            self.fail("User not logged in")

        found = re.search(self.DOWNLOAD_BUTTON_PATTERN, self.html)
        if found:
            link = found.group(1)
        else:
            # No button found: derive the /get/ page URL from the file URL.
            link = re.sub(r'/(download|get|file|document|photo|video|audio)/', r'/get/', self.pyfile.url)

        self.html = self.load(link)

        found = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
        if not found: self.parseError('Download link')
        link = found.group(1)

        # Best-effort debug query of the free-download limit; failures here
        # must never abort the download itself.
        try:
            found = re.search(self.FID_PATTERN, self.html)
            response = self.load('http://www.4shared.com/web/d2/getFreeDownloadLimitInfo?fileId=%s' % found.group(1))
            self.logDebug(response)
        except Exception:
            # FIX: was a bare `except:` which also swallowed SystemExit /
            # KeyboardInterrupt.
            pass

        self.setWait(20)
        self.wait()
        self.download(link)
+
+getInfo = create_getInfo(FourSharedCom) \ No newline at end of file
diff --git a/pyload/plugins/hoster/FreakshareCom.py b/pyload/plugins/hoster/FreakshareCom.py
new file mode 100644
index 000000000..156f697c3
--- /dev/null
+++ b/pyload/plugins/hoster/FreakshareCom.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster
+from module.plugins.internal.CaptchaService import ReCaptcha
+
class FreakshareCom(Hoster):
    """Freakshare.com download hoster (free and account mode)."""
    __name__ = "FreakshareCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?freakshare\.(net|com)/files/\S*?/"
    __version__ = "0.37"
    __description__ = """Freakshare.com Download Hoster"""
    __author_name__ = ("sitacuisses","spoob","mkaay", "Toilal")
    __author_mail__ = ("sitacuisses@yahoo.de","spoob@pyload.org","mkaay@mkaay.de", "toilal.dev@gmail.com")

    def setup(self):
        # Per-download state, reset for every new file.
        self.html = None
        self.wantReconnect = False
        self.multiDL = False
        self.req_opts = []

    def process(self, pyfile):
        """Entry point: account mode downloads directly; free mode goes
        through the wait/captcha form flow, then validates the result."""
        self.pyfile = pyfile

        # The .net domain is treated as an alias of .com.
        pyfile.url = pyfile.url.replace("freakshare.net/","freakshare.com/")

        if self.account:
            self.html = self.load(pyfile.url, cookies=False)
            pyfile.name = self.get_file_name()
            self.download(pyfile.url)

        else:
            self.prepare()
            self.get_file_url()

            self.download(self.pyfile.url, post=self.req_opts)

        # Inspect the downloaded data for known error pages.
        check = self.checkDownload({"bad": "bad try",
                                    "paralell": "> Sorry, you cant download more then 1 files at time. <",
                                    "empty": "Warning: Unknown: Filename cannot be empty",
                                    "wrong_captcha": "Wrong Captcha!"})

        if check == "bad":
            self.fail("Bad Try.")
        if check == "paralell":
            # Another download from this IP is running; back off 5 minutes.
            self.setWait(300, True)
            self.wait()
            self.retry()
        if check == "empty":
            self.fail("File not downloadable")
        if check == "wrong_captcha":
            self.invalidCaptcha()
            self.retry()

    def prepare(self):
        """Free mode: load the page, check existence, fill name/size and
        wait out the forced delay."""
        pyfile = self.pyfile

        self.wantReconnect = False

        self.download_html()

        if not self.file_exists():
            self.offline()

        self.setWait( self.get_waiting_time() )

        pyfile.name = self.get_file_name()
        pyfile.size = self.get_file_size()

        self.wait()

        return True

    def download_html(self):
        # Force an English UI first so the text patterns below match.
        self.load("http://freakshare.com/index.php", {"language": "EN"}); # Set english language in server session
        self.html = self.load(self.pyfile.url)

    def get_file_url(self):
        """ returns the absolute downloadable filepath
        """
        if self.html is None:
            self.download_html()
        if not self.wantReconnect:
            self.req_opts = self.get_download_options() # get the Post options for the Request
            #file_url = self.pyfile.url
            #return file_url
        else:
            self.offline()

    def get_file_name(self):
        # File name from the page heading; falls back to the URL itself.
        if self.html is None:
            self.download_html()
        if not self.wantReconnect:
            file_name = re.search(r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">([^ ]+)", self.html)
            if file_name is not None:
                file_name = file_name.group(1)
            else:
                file_name = self.pyfile.url
            return file_name
        else:
            return self.pyfile.url

    def get_file_size(self):
        # Parse "<name> - <value> <K|M|G>Byte" from the heading; 0 if unknown.
        size = 0
        if self.html is None:
            self.download_html()
        if not self.wantReconnect:
            file_size_check = re.search(r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">[^ ]+ - ([^ ]+) (\w\w)yte", self.html)
            if file_size_check is not None:
                units = float(file_size_check.group(1).replace(",", ""))
                pow = {'KB': 1, 'MB': 2, 'GB': 3}[file_size_check.group(2)]
                size = int(units * 1024 ** pow)

        return size

    def get_waiting_time(self):
        # Seconds to wait before the download form may be submitted.
        if self.html is None:
            self.download_html()

        if "Your Traffic is used up for today" in self.html:
            # Daily traffic limit hit: request a reconnect, retry in 24 h.
            self.wantReconnect = True
            return 24*3600

        timestring = re.search('\s*var\s(?:downloadWait|time)\s=\s(\d*)[.\d]*;', self.html)
        if timestring:
            return int(timestring.group(1)) + 1 #add 1 sec as tenths of seconds are cut off
        else:
            return 60

    def file_exists(self):
        """ returns True or False
        """
        if self.html is None:
            self.download_html()
        if re.search(r"This file does not exist!", self.html) is not None:
            return False
        else:
            return True

    def get_download_options(self):
        """Scrape the hidden fields of the free-download form, submit it, then
        scrape the fields (plus captcha answer, if any) of the second form."""
        re_envelope = re.search(r".*?value=\"Free\sDownload\".*?\n*?(.*?<.*?>\n*)*?\n*\s*?</form>", self.html).group(0) #get the whole request
        to_sort = re.findall(r"<input\stype=\"hidden\"\svalue=\"(.*?)\"\sname=\"(.*?)\"\s\/>", re_envelope)
        request_options = dict((n, v) for (v, n) in to_sort)

        herewego = self.load(self.pyfile.url, None, request_options) # the actual download-Page

        # comment this in, when it doesnt work
        # with open("DUMP__FS_.HTML", "w") as fp:
        #     fp.write(herewego)

        to_sort = re.findall(r"<input\stype=\".*?\"\svalue=\"(\S*?)\".*?name=\"(\S*?)\"\s.*?\/>", herewego)
        request_options = dict((n, v) for (v, n) in to_sort)

        # comment this in, when it doesnt work as well
        #print "\n\n%s\n\n" % ";".join(["%s=%s" % x for x in to_sort])

        challenge = re.search(r"http://api\.recaptcha\.net/challenge\?k=([0-9A-Za-z]+)", herewego)

        if challenge:
            re_captcha = ReCaptcha(self)
            request_options["recaptcha_challenge_field"], request_options["recaptcha_response_field"] \
                = re_captcha.challenge(challenge.group(1))

        return request_options
diff --git a/pyload/plugins/hoster/FreevideoCz.py b/pyload/plugins/hoster/FreevideoCz.py
new file mode 100644
index 000000000..19eb77470
--- /dev/null
+++ b/pyload/plugins/hoster/FreevideoCz.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+
def getInfo(urls):
    """Yield (name, size, status, url) tuples; status 1 = offline, 2 = online."""
    result = []

    for url in urls:
        page = getURL(url)
        # File offline when the "not found" heading is present.
        status = 1 if re.search(FreevideoCz.FILE_OFFLINE_PATTERN, page) else 2
        result.append((url, 0, status, url))

    yield result
+
class FreevideoCz(Hoster):
    """freevideo.cz video hoster plugin."""
    __name__ = "FreevideoCz"
    __type__ = "hoster"
    __pattern__ = r"http://www.freevideo.cz/vase-videa/(.*)\.html"
    __version__ = "0.2"
    __description__ = """freevideo.cz"""
    __author_name__ = ("zoidberg")

    URL_PATTERN = r'clip: {\s*url: "([^"]+)"'
    FILE_OFFLINE_PATTERN = r'<h2 class="red-corner-full">Str.nka nebyla nalezena</h2>'

    def setup(self):
        self.multiDL = True
        self.resumeDownload = True

    def process(self, pyfile):
        """Extract the direct clip URL from the page and download it."""
        self.html = self.load(pyfile.url, decode=True)

        if re.search(self.FILE_OFFLINE_PATTERN, self.html):
            self.offline()

        match = re.search(self.URL_PATTERN, self.html)
        if match is None:
            self.fail("Parse error (URL)")
        download_url = match.group(1)

        # Name the file after the URL slug, forcing an .mp4 extension.
        slug = re.search(self.__pattern__, pyfile.url).group(1)
        pyfile.name = slug + ".mp4"

        self.download(download_url)
diff --git a/pyload/plugins/hoster/FshareVn.py b/pyload/plugins/hoster/FshareVn.py
new file mode 100644
index 000000000..926781b40
--- /dev/null
+++ b/pyload/plugins/hoster/FshareVn.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+from module.network.RequestFactory import getURL
+import re
+from time import strptime, mktime, gmtime
+
def getInfo(urls):
    """Query fshare.vn's link checker for each URL and yield parsed file info."""
    for url in urls:
        html = getURL('http://www.fshare.vn/check_link.php',
                      post={"action": "check_link", "arrlinks": url},
                      decode=True)

        yield parseFileInfo(FshareVn, url, html)
+
def doubleDecode(m):
    # Fshare serves doubly-escaped unicode file names; undo one escaping
    # level. NOTE: Python 2 only - relies on str.decode('raw_unicode_escape').
    return m.group(1).decode('raw_unicode_escape')
+
class FshareVn(SimpleHoster):
    """FshareVn download hoster (free and premium)."""
    __name__ = "FshareVn"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?fshare.vn/file/.*"
    __version__ = "0.16"
    __description__ = """FshareVn Download Hoster"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # check_link.php returns JS-escaped HTML, hence the literal \\/ sequences.
    FILE_INFO_PATTERN = r'<p>(?P<N>[^<]+)<\\/p>[\\trn\s]*<p>(?P<S>[0-9,.]+)\s*(?P<U>[kKMG])i?B<\\/p>'
    FILE_OFFLINE_PATTERN = r'<div class=\\"f_left file_w\\"|<\\/p>\\t\\t\\t\\t\\r\\n\\t\\t<p><\\/p>\\t\\t\\r\\n\\t\\t<p>0 KB<\\/p>'
    FILE_NAME_REPLACEMENTS = [("(.*)", doubleDecode)]
    DOWNLOAD_URL_PATTERN = r'action="(http://download.*?)[#"]'
    VIP_URL_PATTERN = r'<form action="([^>]+)" method="get" name="frm_download">'
    WAIT_PATTERN = ur'Lượt tải xuống kế tiếp là:\s*(.*?)\s*<'

    def process(self, pyfile):
        """Check the link via the API, then dispatch to free/premium flow."""
        self.html = self.load('http://www.fshare.vn/check_link.php', post = {
            "action": "check_link",
            "arrlinks": pyfile.url
        }, decode = True)
        self.getFileInfo()

        if self.premium:
            self.handlePremium()
        else:
            self.handleFree()
        self.checkDownloadedFile()

    def handleFree(self):
        """Free flow: submit the download form (handling link passwords),
        honour the countdown, then download."""
        self.html = self.load(self.pyfile.url, decode = True)

        self.checkErrors()

        action, inputs = self.parseHtmlForm('frm_download')
        self.url = self.pyfile.url + action

        if not inputs: self.parseError('FORM')
        elif 'link_file_pwd_dl' in inputs:
            # Password-protected link: try every stored password in turn.
            for password in self.getPassword().splitlines():
                self.logInfo('Password protected link, trying "%s"' % password)
                inputs['link_file_pwd_dl'] = password
                self.html = self.load(self.url, post=inputs, decode=True)
                if not 'name="link_file_pwd_dl"' in self.html:
                    break
            else:
                self.fail("No or incorrect password")
        else:
            self.html = self.load(self.url, post=inputs, decode=True)

        self.checkErrors()

        # Countdown before the link becomes valid; default to 30 s.
        found = re.search(r'var count = (\d+)', self.html)
        self.setWait(int(found.group(1)) if found else 30)

        found = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
        if not found: self.parseError('FREE DL URL')
        self.url = found.group(1)
        self.logDebug("FREE DL URL: %s" % self.url)

        self.wait()
        self.download(self.url)

    def handlePremium(self):
        # Premium accounts may download the original URL directly.
        self.download(self.pyfile.url)

    def checkErrors(self):
        """Detect offline files and forced wait times; retry when needed."""
        if '/error.php?' in self.req.lastEffectiveURL or u"Liên kết bạn chọn khÃŽng tồn" in self.html:
            self.offline()

        found = re.search(self.WAIT_PATTERN, self.html)
        if found:
            self.logInfo("Wait until %s ICT" % found.group(1))
            # Site reports times in ICT (UTC+7); convert to a relative wait.
            wait_until = mktime(strptime(found.group(1), "%d/%m/%Y %H:%M"))
            self.setWait(wait_until - mktime(gmtime()) - 7 * 3600, True)
            self.wait()
            self.retry()
        elif '<ul class="message-error">' in self.html:
            self.logError("Unknown error occured or wait time not parsed")
            self.retry(30, 120, "Unknown error")

    def checkDownloadedFile(self):
        # check download
        check = self.checkDownload({
            "not_found": ("<head><title>404 Not Found</title></head>")
        })

        if check == "not_found":
            self.fail("File not found on server")
diff --git a/pyload/plugins/hoster/Ftp.py b/pyload/plugins/hoster/Ftp.py
new file mode 100644
index 000000000..c68f3b237
--- /dev/null
+++ b/pyload/plugins/hoster/Ftp.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: jeix
+ @author: mkaay
+"""
+from urlparse import urlparse, urljoin
+from urllib import quote, unquote
+import pycurl, re
+
+from module.plugins.Hoster import Hoster
+from module.network.HTTPRequest import BadHeader
+
class Ftp(Hoster):
    """Download from FTP/FTPS/SFTP URLs, with directory expansion."""
    __name__ = "Ftp"
    __version__ = "0.41"
    __pattern__ = r'(ftps?|sftp)://(.*?:.*?@)?.*?/.*' # ftp://user:password@ftp.server.org/path/to/file
    __type__ = "hoster"
    __description__ = """A Plugin that allows you to download from an from an ftp directory"""
    __author_name__ = ("jeix", "mkaay", "zoidberg")
    __author_mail__ = ("jeix@hasnomail.com", "mkaay@mkaay.de", "zoidberg@mujmail.cz")

    def setup(self):
        self.chunkLimit = -1
        self.resumeDownload = True

    def process(self, pyfile):
        """Download a single FTP file, or expand an FTP directory listing
        into a new package containing its entries."""
        parsed_url = urlparse(pyfile.url)
        netloc = parsed_url.netloc

        # File name = last path component, URL-decoded when possible.
        pyfile.name = parsed_url.path.rpartition('/')[2]
        try:
            pyfile.name = unquote(str(pyfile.name)).decode('utf8')
        except:
            pass

        if not "@" in netloc:
            # No credentials in the URL: look them up in stored FTP accounts,
            # then in the package password field ("user:pass" lines).
            servers = [ x['login'] for x in self.account.getAllAccounts() ] if self.account else []

            if netloc in servers:
                self.logDebug("Logging on to %s" % netloc)
                self.req.addAuth(self.account.accounts[netloc]["password"])
            else:
                for pwd in pyfile.package().password.splitlines():
                    if ":" in pwd:
                        self.req.addAuth(pwd.strip())
                        break

        # Header-only probe (no body) to learn the file size first.
        self.req.http.c.setopt(pycurl.NOBODY, 1)

        try:
            response = self.load(pyfile.url)
        except pycurl.error, e:
            self.fail("Error %d: %s" % e.args)

        self.req.http.c.setopt(pycurl.NOBODY, 0)
        self.logDebug(self.req.http.header)

        found = re.search(r"Content-Length:\s*(\d+)", response)
        if found:
            pyfile.size = int(found.group(1))
            self.download(pyfile.url)
        else:
            #Naive ftp directory listing
            # A 25x status line in the headers suggests a directory.
            if re.search(r'^25\d.*?"', self.req.http.header, re.M):
                pyfile.url = pyfile.url.rstrip('/')
                pkgname = "/".join((pyfile.package().name,urlparse(pyfile.url).path.rpartition('/')[2]))
                pyfile.url += '/'
                self.req.http.c.setopt(48, 1) # CURLOPT_DIRLISTONLY
                response = self.load(pyfile.url, decode = False)
                links = [ pyfile.url + quote(x) for x in response.splitlines() ]
                self.logDebug("LINKS", links)
                self.core.api.addPackage(pkgname, links, 1)
                #self.core.files.addLinks(links, pyfile.package().id)
            else:
                self.fail("Unexpected server response")
+
+ \ No newline at end of file
diff --git a/pyload/plugins/hoster/GamefrontCom.py b/pyload/plugins/hoster/GamefrontCom.py
new file mode 100644
index 000000000..34fda09d2
--- /dev/null
+++ b/pyload/plugins/hoster/GamefrontCom.py
@@ -0,0 +1,80 @@
+import re
+from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+from module.utils import parseFileSize
+
class GamefrontCom(Hoster):
    """gamefront.com hoster plugin."""
    __name__ = "GamefrontCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*?gamefront.com/files/[A-Za-z0-9]+"
    __version__ = "0.03"
    __description__ = """gamefront.com hoster plugin"""
    __author_name__ = ("fwannmacher")
    __author_mail__ = ("felipe@warhammerproject.com")

    HOSTER_NAME = "gamefront.com"
    # BUGFIX: the "|" must be escaped - unescaped it made the pattern a regex
    # alternation, so the captured title stopped at the first space.
    PATTERN_FILENAME = r'<title>(.*?) \| Game Front'
    PATTERN_FILESIZE = r'<dt>File Size:</dt>[\n\s]*<dd>(.*?)</dd>'
    PATTERN_OFFLINE = "This file doesn't exist, or has been removed."

    def setup(self):
        self.resumeDownload = True
        self.multiDL = False

    def process(self, pyfile):
        """Entry point: fetch the page, resolve name and link, download."""
        self.pyfile = pyfile
        self.html = self.load(pyfile.url, decode=True)

        if not self._checkOnline():
            self.offline()

        self.pyfile.name = self._getName()

        self.link = self._getLink()

        if not self.link.startswith('http://'):
            self.link = "http://www.gamefront.com/" + self.link

        self.download(self.link)

    def _checkOnline(self):
        # False when the page shows the "file removed" notice.
        return re.search(self.PATTERN_OFFLINE, self.html) is None

    def _getName(self):
        name = re.search(self.PATTERN_FILENAME, self.html)
        if name is None:
            self.fail("%s: Plugin broken." % self.__name__)

        return name.group(1)

    def _getLink(self):
        # Follow the "thank you" page to find the real media-server link.
        self.html2 = self.load("http://www.gamefront.com/" + re.search("(files/service/thankyou\\?id=[A-Za-z0-9]+)", self.html).group(1))
        self.link = re.search("<a href=\"(http://media[0-9]+\.gamefront.com/.*)\">click here</a>", self.html2)
        # Fail cleanly on a layout change instead of AttributeError on None.
        if self.link is None:
            self.fail("%s: Plugin broken." % self.__name__)

        return self.link.group(1).replace("&amp;", "&")
+
def getInfo(urls):
    """Check gamefront.com links.

    Yields one list of (name, size, status, url) tuples; status 1 = offline,
    3 = online.
    """
    result = []

    for url in urls:
        html = getURL(url)

        if re.search(GamefrontCom.PATTERN_OFFLINE, html):
            result.append((url, 0, 1, url))
        else:
            name = re.search(GamefrontCom.PATTERN_FILENAME, html)

            if name is None:
                result.append((url, 0, 1, url))
                continue

            size = re.search(GamefrontCom.PATTERN_FILESIZE, html)
            # BUGFIX: tolerate a missing size tag instead of crashing on
            # None.group(1).
            size = parseFileSize(size.group(1)) if size else 0

            result.append((name.group(1), size, 3, url))

    yield result
diff --git a/pyload/plugins/hoster/GigapetaCom.py b/pyload/plugins/hoster/GigapetaCom.py
new file mode 100644
index 000000000..28ba35abe
--- /dev/null
+++ b/pyload/plugins/hoster/GigapetaCom.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from random import randint
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from pycurl import FOLLOWLOCATION
+
class GigapetaCom(SimpleHoster):
    """GigaPeta.com hoster plugin - free downloads only."""
    __name__ = "GigapetaCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?gigapeta\.com/dl/\w+"
    __version__ = "0.02"
    __description__ = """GigaPeta.com plugin - free only"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    SH_COOKIES = [("http://gigapeta.com", "lang", "us")]
    FILE_NAME_PATTERN = r'<img src=".*" alt="file" />-->\s*(?P<N>.*?)\s*</td>'
    FILE_SIZE_PATTERN = r'<th>\s*Size\s*</th>\s*<td>\s*(?P<S>.*?)\s*</td>'
    FILE_OFFLINE_PATTERN = r'<div id="page_error">'

    def handleFree(self):
        """Free download: solve the numeric captcha (up to 5 attempts), then
        read the direct link from the redirect headers."""
        captcha_key = str(randint(1, 100000000))
        captcha_url = "http://gigapeta.com/img/captcha.gif?x=%s" % captcha_key

        # The direct link comes back as a redirect; intercept it manually.
        self.req.http.c.setopt(FOLLOWLOCATION, 0)

        for i in range(5):
            self.checkErrors()

            captcha = self.decryptCaptcha(captcha_url)
            self.html = self.load(self.pyfile.url, post={
                "captcha_key": captcha_key,
                "captcha": captcha,
                "download": "Download"})

            found = re.search(r"Location\s*:\s*(.*)", self.req.http.header, re.I)
            if found:
                download_url = found.group(1)
                break
            elif "Entered figures don&#96;t coincide with the picture" in self.html:
                self.invalidCaptcha()
            else:
                self.fail("No valid captcha code entered")
        else:
            # BUGFIX: after 5 invalid captchas the original fell through the
            # loop with `download_url` unbound and crashed with a NameError
            # on the logDebug call below.
            self.fail("No valid captcha code entered")

        self.req.http.c.setopt(FOLLOWLOCATION, 1)
        self.logDebug("Download URL: %s" % download_url)
        self.download(download_url)

    def checkErrors(self):
        # Only one download per IP at a time; back off 5 minutes and retry.
        if "All threads for IP" in self.html:
            self.logDebug("Your IP is already downloading a file - wait and retry")
            self.setWait(300, True)
            self.wait()
            self.retry()
+
+getInfo = create_getInfo(GigapetaCom) \ No newline at end of file
diff --git a/pyload/plugins/hoster/HellshareCz.py b/pyload/plugins/hoster/HellshareCz.py
new file mode 100644
index 000000000..aa494e34e
--- /dev/null
+++ b/pyload/plugins/hoster/HellshareCz.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from math import ceil
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
class HellshareCz(SimpleHoster):
    """Hellshare.cz hoster - premium accounts only."""
    __name__ = "HellshareCz"
    __type__ = "hoster"
    __pattern__ = r"(http://(?:.*\.)*hellshare\.(?:cz|com|sk|hu|pl)/[^?]*/\d+).*"
    __version__ = "0.82"
    __description__ = """Hellshare.cz - premium only"""
    __author_name__ = ("zoidberg")

    FILE_NAME_PATTERN = r'<h1 id="filename"[^>]*>(?P<N>[^<]+)</h1>'
    FILE_SIZE_PATTERN = r'<strong id="FileSize_master">(?P<S>[0-9.]*)&nbsp;(?P<U>[kKMG])i?B</strong>'
    FILE_OFFLINE_PATTERN = r'<h1>File not found.</h1>'
    SHOW_WINDOW_PATTERN = r'<a href="([^?]+/(\d+)/\?do=(fileDownloadButton|relatedFileDownloadButton-\2)-showDownloadWindow)"'

    def setup(self):
        # Resume and parallel downloads only for logged-in users.
        self.resumeDownload = self.multiDL = bool(self.account)
        self.chunkLimit = 1

    def process(self, pyfile):
        """Premium-only download via the 'show download window' link."""
        if not self.account:
            self.fail("User not logged in")

        pyfile.url = re.search(self.__pattern__, pyfile.url).group(1)
        self.html = self.load(pyfile.url, decode=True)
        self.getFileInfo()

        if not self.checkTrafficLeft():
            self.fail("Not enough traffic left for user %s." % self.user)

        window = re.search(self.SHOW_WINDOW_PATTERN, self.html)
        if not window:
            self.parseError('SHOW WINDOW')
        self.url = "http://www.hellshare.com" + window.group(1)
        self.logDebug("DOWNLOAD URL: " + self.url)

        self.download(self.url)
+
+getInfo = create_getInfo(HellshareCz)
diff --git a/pyload/plugins/hoster/HellspyCz.py b/pyload/plugins/hoster/HellspyCz.py
new file mode 100644
index 000000000..9858c82b7
--- /dev/null
+++ b/pyload/plugins/hoster/HellspyCz.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
class HellspyCz(SimpleHoster):
    """HellSpy.cz hoster - premium (credit-based) accounts only."""
    __name__ = "HellspyCz"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*(?:hellspy\.(?:cz|com|sk|hu|pl)|sciagaj.pl)(/\S+/\d+)/?.*"
    __version__ = "0.27"
    __description__ = """HellSpy.cz"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    FILE_SIZE_PATTERN = r'<span class="filesize right">(?P<S>[0-9.]+)\s*<span>(?P<U>[kKMG])i?B'
    FILE_NAME_PATTERN = r'<h1 title="(?P<N>.*?)"'
    FILE_OFFLINE_PATTERN = r'<h2>(404 - Page|File) not found</h2>'
    # Canonicalize all mirror domains to www.hellspy.com.
    FILE_URL_REPLACEMENTS = [(__pattern__, r"http://www.hellspy.com\1")]

    CREDIT_LEFT_PATTERN = r'<strong>Credits: </strong>\s*(\d+)'
    DOWNLOAD_AGAIN_PATTERN = r'<a id="button-download-start"[^>]*title="You can download the file without deducting your credit.">'
    DOWNLOAD_URL_PATTERN = r"launchFullDownload\('([^']+)'"

    def setup(self):
        self.resumeDownload = self.multiDL = True
        self.chunkLimit = 1

    def handleFree(self):
        # Anonymous downloads are not supported by the site.
        self.fail("Only premium users can download from HellSpy.cz")

    def handlePremium(self):
        """Download using account credits; logs the balance before and after."""
        # set PHPSESSID cookie
        cj = self.account.getAccountCookies(self.user)
        cj.setCookie(".hellspy.com", "PHPSESSID", self.account.phpsessid)
        self.logDebug("PHPSESSID: " + cj.getCookie("PHPSESSID"))

        info = self.account.getAccountInfo(self.user, True)
        self.logInfo("User %s has %i credits left" % (self.user, info["trafficleft"]/1024))

        # Credit check is advisory only - the download is attempted anyway.
        if self.pyfile.size / 1024 > info["trafficleft"]:
            self.logWarning("Not enough credit left to download file")

        # get premium download URL and download
        self.html = self.load(self.pyfile.url + "?download=1")
        found = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
        if not found: self.parseError("Download URL")
        url = found.group(1)
        self.logDebug("Download URL: " + url)
        self.download(url)

        info = self.account.getAccountInfo(self.user, True)
        self.logInfo("User %s has %i credits left" % (self.user, info["trafficleft"]/1024))
+
+getInfo = create_getInfo(HellspyCz) \ No newline at end of file
diff --git a/pyload/plugins/hoster/HotfileCom.py b/pyload/plugins/hoster/HotfileCom.py
new file mode 100644
index 000000000..2dfe6ec88
--- /dev/null
+++ b/pyload/plugins/hoster/HotfileCom.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+from module.network.RequestFactory import getURL
+from module.utils import chunks
+
def getInfo(urls):
    """Bulk online-check through the hotfile.com API.

    Queries up to 90 links per request and yields one result list per
    chunk; each entry is a (name, size, status, url) tuple as expected
    by pyload's link checker (status 2 = online, 1 = offline).
    """
    api_url_base = "http://api.hotfile.com/"

    for chunk in chunks(urls, 90):
        # api only supports old style links
        api_param_file = {"action": "checklinks", "links": ",".join(chunk), "fields": "id,status,name,size"}
        src = getURL(api_url_base, post=api_param_file, decode=True)
        result = []
        for i, res in enumerate(src.split("\n")):
            if not res:
                continue
            fields = res.split(",")

            # status "1"/"2" means the file exists; other lines may lack
            # the name/size fields entirely, so guard before indexing
            # (the original unconditionally read fields[2]/fields[3] and
            # raised IndexError on offline entries)
            if len(fields) > 3 and fields[1] in ("1", "2"):
                result.append((fields[2], int(fields[3]), 2, chunk[i]))
            else:
                result.append((chunk[i], 0, 1, chunk[i]))
        yield result
+
class HotfileCom(Hoster):
    """Hotfile.com download hoster.

    Free mode scrapes the download form, waits out the timer and solves
    ReCaptcha; premium mode asks the account API for a direct link.
    """
    __name__ = "HotfileCom"
    __type__ = "hoster"
    __pattern__ = r"https?://(www.)?hotfile\.com/dl/\d+/[0-9a-zA-Z]+/"
    __version__ = "0.36"
    __description__ = """Hotfile.com Download Hoster"""
    __author_name__ = ("sitacuisses","spoob","mkaay","JoKoT3")
    __author_mail__ = ("sitacuisses@yhoo.de","spoob@pyload.org","mkaay@mkaay.de","jokot3@gmail.com")

    # marker text on the page when the file has been taken down
    FILE_OFFLINE_PATTERN = r'File is removed'

    def setup(self):
        # html[0] = landing page, html[1] = form/captcha response page
        self.html = [None, None]
        self.wantReconnect = False
        self.htmlwithlink = None
        self.url = None

        # premium accounts may download in parallel, resume and use
        # unlimited chunks; free mode is strictly one-at-a-time
        if self.premium:
            self.multiDL = True
            self.resumeDownload = True
            self.chunkLimit = -1
        else:
            self.multiDL = False
            self.chunkLimit = 1

    def apiCall(self, method, post, login=False):
        """Call the hotfile API, via the account when login is requested."""
        if not self.account and login:
            # login required but no account configured: nothing we can do
            return
        elif self.account and login:
            return self.account.apiCall(method, post, self.user)
        post.update({"action": method})
        return self.load("http://api.hotfile.com/", post=post, decode=True)

    def process(self, pyfile):
        """Entry point: check the link, then branch on free vs. premium."""
        self.wantReconnect = False

        # the API answers with a comma separated record matching "fields"
        args = {"links":self.pyfile.url, "fields":"id,status,name,size,sha1"}
        resp = self.apiCall("checklinks", args)
        self.api_data = {}
        for k, v in zip(args["fields"].split(","), resp.strip().split(",")):
            self.api_data[k] = v

        if self.api_data["status"] == "0":
            self.offline()

        pyfile.name = self.api_data["name"]

        if not self.premium:
            self.downloadHTML()

            if self.FILE_OFFLINE_PATTERN in self.html[0]:
                self.offline()

            self.setWait(self.getWaitTime())
            self.wait()

            self.freeDownload()
        else:
            dl = self.account.apiCall("getdirectdownloadlink", {"link":self.pyfile.url}, self.user)
            #dl = unquote(dl).strip() <- Made problems
            dl = dl.strip()
            self.download(dl)

    def downloadHTML(self):
        """Fetch the (english) landing page into html[0]."""
        self.html[0] = self.load(self.pyfile.url, get={"lang":"en"})

    def freeDownload(self):
        """Free-mode download: submit the hidden form, solve the captcha."""

        form_content = re.search(r"<form style=.*(\n<.*>\s*)*?[\n\t]?<tr>", self.html[0])
        if form_content is None:
            # dump the page for debugging before bailing out (Python 2 print)
            print self.html[0]
            self.fail("Form not found in HTML. Can not proceed.")

        form_content = form_content.group(0)
        # collect all hidden inputs and replay them as POST data
        form_posts = dict(re.findall(r"<input\stype=hidden\sname=(\S*)\svalue=(\S*)>", form_content))

        self.html[1] = self.load(self.pyfile.url, post=form_posts)

        challenge = re.search(r"http://api\.recaptcha\.net/challenge\?k=([0-9A-Za-z]+)", self.html[1])

        if challenge:
            re_captcha = ReCaptcha(self)
            challenge, result = re_captcha.challenge(challenge.group(1))

            url = re.search(r'<form action="(/dl/[^"]+)', self.html[1] )

            self.html[1] = self.load("http://hotfile.com"+url.group(1), post={"action": "checkcaptcha",
                                                                              "recaptcha_challenge_field" : challenge,
                                                                              "recaptcha_response_field": result})

            # wrong captcha answer: recurse and try the whole form again
            if "Wrong Code. Please try again." in self.html[1]:
                self.freeDownload()
                return

        file_url = re.search(r'a href="(http://hotfile\.com/get/\S*)"', self.html[1]).group(1)
        self.download(file_url)

    def getWaitTime(self):
        """Sum all JS countdown timers on the page; seconds to wait."""
        free_limit_pattern = re.compile(r"timerend=d\.getTime\(\)\+(\d+);")
        matches = free_limit_pattern.findall(self.html[0])
        if matches:
            # timers are in milliseconds; fall back to 60s when they sum to 0
            wait_time = (sum([int(match) for match in matches])/1000) or 60
            if wait_time > 300:
                # long waits are usually IP-based -- ask for a reconnect
                self.wantReconnect = True
            return wait_time + 1
        else:
            self.fail("Don't know how long to wait. Cannot proceed.")
diff --git a/pyload/plugins/hoster/HundredEightyUploadCom.py b/pyload/plugins/hoster/HundredEightyUploadCom.py
new file mode 100644
index 000000000..d8b744359
--- /dev/null
+++ b/pyload/plugins/hoster/HundredEightyUploadCom.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
class HundredEightyUploadCom(XFileSharingPro):
    """180upload.com hoster plugin.

    All download handling is inherited from XFileSharingPro; this
    subclass only supplies the site-specific regexes and hoster name.
    """
    __name__ = "HundredEightyUploadCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)?180upload\.com/(\w+).*"
    __version__ = "0.01"
    __description__ = """180upload.com hoster plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    # name/size as rendered on the file page (name sits in an HTML comment)
    FILE_NAME_PATTERN = r'Filename:</b></td><td nowrap>(?P<N>.+)</td></tr>-->'
    FILE_SIZE_PATTERN = r'Size:</b></td><td>(?P<S>[\d.]+) (?P<U>[A-Z]+)\s*<small>'

    HOSTER_NAME = "180upload.com"
+
+
+getInfo = create_getInfo(HundredEightyUploadCom)
diff --git a/pyload/plugins/hoster/IFileWs.py b/pyload/plugins/hoster/IFileWs.py
new file mode 100644
index 000000000..160fe641c
--- /dev/null
+++ b/pyload/plugins/hoster/IFileWs.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
class IFileWs(XFileSharingPro):
    """Ifile.ws hoster plugin.

    Download handling is inherited from XFileSharingPro; only the
    site-specific patterns and hoster name are defined here.
    """
    __name__ = "IFileWs"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?ifile\.ws/\w+(/.+)?"
    __version__ = "0.01"
    __description__ = """Ifile.ws hoster plugin"""
    __author_name__ = ("z00nx")
    __author_mail__ = ("z00nx0@gmail.com")

    # Raw strings: these patterns contain regex escapes (\s, \d, ...)
    # which are invalid escape sequences in plain string literals and
    # trigger warnings on modern Python; behavior is unchanged.
    FILE_INFO_PATTERN = r'<h1\s+style="display:inline;">(?P<N>[^<]+)</h1>\s+\[(?P<S>[^]]+)\]'
    FILE_OFFLINE_PATTERN = r'File Not Found|The file was removed by administrator'
    HOSTER_NAME = "ifile.ws"
    LONG_WAIT_PATTERN = r"(?P<M>\d(?=\s+minutes)).*(?P<S>\d+(?=\s+seconds))"
+
+
+getInfo = create_getInfo(IFileWs)
diff --git a/pyload/plugins/hoster/IcyFilesCom.py b/pyload/plugins/hoster/IcyFilesCom.py
new file mode 100644
index 000000000..34737e560
--- /dev/null
+++ b/pyload/plugins/hoster/IcyFilesCom.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: godofdream
+"""
+
+import re
+from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+
def getInfo(urls):
    """Online-check for icyfiles.com links.

    Yields one list of (name, size, status, url) tuples; status codes:
    1 = offline, 2 = online, 3 = unknown (page could not be parsed).
    """
    result = []
    for url in urls:
        html = getURL(url, decode=True)
        if re.search(IcyFilesCom.FILE_OFFLINE_PATTERN, html):
            # File offline
            result.append((url, 0, 1, url))
        else:
            # Get file info; the page lists the size in whole megabytes
            name = re.search(IcyFilesCom.FILE_NAME_PATTERN, html)
            size = re.search(IcyFilesCom.SIZE_PATTERN, html)
            if name is not None and size is not None:
                result.append((name.group(1), int(size.group(1)) * 1000000, 2, url))
            else:
                # Parse failure: report status "unknown" instead of
                # silently dropping the link (and instead of crashing on
                # size.group(1) when only the name regex matched)
                result.append((url, 0, 3, url))
    yield result
+
+
class IcyFilesCom(Hoster):
    """IcyFiles.com hoster plugin, free downloads only.

    The free flow has three possible wait states (no free slot, visible
    countdown, daily limit) which are all honoured before downloading.
    """
    __name__ = "IcyFilesCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?icyfiles\.com/(.*)"
    __version__ = "0.05"
    __description__ = """IcyFiles.com plugin - free only"""
    __author_name__ = ("godofdream")
    __author_mail__ = ("soilfiction@gmail.com")

    FILE_NAME_PATTERN = r'<div id="file">(.*?)</div>'
    # size on the page is given in whole megabytes
    SIZE_PATTERN = r'<li>(\d+) <span>Size/mb'
    FILE_OFFLINE_PATTERN = r'The requested File cant be found'
    WAIT_LONGER_PATTERN = r'All download tickets are in use\. please try it again in a few seconds'
    WAIT_PATTERN = r'<div class="counter">(\d+)</div>'
    TOOMUCH_PATTERN = r'Sorry dude, you have downloaded too much\. Please wait (\d+) seconds'


    def setup(self):
        # free downloads: no parallel transfers allowed
        self.multiDL = False

    def process(self, pyfile):
        """Check availability, honour every wait condition, then download."""
        self.html = self.load(pyfile.url, decode=True)
        # check if offline
        if re.search(self.FILE_OFFLINE_PATTERN, self.html):
            self.offline()
        # All Downloadtickets in use
        timmy = re.search(self.WAIT_LONGER_PATTERN, self.html)
        if timmy:
            self.logDebug("waitforfreeslot")
            self.waitForFreeSlot()
        # Wait the waittime
        timmy = re.search(self.WAIT_PATTERN, self.html)
        if timmy:
            self.logDebug("waiting", timmy.group(1))
            self.setWait(int(timmy.group(1)) + 2, False)
            self.wait()
        # Downloaded to much
        timmy = re.search(self.TOOMUCH_PATTERN, self.html)
        if timmy:
            self.logDebug("too much", timmy.group(1))
            # second argument requests a reconnect while waiting
            self.setWait(int(timmy.group(1)), True)
            self.wait()
        # Find Name
        found = re.search(self.FILE_NAME_PATTERN, self.html)
        if found is None:
            self.fail("Parse error (NAME)")
        pyfile.name = found.group(1)
        # Get the URL
        url = pyfile.url
        found = re.search(self.__pattern__, url)
        if found is None:
            self.fail("Parse error (URL)")
        download_url = "http://icyfiles.com/download.php?key=" + found.group(1)
        self.download(download_url)
        # check download
        check = self.checkDownload({
            "notfound": re.compile(r"^<head><title>404 Not Found</title>$"),
            "skippedcountdown": re.compile(r"^Dont skip the countdown$"),
            "waitforfreeslots": re.compile(self.WAIT_LONGER_PATTERN),
            "downloadedtoomuch": re.compile(self.TOOMUCH_PATTERN)
        })
        if check == "skippedcountdown":
            self.fail("Countdown error")
        elif check == "notfound":
            self.fail("404 Not found")
        elif check == "waitforfreeslots":
            self.waitForFreeSlot()
        elif check == "downloadedtoomuch":
            self.retry()

    def waitForFreeSlot(self):
        # retry up to 60 times, waiting 60 seconds between attempts
        self.retry(60, 60, "Wait for free slot")
diff --git a/pyload/plugins/hoster/IfileIt.py b/pyload/plugins/hoster/IfileIt.py
new file mode 100644
index 000000000..02bcbfd40
--- /dev/null
+++ b/pyload/plugins/hoster/IfileIt.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.common.json_layer import json_loads
+from module.plugins.internal.CaptchaService import ReCaptcha
+from module.network.RequestFactory import getURL
+
class IfileIt(SimpleHoster):
    """Ifile.it hoster plugin.

    NOTE(review): __pattern__ is "^unmatchable$" and contains no capture
    group, yet handleFree() calls .group(1) on a search against it -- for
    any real URL re.search() returns None and this would raise
    AttributeError. The plugin appears deliberately unmatchable/disabled;
    confirm the intended URL pattern before re-enabling it.
    """
    __name__ = "IfileIt"
    __type__ = "hoster"
    __pattern__ = r"^unmatchable$"
    __version__ = "0.27"
    __description__ = """Ifile.it"""
    __author_name__ = ("zoidberg")

    #EVAL_PATTERN = r'(eval\(function\(p,a,c,k,e,d\).*)'
    #DEC_PATTERN = r"requestBtn_clickEvent[^}]*url:\s*([^,]+)"
    DOWNLOAD_LINK_PATTERN = r'</span> If it doesn\'t, <a target="_blank" href="([^"]+)">'
    RECAPTCHA_KEY_PATTERN = r"var __recaptcha_public\s*=\s*'([^']+)';"
    FILE_INFO_PATTERN = r'<span style="cursor: default;[^>]*>\s*(?P<N>.*?)\s*&nbsp;\s*<strong>\s*(?P<S>[0-9.]+)\s*(?P<U>[kKMG])i?B\s*</strong>\s*</span>'
    FILE_OFFLINE_PATTERN = r'<span style="cursor: default;[^>]*>\s*&nbsp;\s*<strong>\s*</strong>\s*</span>'
    TEMP_OFFLINE_PATTERN = r'<span class="msg_red">Downloading of this file is temporarily disabled</span>'

    def handleFree(self):
        """Free download via the JSON download-request API (with captcha)."""
        # see class NOTE: group(1) requires a capture group in __pattern__
        ukey = re.search(self.__pattern__, self.pyfile.url).group(1)
        json_url = 'http://ifile.it/new_download-request.json'
        post_data = {"ukey" : ukey, "ab": "0"}

        json_response = json_loads(self.load(json_url, post = post_data))
        self.logDebug(json_response)
        # status 3 marks a deleted/unavailable file
        if json_response['status'] == 3:
            self.offline()

        if json_response["captcha"]:
            captcha_key = re.search(self.RECAPTCHA_KEY_PATTERN, self.html).group(1)
            recaptcha = ReCaptcha(self)
            post_data["ctype"] = "recaptcha"

            # up to 5 captcha attempts
            for i in range(5):
                post_data["recaptcha_challenge"], post_data["recaptcha_response"] = recaptcha.challenge(captcha_key)
                json_response = json_loads(self.load(json_url, post = post_data))
                self.logDebug(json_response)

                if json_response["retry"]:
                    self.invalidCaptcha()
                else:
                    self.correctCaptcha()
                    break
            else:
                self.fail("Incorrect captcha")

        if not "ticket_url" in json_response:
            self.parseError("Download URL")

        self.download(json_response["ticket_url"])
+
+getInfo = create_getInfo(IfileIt) \ No newline at end of file
diff --git a/pyload/plugins/hoster/IfolderRu.py b/pyload/plugins/hoster/IfolderRu.py
new file mode 100644
index 000000000..6accbc524
--- /dev/null
+++ b/pyload/plugins/hoster/IfolderRu.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from urllib import quote
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.network.RequestFactory import getURL
+
class IfolderRu(SimpleHoster):
    """rusfolder.com / ifolder.ru hoster plugin.

    The download dialogue walks through the "ints" interstitial sponsor
    pages, waits 31 seconds, then solves an image captcha (up to 5
    attempts) to reveal the direct link.
    NOTE: the ur'' literals and the file-info patterns below are Python 2
    syntax / mojibake-encoded Russian text kept verbatim.
    """
    __name__ = "IfolderRu"
    __type__ = "hoster"
    __pattern__ = r"http://(?:[^.]*\.)?(?:ifolder\.ru|rusfolder\.(?:com|net|ru))/(?:files/)?(?P<ID>\d+).*"
    __version__ = "0.37"
    __description__ = """rusfolder.com / ifolder.ru"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # normalize Russian size units to KB/MB/GB
    FILE_SIZE_REPLACEMENTS = [(u'Кб', 'KB'), (u'Мб', 'MB'), (u'Гб', 'GB')]
    FILE_NAME_PATTERN = ur'(?:<div><span>)?НазваМОе:(?:</span>)? <b>(?P<N>[^<]+)</b><(?:/div|br)>'
    FILE_SIZE_PATTERN = ur'(?:<div><span>)?РазЌер:(?:</span>)? <b>(?P<S>[^<]+)</b><(?:/div|br)>'
    FILE_OFFLINE_PATTERN = ur'<p>Ѐайл МПЌер <b>[^<]*</b> (Ме МайЎеМ|уЎалеМ) !!!</p>'

    SESSION_ID_PATTERN = r'<a href=(http://ints.(?:rusfolder.com|ifolder.ru)/ints/sponsor/\?bi=\d*&session=([^&]+)&u=[^>]+)>'
    INTS_SESSION_PATTERN = r'\(\'ints_session\'\);\s*if\(tag\)\{tag.value = "([^"]+)";\}'
    HIDDEN_INPUT_PATTERN = r"var v = .*?name='([^']+)' value='1'"
    DOWNLOAD_LINK_PATTERN = r'<a id="download_file_href" href="([^"]+)"'
    WRONG_CAPTCHA_PATTERN = ur'<font color=Red>МеверМый кПЎ,<br>ввеЎОте еще раз</font><br>'

    def setup(self):
        # resume/parallel only with an account
        self.resumeDownload = self.multiDL = True if self.account else False
        self.chunkLimit = 1

    def process(self, pyfile):
        """Walk the interstitial pages and captcha loop, then download."""
        file_id = re.search(self.__pattern__, pyfile.url).group('ID')
        self.html = self.load("http://rusfolder.com/%s" % file_id, cookies=True, decode=True)
        self.getFileInfo()

        # follow the link into the "ints" sponsor frame
        url = re.search('<a href="(http://ints\..*?=)"', self.html).group(1)
        self.html = self.load(url, cookies=True, decode=True)

        url, session_id = re.search(self.SESSION_ID_PATTERN, self.html).groups()
        self.html = self.load(url, cookies=True, decode=True)

        url = "http://ints.rusfolder.com/ints/frame/?session=%s" % session_id
        self.html = self.load(url, cookies=True)

        # mandatory 31 second countdown before the captcha form appears
        self.setWait(31, False)
        self.wait()

        captcha_url = "http://ints.rusfolder.com/random/images/?session=%s" % session_id
        # up to 5 captcha attempts
        for i in range(5):
            self.html = self.load(url, cookies=True)
            action, inputs = self.parseHtmlForm('ID="Form1"')
            inputs['ints_session'] = re.search(self.INTS_SESSION_PATTERN, self.html).group(1)
            # the form carries a JS-generated hidden field with value '1'
            inputs[re.search(self.HIDDEN_INPUT_PATTERN, self.html).group(1)] = '1'
            inputs['confirmed_number'] = self.decryptCaptcha(captcha_url, cookies = True)
            inputs['action'] = '1'
            self.logDebug(inputs)

            self.html = self.load(url, decode = True, cookies = True, post = inputs)
            if self.WRONG_CAPTCHA_PATTERN in self.html:
                self.invalidCaptcha()
            else:
                break;
        else:
            self.fail("Invalid captcha")

        #self.html = self.load("http://rusfolder.com/%s?ints_code=%s" % (file_id, session_id), decode=True, cookies = True)

        download_url = re.search(self.DOWNLOAD_LINK_PATTERN, self.html).group(1)
        self.correctCaptcha()
        self.logDebug("Download URL: %s" % download_url)
        self.download(download_url)
+
+getInfo = create_getInfo(IfolderRu) \ No newline at end of file
diff --git a/pyload/plugins/hoster/JumbofilesCom.py b/pyload/plugins/hoster/JumbofilesCom.py
new file mode 100644
index 000000000..9e8adb512
--- /dev/null
+++ b/pyload/plugins/hoster/JumbofilesCom.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.utils import html_unescape
+
class JumbofilesCom(SimpleHoster):
    """JumboFiles.com hoster plugin."""
    __name__ = "JumbofilesCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*jumbofiles.com/(\w{12}).*"
    __version__ = "0.02"
    __description__ = """JumboFiles.com hoster plugin"""
    __author_name__ = ("godofdream")
    __author_mail__ = ("soilfiction@gmail.com")

    # Raw strings: the patterns contain regex escapes (\s, \d, \() that
    # are invalid escape sequences in plain string literals on modern
    # Python; behavior is unchanged.
    FILE_INFO_PATTERN = r'<TR><TD>(?P<N>[^<]+?)\s*<small>\((?P<S>[\d.]+)\s*(?P<U>[KMG][bB])\)</small></TD></TR>'
    FILE_OFFLINE_PATTERN = r'Not Found or Deleted / Disabled due to inactivity or DMCA'
    DIRECT_LINK_PATTERN = r'<meta http-equiv="refresh" content="10;url=(.+)">'

    def setup(self):
        # direct links: resume and parallel transfers are safe
        self.resumeDownload = True
        self.multiDL = True

    def handleFree(self):
        """Submit the download form and follow the meta-refresh link."""
        ukey = re.search(self.__pattern__, self.pyfile.url).group(1)
        post_data = {"id": ukey, "op": "download3", "rand": ""}
        html = self.load(self.pyfile.url, post=post_data, decode=True)
        # Guard the match: the original called .group(1) unconditionally
        # and raised AttributeError when the refresh tag was missing.
        found = re.search(self.DIRECT_LINK_PATTERN, html)
        if found is None:
            self.parseError("Direct link")
        url = found.group(1)
        self.logDebug("Download " + url)
        self.download(url)
+
+getInfo = create_getInfo(JumbofilesCom)
diff --git a/pyload/plugins/hoster/LetitbitNet.py b/pyload/plugins/hoster/LetitbitNet.py
new file mode 100644
index 000000000..f5e2313ae
--- /dev/null
+++ b/pyload/plugins/hoster/LetitbitNet.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.common.json_layer import json_loads
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+
class LetitbitNet(SimpleHoster):
    """letitbit.net / shareflare.net hoster plugin.

    Free download flow: submit the "ifree_form", wait the advertised
    number of seconds, poll ajax/download3.php, solve ReCaptcha, then
    fetch the download link list from ajax/check_recaptcha.php.
    """
    __name__ = "LetitbitNet"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*(letitbit|shareflare).net/download/.*"
    __version__ = "0.20"
    __description__ = """letitbit.net"""
    __author_name__ = ("zoidberg", "z00nx")
    __author_mail__ = ("zoidberg@mujmail.cz", "z00nx0@gmail.com")

    CHECK_URL_PATTERN = r"ajax_check_url\s*=\s*'((http://[^/]+)[^']+)';"
    SECONDS_PATTERN = r"seconds\s*=\s*(\d+);"
    CAPTCHA_CONTROL_FIELD = r"recaptcha_control_field\s=\s'(?P<value>[^']+)'"
    FILE_INFO_PATTERN = r'<span[^>]*>File:.*?<span[^>]*>(?P<N>[^&]+).*</span>.*?\[(?P<S>[^\]]+)\]</span>'
    FILE_OFFLINE_PATTERN = r'>File not found<'

    DOMAIN = "http://letitbit.net"
    # always talk to letitbit.net, even for shareflare links
    FILE_URL_REPLACEMENTS = [(r"(?<=http://)([^/]+)", "letitbit.net")]
    RECAPTCHA_KEY = "6Lc9zdMSAAAAAF-7s2wuQ-036pLRbM0p8dDaQdAM"

    def setup(self):
        self.resumeDownload = True
        #TODO confirm that resume works

    def handleFree(self):
        """Run the free-download dialogue (form, wait, captcha, link list)."""
        action, inputs = self.parseHtmlForm('id="ifree_form"')
        if not action:
            self.parseError("page 1 / ifree_form")
        self.pyfile.size = float(inputs['sssize'])
        self.logDebug(action, inputs)
        inputs['desc'] = ""

        self.html = self.load(self.DOMAIN + action, post=inputs, cookies=True)

        # dead code kept for reference: the former page-2/page-3 dialogue
        """
        action, inputs = self.parseHtmlForm('id="d3_form"')
        if not action: self.parseError("page 2 / d3_form")
        #self.logDebug(action, inputs)

        self.html = self.load(action, post = inputs, cookies = True)

        try:
            ajax_check_url, captcha_url = re.search(self.CHECK_URL_PATTERN, self.html).groups()
            found = re.search(self.SECONDS_PATTERN, self.html)
            seconds = int(found.group(1)) if found else 60
            self.setWait(seconds+1)
            self.wait()
        except Exception, e:
            self.logError(e)
            self.parseError("page 3 / js")
        """

        # wait the number of seconds advertised in the page JS (60s default)
        found = re.search(self.SECONDS_PATTERN, self.html)
        seconds = int(found.group(1)) if found else 60
        self.logDebug("Seconds found", seconds)
        found = re.search(self.CAPTCHA_CONTROL_FIELD, self.html)
        recaptcha_control_field = found.group(1)
        self.logDebug("ReCaptcha control field found", recaptcha_control_field)
        self.setWait(seconds + 1)
        self.wait()

        # the server answers '1' when the wait was accepted
        response = self.load("%s/ajax/download3.php" % self.DOMAIN, post=" ", cookies=True)
        if response != '1':
            self.parseError('Unknown response - ajax_check_url')
        self.logDebug(response)

        recaptcha = ReCaptcha(self)
        challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
        post_data = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": response, "recaptcha_control_field": recaptcha_control_field}
        self.logDebug("Post data to send", post_data)
        response = self.load('%s/ajax/check_recaptcha.php' % self.DOMAIN, post=post_data, cookies=True)
        self.logDebug(response)
        # possible answers: empty (bad captcha), error codes, a JSON list
        # of mirrors, or a single direct URL
        if not response:
            self.invalidCaptcha()
        if response == "error_free_download_blocked":
            self.logInfo("Daily limit reached, waiting 24 hours")
            self.setWait(24 * 60 * 60)
            self.wait()
        if response == "error_wrong_captcha":
            self.logInfo("Wrong Captcha")
            self.invalidCaptcha()
            self.retry()
        elif response.startswith('['):
            urls = json_loads(response)
        elif response.startswith('http://'):
            urls = [response]
        else:
            self.parseError("Unknown response - captcha check")

        self.correctCaptcha()

        # try each mirror until one download succeeds
        for download_url in urls:
            try:
                self.logDebug("Download URL", download_url)
                self.download(download_url)
                break
            except Exception, e:
                self.logError(e)
        else:
            self.fail("Download did not finish correctly")
+
+getInfo = create_getInfo(LetitbitNet)
diff --git a/pyload/plugins/hoster/LoadTo.py b/pyload/plugins/hoster/LoadTo.py
new file mode 100644
index 000000000..0f99c272a
--- /dev/null
+++ b/pyload/plugins/hoster/LoadTo.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: halfman
+"""
+
+# Test links (random.bin):
+# http://www.load.to/dNsmgXRk4/random.bin
+# http://www.load.to/edbNTxcUb/random100.bin
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+
class LoadTo(SimpleHoster):
    """Load.to hoster plugin (free downloads only)."""
    __name__ = "LoadTo"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?load\.to/\w+"
    __version__ = "0.12"
    __description__ = """Load.to hoster plugin"""
    __author_name__ = ("halfman", "stickell")
    __author_mail__ = ("Pulpan3@gmail.com", "l.stickell@yahoo.it")

    FILE_INFO_PATTERN = r'<a [^>]+>(?P<N>.+)</a></h3>\s*Size: (?P<S>\d+) Bytes'
    URL_PATTERN = r'<form method="post" action="(.+?)"'
    FILE_OFFLINE_PATTERN = r'Can\'t find file. Please check URL.<br />'
    WAIT_PATTERN = r'type="submit" value="Download \((\d+)\)"'

    def setup(self):
        # site does not allow simultaneous free downloads
        self.multiDL = False

    def process(self, pyfile):
        """Fetch the page, honour the countdown, then post to the form URL."""
        self.html = self.load(pyfile.url, decode=True)

        found = re.search(self.URL_PATTERN, self.html)
        if not found:
            self.parseError('URL')
        download_url = found.group(1)

        # wait out the visible countdown, if any
        timmy = re.search(self.WAIT_PATTERN, self.html)
        if timmy:
            # Bug fix: the regex group is a string -- setWait expects a
            # number of seconds, so convert it.
            self.setWait(int(timmy.group(1)))
            self.wait()

        self.download(download_url, disposition=True)
+
+
+getInfo = create_getInfo(LoadTo)
diff --git a/pyload/plugins/hoster/LuckyShareNet.py b/pyload/plugins/hoster/LuckyShareNet.py
new file mode 100644
index 000000000..08e44d9f6
--- /dev/null
+++ b/pyload/plugins/hoster/LuckyShareNet.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.lib.bottle import json_loads
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+
class LuckyShareNet(SimpleHoster):
    """LuckyShare.net download hoster (free mode with ReCaptcha)."""
    __name__ = "LuckyShareNet"
    __type__ = "hoster"
    __pattern__ = r"https?://(www\.)?luckyshare.net/(?P<ID>\d{10,})"
    __version__ = "0.02"
    __description__ = """LuckyShare.net Download Hoster"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    FILE_INFO_PATTERN = r"<h1 class='file_name'>(?P<N>\S+)</h1>\s*<span class='file_size'>Filesize: (?P<S>[\d.]+)(?P<U>\w+)</span>"
    FILE_OFFLINE_PATTERN = 'There is no such file available'
    RECAPTCHA_KEY = '6LdivsgSAAAAANWh-d7rPE1mus4yVWuSQIJKIYNw'

    def parseJson(self, rep):
        """Parse an AJAX reply, retrying on throttling or expired hashes."""
        if 'AJAX Error' in rep:
            # throttled: the page reveals the wait between free downloads
            html = self.load(self.pyfile.url, decode=True)
            m = re.search(r"waitingtime = (\d+);", html)
            if m:
                waittime = int(m.group(1))
                self.logDebug('You have to wait %d seconds between free downloads' % waittime)
                self.retry(wait_time=waittime)
            else:
                self.parseError('Unable to detect wait time between free downloads')
        elif 'Hash expired' in rep:
            self.retry(reason='Hash expired')
        return json_loads(rep)

    # TODO: There should be a filesize limit for free downloads
    # TODO: Some files could not be downloaded in free mode
    def handleFree(self):
        """Free download: request a ticket, solve ReCaptcha, download."""
        file_id = re.search(self.__pattern__, self.pyfile.url).group('ID')
        self.logDebug('File ID: ' + file_id)
        rep = self.load(r"http://luckyshare.net/download/request/type/time/file/" + file_id, decode=True)
        self.logDebug('JSON: ' + rep)
        json = self.parseJson(rep)

        self.setWait(int(json['time']))
        self.wait()

        recaptcha = ReCaptcha(self)
        # up to 5 captcha attempts
        for i in xrange(5):
            challenge, response = recaptcha.challenge(self.RECAPTCHA_KEY)
            rep = self.load(r"http://luckyshare.net/download/verify/challenge/%s/response/%s/hash/%s" %
                            (challenge, response, json['hash']), decode=True)
            self.logDebug('JSON: ' + rep)
            if 'link' in rep:
                json.update(self.parseJson(rep))
                self.correctCaptcha()
                break
            elif 'Verification failed' in rep:
                self.logInfo('Wrong captcha')
                self.invalidCaptcha()
            else:
                # typo fixed: "downlaod" -> "download"
                self.parseError('Unable to get download link')

        # Bug fix: use .get() -- if all 5 captcha attempts failed, the
        # 'link' key was never added and json['link'] raised KeyError
        # instead of producing the intended failure message below.
        if not json.get('link'):
            self.fail("No Download url retrieved/all captcha attempts failed")

        self.logDebug('Direct URL: ' + json['link'])
        self.download(json['link'])
+
+
+getInfo = create_getInfo(LuckyShareNet)
diff --git a/pyload/plugins/hoster/MediafireCom.py b/pyload/plugins/hoster/MediafireCom.py
new file mode 100644
index 000000000..1e856c41d
--- /dev/null
+++ b/pyload/plugins/hoster/MediafireCom.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+from module.plugins.internal.CaptchaService import SolveMedia
+from module.network.RequestFactory import getURL
+
+
def replace_eval(js_expr):
    """Strip the eval(" wrapper and unescape quotes in a packed JS expression."""
    for old, new in (('eval("', ''), ("\\'", "'"), ('\\"', '"')):
        js_expr = js_expr.replace(old, new)
    return js_expr
+
+
def checkHTMLHeader(url):
    """Classify a mediafire link by following HTTP headers.

    Returns (url, status): 0 = normal landing page, 1 = error redirect
    (errno=320 means the file is gone), 2 = direct download
    (content-disposition header), 3 = request failed.
    """
    try:
        # follow at most 3 redirects
        for i in range(3):
            header = getURL(url, just_header=True)
            for line in header.splitlines():
                line = line.lower()
                if 'location' in line:
                    url = line.split(':', 1)[1].strip()
                    if 'error.php?errno=320' in url:
                        return url, 1
                    # relative redirect: re-anchor on the mediafire host
                    if not url.startswith('http://'):
                        url = 'http://www.mediafire.com' + url
                    break
                elif 'content-disposition' in line:
                    return url, 2
            else:
                # no location header: final page reached
                break
    except Exception:
        # Bug fix: was a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt. Best-effort: any network or
        # parse failure is reported as status 3.
        return url, 3

    return url, 0
+
+
def getInfo(urls):
    """Yield one (name, size, status, url) tuple per given link."""
    for link in urls:
        location, status = checkHTMLHeader(link)
        if status:
            # the header check already decided: offline/direct/failed
            yield (link, 0, status, link)
        else:
            # normal landing page: parse name and size out of the HTML
            yield parseFileInfo(MediafireCom, link, getURL(link, decode=True))
+
+
class MediafireCom(SimpleHoster):
    """Mediafire.com hoster plugin, free mode only.

    The link is first classified via HTTP headers (checkHTMLHeader);
    direct links are downloaded straight away, otherwise the page is
    loaded, an optional SolveMedia captcha and download password are
    handled, and the final URL is scraped from the page JS.
    """
    __name__ = "MediafireCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*mediafire\.com/(file/|(view/?|download.php)?\?)(\w{11}|\w{15})($|/)"
    __version__ = "0.79"
    __description__ = """Mediafire.com plugin - free only"""
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    DOWNLOAD_LINK_PATTERN = r'<div class="download_link"[^>]*(?:z-index:(?P<zindex>\d+))?[^>]*>\s*<a href="(?P<href>http://[^"]+)"'
    JS_KEY_PATTERN = r"DoShow\('mfpromo1'\);[^{]*{((\w+)='';.*?)eval\(\2\);"
    JS_ZMODULO_PATTERN = r"\('z-index'\)\) \% (\d+)\)\);"
    SOLVEMEDIA_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.noscript\?k=([^"]+)'
    PAGE1_ACTION_PATTERN = r'<link rel="canonical" href="([^"]+)"/>'
    PASSWORD_PATTERN = r'<form name="form_password"'

    FILE_NAME_PATTERN = r'<META NAME="description" CONTENT="(?P<N>[^"]+)"/>'
    FILE_INFO_PATTERN = r"oFileSharePopup\.ald\('(?P<ID>[^']*)','(?P<N>[^']*)','(?P<S>[^']*)','','(?P<sha256>[^']*)'\)"
    FILE_OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>'

    def setup(self):
        # parallel transfers are only enabled once the link is classified
        self.multiDL = False

    def process(self, pyfile):
        """Classify the link via headers, then dispatch to the right flow."""
        # normalize /view links to plain file links
        pyfile.url = re.sub(r'/view/?\?', '/?', pyfile.url)

        self.url, result = checkHTMLHeader(pyfile.url)
        self.logDebug('Location (%d): %s' % (result, self.url))

        if result == 0:
            # ordinary landing page: captcha, then free/premium handling
            self.html = self.load(self.url, decode=True)
            self.checkCaptcha()
            self.multiDL = True
            self.check_data = self.getFileInfo()

            if self.account:
                self.handlePremium()
            else:
                self.handleFree()
        elif result == 1:
            self.offline()
        else:
            # direct download link (content-disposition)
            self.multiDL = True
            self.download(self.url, disposition=True)

    def handleFree(self):
        """Handle password-protected links, then scrape the final URL."""
        passwords = self.getPassword().splitlines()
        # keep submitting passwords while the password form is shown
        while self.PASSWORD_PATTERN in self.html:
            if len(passwords):
                password = passwords.pop(0)
                self.logInfo("Password protected link, trying " + password)
                self.html = self.load(self.url, post={"downloadp": password})
            else:
                self.fail("No or incorrect password")

        found = re.search(r'kNO = "(http://.*?)";', self.html)
        if not found: self.parseError("Download URL")
        download_url = found.group(1)
        self.logDebug("DOWNLOAD LINK:", download_url)

        self.download(download_url)

    def checkCaptcha(self):
        """Solve the SolveMedia captcha if present (up to 5 attempts)."""
        for i in xrange(5):
            found = re.search(self.SOLVEMEDIA_PATTERN, self.html)
            if found:
                captcha_key = found.group(1)
                solvemedia = SolveMedia(self)
                captcha_challenge, captcha_response = solvemedia.challenge(captcha_key)
                self.html = self.load(self.url, post={"adcopy_challenge": captcha_challenge,
                                                      "adcopy_response": captcha_response}, decode=True)
            else:
                # no captcha (left) on the page
                break
        else:
            self.fail("No valid recaptcha solution received")
diff --git a/pyload/plugins/hoster/MegaNz.py b/pyload/plugins/hoster/MegaNz.py
new file mode 100644
index 000000000..e5be4eeb7
--- /dev/null
+++ b/pyload/plugins/hoster/MegaNz.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+
+import re
+import random
+from array import array
+from os import remove
+from base64 import standard_b64decode
+
+from Crypto.Cipher import AES
+from Crypto.Util import Counter
+
+from module.common.json_layer import json
+from module.plugins.Hoster import Hoster
+
+#def getInfo(urls):
+# pass
+
+class MegaNz(Hoster):
+ __name__ = "MegaNz"
+ __type__ = "hoster"
+ __pattern__ = r"https?://([a-z0-9]+\.)?mega\.co\.nz/#!([a-zA-Z0-9!_\-]+)"
+ __version__ = "0.12"
+ __description__ = """mega.co.nz hoster plugin"""
+ __author_name__ = ("RaNaN", )
+ __author_mail__ = ("ranan@pyload.org", )
+
+ API_URL = "https://g.api.mega.co.nz/cs?id=%d"
+ FILE_SUFFIX = ".crypted"
+
+ def b64_decode(self, data):
+ data = data.replace("-", "+").replace("_", "/")
+ return standard_b64decode(data + '=' * (-len(data) % 4))
+
+ def getCipherKey(self, key):
+ """ Construct the cipher key from the given data """
+ a = array("I", key)
+ key_array = array("I", [a[0] ^ a[4], a[1] ^ a[5], a[2] ^ a[6], a[3] ^ a[7]])
+ return key_array
+
+ def callApi(self, **kwargs):
+ """ Dispatch a call to the api, see https://mega.co.nz/#developers """
+ # generate a session id, no idea where to obtain elsewhere
+ uid = random.randint(10 << 9, 10 ** 10)
+
+ resp = self.load(self.API_URL % uid, post=json.dumps([kwargs]))
+ self.logDebug("Api Response: " + resp)
+ return json.loads(resp)
+
+ def decryptAttr(self, data, key):
+
+ cbc = AES.new(self.getCipherKey(key), AES.MODE_CBC, "\0" * 16)
+ attr = cbc.decrypt(self.b64_decode(data))
+ self.logDebug("Decrypted Attr: " + attr)
+ if not attr.startswith("MEGA"):
+ self.fail(_("Decryption failed"))
+
+ # Data is padded, 0-bytes must be stripped
+ return json.loads(attr.replace("MEGA", "").rstrip("\0").strip())
+
+ def decryptFile(self, key):
+ """ Decrypts the file at lastDownload` """
+
+ # upper 64 bit of counter start
+ n = key[16:24]
+
+ # convert counter to long and shift bytes
+ ctr = Counter.new(128, initial_value=long(n.encode("hex"),16) << 64)
+ cipher = AES.new(self.getCipherKey(key), AES.MODE_CTR, counter=ctr)
+
+ self.pyfile.setStatus("decrypting")
+ f = open(self.lastDownload, "rb")
+ df = open(self.lastDownload.rsplit(self.FILE_SUFFIX)[0], "wb")
+
+ # TODO: calculate CBC-MAC for checksum
+
+ size = 2 ** 15 # buffer size, 32k
+ while True:
+ buf = f.read(size)
+ if not buf: break
+
+ df.write(cipher.decrypt(buf))
+
+ f.close()
+ df.close()
+ remove(self.lastDownload)
+
+ def process(self, pyfile):
+
+ key = None
+
+ # match is guaranteed because plugin was chosen to handle url
+ node = re.search(self.__pattern__, pyfile.url).group(2)
+ if "!" in node:
+ node, key = node.split("!")
+
+ self.logDebug("File id: %s | Key: %s" % (node, key))
+
+ if not key:
+ self.fail(_("No file key provided in the URL"))
+
+ # g is for requesting a download url
+ # this is similar to the calls in the mega js app, documentation is very bad
+ dl = self.callApi(a="g", g=1, p=node, ssl=1)[0]
+
+ if "e" in dl:
+ e = dl["e"]
+ # ETEMPUNAVAIL (-18): Resource temporarily not available, please try again later
+ if e == -18:
+ self.retry()
+ else:
+ self.fail(_("Error code:") + e)
+
+ # TODO: map other error codes, e.g
+ # EACCESS (-11): Access violation (e.g., trying to write to a read-only share)
+
+ key = self.b64_decode(key)
+ attr = self.decryptAttr(dl["at"], key)
+
+ pyfile.name = attr["n"] + self.FILE_SUFFIX
+
+ self.download(dl["g"])
+ self.decryptFile(key)
+
+ # Everything is finished and final name can be set
+ pyfile.name = attr["n"]
diff --git a/pyload/plugins/hoster/MegacrypterCom.py b/pyload/plugins/hoster/MegacrypterCom.py
new file mode 100644
index 000000000..9f012e5be
--- /dev/null
+++ b/pyload/plugins/hoster/MegacrypterCom.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+import re
+
+from module.common.json_layer import json
+from module.plugins.hoster.MegaNz import MegaNz
+
+
+class MegacrypterCom(MegaNz):
+ __name__ = "MegacrypterCom"
+ __type__ = "hoster"
+ __pattern__ = r"(https?://[a-z0-9]{0,10}\.?megacrypter\.com/[a-zA-Z0-9!_\-]+)"
+ __version__ = "0.1"
+ __description__ = """megacrypter plugin, based and inherits from RaNaN's MegaNz plugin"""
+ __author_name__ = ("GonzaloSR", )
+ __author_mail__ = ("gonzalo@gonzalosr.com", )
+
+ API_URL = "http://megacrypter.com/api"
+ FILE_SUFFIX = ".crypted"
+
+
+ def callApi(self, **kwargs):
+ """ Dispatch a call to the api, see megacrypter.com/api_doc """
+ self.logDebug("JSON request: " + json.dumps(kwargs))
+ resp = self.load(self.API_URL, post=json.dumps(kwargs))
+ self.logDebug("API Response: " + resp)
+ return json.loads(resp)
+
+
+ def process(self, pyfile):
+
+ key = None
+
+ # match is guaranteed because plugin was chosen to handle url
+ node = re.search(self.__pattern__, pyfile.url).group(1)
+
+
+ # get Mega.co.nz link info
+ info = self.callApi(link=node, m="info")
+
+ # get crypted file URL
+ dl = self.callApi(link=node, m="dl")
+
+
+ # TODO: map error codes, implement password protection
+ # if info["pass"] == true:
+ # crypted_file_key, md5_file_key = info["key"].split("#")
+
+
+ key = self.b64_decode(info["key"])
+
+ pyfile.name = info["name"] + self.FILE_SUFFIX
+
+ self.download(dl["url"])
+ self.decryptFile(key)
+
+ # Everything is finished and final name can be set
+ pyfile.name = info["name"]
+
+
diff --git a/pyload/plugins/hoster/MegasharesCom.py b/pyload/plugins/hoster/MegasharesCom.py
new file mode 100644
index 000000000..3fac633bc
--- /dev/null
+++ b/pyload/plugins/hoster/MegasharesCom.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from time import time
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+class MegasharesCom(SimpleHoster):
+ __name__ = "MegasharesCom"
+ __type__ = "hoster"
+ __pattern__ = r"http://(\w+\.)?megashares.com/.*"
+ __version__ = "0.21"
+ __description__ = """megashares.com plugin - free only"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ FILE_NAME_PATTERN = '<h1 class="black xxl"[^>]*title="(?P<N>[^"]+)">'
+ FILE_SIZE_PATTERN = '<strong><span class="black">Filesize:</span></strong> (?P<S>[0-9.]+) (?P<U>[kKMG])i?B<br />'
+ DOWNLOAD_URL_PATTERN = '<div id="show_download_button_%d"[^>]*>\s*<a href="([^"]+)">'
+ PASSPORT_LEFT_PATTERN = 'Your Download Passport is: <[^>]*>(\w+).*\s*You have\s*<[^>]*>\s*([0-9.]+) ([kKMG]i?B)'
+ PASSPORT_RENEW_PATTERN = 'Your download passport will renew in\s*<strong>(\d+)</strong>:<strong>(\d+)</strong>:<strong>(\d+)</strong>'
+ REACTIVATE_NUM_PATTERN = r'<input[^>]*id="random_num" value="(\d+)" />'
+ REACTIVATE_PASSPORT_PATTERN = r'<input[^>]*id="passport_num" value="(\w+)" />'
+ REQUEST_URI_PATTERN = r'var request_uri = "([^"]+)";'
+ NO_SLOTS_PATTERN = r'<dd class="red">All download slots for this link are currently filled'
+ FILE_OFFLINE_PATTERN = r'<dd class="red">(Invalid Link Request|Link has been deleted)'
+
+ def setup(self):
+ self.resumeDownload = True
+ self.multiDL = True if self.premium else False
+
+ def handlePremium(self):
+ self.handleDownload(True)
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ if self.NO_SLOTS_PATTERN in self.html:
+ self.retry(wait_time = 300)
+
+ self.getFileInfo()
+ #if self.pyfile.size > 576716800: self.fail("This file is too large for free download")
+
+ # Reactivate passport if needed
+ found = re.search(self.REACTIVATE_PASSPORT_PATTERN, self.html)
+ if found:
+ passport_num = found.group(1)
+ request_uri = re.search(self.REQUEST_URI_PATTERN, self.html).group(1)
+
+ for i in range(5):
+ random_num = re.search(self.REACTIVATE_NUM_PATTERN, self.html).group(1)
+
+ verifyinput = self.decryptCaptcha("http://megashares.com/index.php?secgfx=gfx&random_num=%s" % random_num)
+ self.logInfo("Reactivating passport %s: %s %s" % (passport_num, random_num, verifyinput))
+
+ url = "http://d01.megashares.com%s&rs=check_passport_renewal" % request_uri + \
+ "&rsargs[]=%s&rsargs[]=%s&rsargs[]=%s" % (verifyinput, random_num, passport_num) + \
+ "&rsargs[]=replace_sec_pprenewal&rsrnd=%s" % str(int(time()*1000))
+ self.logDebug(url)
+ response = self.load(url)
+
+ if 'Thank you for reactivating your passport.' in response:
+ self.correctCaptcha()
+ self.retry(0)
+ else:
+ self.invalidCaptcha()
+ else: self.fail("Failed to reactivate passport")
+
+ # Check traffic left on passport
+ found = re.search(self.PASSPORT_LEFT_PATTERN, self.html)
+ if not found: self.fail('Passport not found')
+ self.logInfo("Download passport: %s" % found.group(1))
+ data_left = float(found.group(2)) * 1024 ** {'KB': 1, 'MB': 2, 'GB': 3}[found.group(3)]
+ self.logInfo("Data left: %s %s (%d MB needed)" % (found.group(2), found.group(3), self.pyfile.size / 1048576))
+
+ if not data_left:
+ found = re.search(self.PASSPORT_RENEW_PATTERN, self.html)
+ renew = (found.group(1) + 60 * (found.group(2) + 60 * found.group(3))) if found else 600
+ self.retry(renew, 15, "Unable to get passport")
+
+ self.handleDownload(False)
+
+ def handleDownload(self, premium = False):
+ # Find download link;
+ found = re.search(self.DOWNLOAD_URL_PATTERN % (1 if premium else 2), self.html)
+ msg = '%s download URL' % ('Premium' if premium else 'Free')
+ if not found: self.parseError(msg)
+
+ download_url = found.group(1)
+ self.logDebug("%s: %s" % (msg, download_url))
+ self.download(download_url)
+
+getInfo = create_getInfo(MegasharesCom) \ No newline at end of file
diff --git a/pyload/plugins/hoster/MovReelCom.py b/pyload/plugins/hoster/MovReelCom.py
new file mode 100644
index 000000000..6f5f1d3f1
--- /dev/null
+++ b/pyload/plugins/hoster/MovReelCom.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.utils import html_unescape
+from module.network.RequestFactory import getURL
+
+class MovReelCom(SimpleHoster):
+ __name__ = "MovReelCom"
+ __type__ = "hoster"
+ __pattern__ = r"http://movreel.com/.*"
+ __version__ = "1.00"
+ __description__ = """MovReel.com hoster plugin"""
+ __author_name__ = ("JorisV83")
+ __author_mail__ = ("jorisv83-pyload@yahoo.com")
+
+ FILE_INFO_PATTERN = r'You have requested <font color="red">http://movreel.com/.*/(?P<N>.+?)</font>.*\((?P<S>[\d.]+) (?P<U>..)\)</font>'
+ FILE_OFFLINE_PATTERN = r'<b>File Not Found</b>'
+
+ def setup(self):
+ self.resumeDownload = True
+ self.multiDL = False
+
+ def handleFree(self):
+
+ # Define search patterns
+ op_pattern = '<input type="hidden" name="op" value="(.*)">'
+ id_pattern = '<input type="hidden" name="id" value="(.*)">'
+ fn_pattern = '<input type="hidden" name="fname" value="(.*)">'
+ re_pattern = '<input type="hidden" name="referer" value="(.*)">'
+ ul_pattern = '<input type="hidden" name="usr_login" value="(.*)">'
+ rand_pattern = '<input type="hidden" name="rand" value="(.*)">'
+ link_pattern = "var file_link = '(.*)';"
+ downlimit_pattern = '<br><p class="err">You have reached the download-limit: .*</p>'
+
+ # Get HTML source
+ self.logDebug("Getting first HTML source")
+ html = self.load(self.pyfile.url)
+ self.logDebug(" > Done")
+
+ op_val = re.search(op_pattern, html).group(1)
+ id_val = re.search(id_pattern, html).group(1)
+ fn_val = re.search(fn_pattern, html).group(1)
+ re_val = re.search(re_pattern, html).group(1)
+ ul_val = re.search(ul_pattern, html).group(1)
+
+ # Debug values
+ self.logDebug(" > Op " + op_val)
+ self.logDebug(" > Id " + id_val)
+ self.logDebug(" > Fname " + fn_val)
+ self.logDebug(" > Referer " + re_val)
+ self.logDebug(" > User Login " + ul_val)
+
+ # Create post data
+ post_data = {"op" : op_val, "usr_login" : ul_val, "id" : id_val, "fname" : fn_val, "referer" : re_val, "method_free" : "+Free+Download"}
+
+ # Post and get new HTML source
+ self.logDebug("Getting second HTML source")
+ html = self.load(self.pyfile.url, post = post_data, decode=True)
+ self.logDebug(" > Done")
+
+ # Check download limit
+ if re.search(downlimit_pattern, html) is not None:
+ self.retry(3, 7200, "Download limit reached, wait 2h")
+
+ # Retrieve data
+ if re.search(op_pattern, html) is not None:
+ op_val = re.search(op_pattern, html).group(1)
+ else:
+ self.retry(3, 10, "Second html: no op found!!")
+
+ if re.search(id_pattern, html) is not None:
+ id_val = re.search(id_pattern, html).group(1)
+ else:
+ self.retry(3, 10, "Second html: no id found!!")
+
+ if re.search(rand_pattern, html) is not None:
+ rand_val = re.search(rand_pattern, html).group(1)
+ else:
+ self.retry(3, 10, "Second html: no rand found!!")
+
+ re_val = self.pyfile.url
+
+ # Debug values
+ self.logDebug(" > Op " + op_val)
+ self.logDebug(" > Id " + id_val)
+ self.logDebug(" > Rand " + rand_val)
+ self.logDebug(" > Referer " + re_val)
+
+ # Create post data
+ post_data = {"op" : op_val, "id" : id_val, "rand" : rand_val, "referer" : re_val, "method_free" : "+Free+Download", "method_premium" : "", "down_direct" : "1"}
+
+ # Post and get new HTML source
+ self.logDebug("Getting third HTML source")
+ html = self.load(self.pyfile.url, post = post_data, decode=True)
+ self.logDebug(" > Done")
+
+ # Get link value
+ if re.search(link_pattern, html) is not None:
+ link_val = re.search(link_pattern, html).group(1)
+ self.logDebug(" > Link " + link_val)
+ self.download(link_val)
+ else:
+ self.logDebug("No link found!!")
+ self.retry(3, 10, "No link found!!")
+
+getInfo = create_getInfo(MovReelCom) \ No newline at end of file
diff --git a/pyload/plugins/hoster/MultiDebridCom.py b/pyload/plugins/hoster/MultiDebridCom.py
new file mode 100644
index 000000000..7280504cb
--- /dev/null
+++ b/pyload/plugins/hoster/MultiDebridCom.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+############################################################################
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU Affero General Public License as #
+# published by the Free Software Foundation, either version 3 of the #
+# License, or (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU Affero General Public License for more details. #
+# #
+# You should have received a copy of the GNU Affero General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+############################################################################
+
+import re
+
+from module.plugins.Hoster import Hoster
+from module.common.json_layer import json_loads
+
+
+class MultiDebridCom(Hoster):
+ __name__ = "MultiDebridCom"
+ __version__ = "0.02"
+ __type__ = "hoster"
+ __pattern__ = r"http://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/dl/"
+ __description__ = """Multi-debrid.com hoster plugin"""
+ __author_name__ = ("stickell")
+ __author_mail__ = ("l.stickell@yahoo.it")
+
+ def init(self):
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "Multi-debrid.com")
+ self.fail("No Multi-debrid.com account provided")
+
+ self.logDebug("Original URL: %s" % pyfile.url)
+ if re.match(self.__pattern__, pyfile.url):
+ new_url = pyfile.url
+ else:
+ page = self.req.load('http://multi-debrid.com/api.php',
+ get={'user': self.user, 'pass': self.account.getAccountData(self.user)['password'],
+ 'link': pyfile.url})
+ self.logDebug("JSON data: " + page)
+ page = json_loads(page)
+ if page['status'] != 'ok':
+ self.fail('Unable to unrestrict link')
+ new_url = page['link']
+
+ self.logDebug("Unrestricted URL: " + new_url)
+
+ self.download(new_url, disposition=True)
diff --git a/pyload/plugins/hoster/MultishareCz.py b/pyload/plugins/hoster/MultishareCz.py
new file mode 100644
index 000000000..af7aa94cf
--- /dev/null
+++ b/pyload/plugins/hoster/MultishareCz.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from random import random
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+class MultishareCz(SimpleHoster):
+    """MultiShare.cz hoster plugin.
+
+    Handles native multishare.cz links (free and premium) and, for premium
+    users, links of other hosters routed through the multishare service
+    (handleOverriden).
+    """
+    __name__ = "MultishareCz"
+    __type__ = "hoster"
+    __pattern__ = r"http://(?:\w*\.)?multishare.cz/stahnout/(?P<ID>\d+).*"
+    __version__ = "0.40"
+    __description__ = """MultiShare.cz"""
+    __author_name__ = ("zoidberg")
+
+    FILE_INFO_PATTERN = ur'(?:<li>Název|Soubor): <strong>(?P<N>[^<]+)</strong><(?:/li><li|br)>Velikost: <strong>(?P<S>[^<]+)</strong>'
+    FILE_OFFLINE_PATTERN = ur'<h1>Stáhnout soubor</h1><p><strong>PoşadovanÜ soubor neexistuje.</strong></p>'
+    FILE_SIZE_REPLACEMENTS = [('&nbsp;', '')]
+
+    def process(self, pyfile):
+        """Dispatch: native link -> free/premium flow, anything else ->
+        premium-only pass-through of a foreign hoster link."""
+        msurl = re.match(self.__pattern__, pyfile.url)
+        if msurl:
+            self.fileID = msurl.group('ID')
+            self.html = self.load(pyfile.url, decode = True)
+            self.getFileInfo()
+
+            if self.premium:
+                self.handlePremium()
+            else:
+                self.handleFree()
+        else:
+            self.handleOverriden()
+
+    def handleFree(self):
+        # free download endpoint only needs the numeric file id
+        self.download("http://www.multishare.cz/html/download_free.php?ID=%s" % self.fileID)
+
+    def handlePremium(self):
+        if not self.checkTrafficLeft():
+            self.logWarning("Not enough credit left to download file")
+            self.resetAccount()
+
+        self.download("http://www.multishare.cz/html/download_premium.php?ID=%s" % self.fileID)
+        self.checkTrafficLeft()
+
+    def handleOverriden(self):
+        """Download a foreign hoster link through multishare (premium only)."""
+        if not self.premium:
+            self.fail("Only premium users can download from other hosters")
+
+        self.html = self.load('http://www.multishare.cz/html/mms_ajax.php', post = {"link": self.pyfile.url}, decode = True)
+        self.getFileInfo()
+
+        if not self.checkTrafficLeft():
+            self.fail("Not enough credit left to download file")
+
+        # download servers are load-balanced by a random dl<N> host number
+        url = "http://dl%d.mms.multishare.cz/html/mms_process.php" % round(random()*10000*random())
+        params = {"u_ID" : self.acc_info["u_ID"], "u_hash" : self.acc_info["u_hash"], "link" : self.pyfile.url}
+        self.logDebug(url, params)
+        self.download(url, get = params)
+        self.checkTrafficLeft()
+
+getInfo = create_getInfo(MultishareCz) \ No newline at end of file
diff --git a/pyload/plugins/hoster/MyvideoDe.py b/pyload/plugins/hoster/MyvideoDe.py
new file mode 100644
index 000000000..f2d2082a7
--- /dev/null
+++ b/pyload/plugins/hoster/MyvideoDe.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster
+from module.unescape import unescape
+
+class MyvideoDe(Hoster):
+ __name__ = "MyvideoDe"
+ __type__ = "hoster"
+ __pattern__ = r"http://(www\.)?myvideo.de/watch/"
+ __version__ = "0.9"
+ __description__ = """Myvideo.de Video Download Hoster"""
+ __author_name__ = ("spoob")
+ __author_mail__ = ("spoob@pyload.org")
+
+ def setup(self):
+ self.html = None
+
+ def process(self, pyfile):
+ self.pyfile = pyfile
+ self.download_html()
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+ def download_html(self):
+ self.html = self.load(self.pyfile.url)
+
+ def get_file_url(self):
+ videoId = re.search(r"addVariable\('_videoid','(.*)'\);p.addParam\('quality'", self.html).group(1)
+ videoServer = re.search("rel='image_src' href='(.*)thumbs/.*' />", self.html).group(1)
+ file_url = videoServer + videoId + ".flv"
+ return file_url
+
+ def get_file_name(self):
+ file_name_pattern = r"<h1 class='globalHd'>(.*)</h1>"
+ return unescape(re.search(file_name_pattern, self.html).group(1).replace("/", "") + '.flv')
+
+ def file_exists(self):
+ self.download_html()
+ self.load(str(self.pyfile.url), cookies=False, just_header=True)
+ if self.req.lastEffectiveURL == "http://www.myvideo.de/":
+ return False
+ return True
diff --git a/pyload/plugins/hoster/NarodRu.py b/pyload/plugins/hoster/NarodRu.py
new file mode 100644
index 000000000..335860de9
--- /dev/null
+++ b/pyload/plugins/hoster/NarodRu.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from random import random
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+class NarodRu(SimpleHoster):
+    """Hoster plugin for narod.ru / narod.yandex.ru disk links (free flow)."""
+    __name__ = "NarodRu"
+    __type__ = "hoster"
+    __pattern__ = r"http://(www\.)?narod(\.yandex)?\.ru/(disk|start/[0-9]+\.\w+-narod\.yandex\.ru)/(?P<ID>\d+)/.+"
+    __version__ = "0.1"
+    __description__ = """Narod.ru"""
+    __author_name__ = ("zoidberg")
+
+    FILE_NAME_PATTERN = r'<dt class="name">(?:<[^<]*>)*(?P<N>[^<]+)</dt>'
+    FILE_SIZE_PATTERN = r'<dd class="size">(?P<S>\d[^<]*)</dd>'
+    FILE_OFFLINE_PATTERN = r'<title>404</title>|Ѐайл уЎалеМ с сервОса|ЗакПМчОлся срПк храМеМОя файла\.'
+
+    FILE_SIZE_REPLACEMENTS = [(u'КБ', 'KB'), (u'МБ', 'MB'), (u'ГБ', 'GB')]
+    FILE_URL_REPLACEMENTS = [("narod.yandex.ru/", "narod.ru/"), (r"/start/[0-9]+\.\w+-narod\.yandex\.ru/([0-9]{6,15})/\w+/(\w+)", r"/disk/\1/\2")]
+
+    CAPTCHA_PATTERN = r'<number url="(.*?)">(\w+)</number>'
+    DOWNLOAD_LINK_PATTERN = r'<a class="h-link" rel="yandex_bar" href="(.+?)">'
+
+    def handleFree(self):
+        """Free download: solve the numeric captcha (max 5 attempts), then
+        follow the download link returned on success.
+
+        The for/else runs only when every attempt failed.
+        """
+        for i in range(5):
+            # fetch a fresh captcha descriptor (xml with the image url + key)
+            self.html = self.load('http://narod.ru/disk/getcapchaxml/?rnd=%d' % int(random() * 777))
+            found = re.search(self.CAPTCHA_PATTERN, self.html)
+            if not found: self.parseError('Captcha')
+            post_data = {"action": "sendcapcha"}
+            captcha_url, post_data['key'] = found.groups()
+            post_data['rep'] = self.decryptCaptcha(captcha_url)
+
+            # submit the captcha answer together with the page request
+            self.html = self.load(self.pyfile.url, post = post_data, decode = True)
+            found = re.search(self.DOWNLOAD_LINK_PATTERN, self.html)
+            if found:
+                url = 'http://narod.ru' + found.group(1)
+                self.correctCaptcha()
+                break
+            elif u'<b class="error-msg"><strong>ОшОблОсь?</strong>' in self.html:
+                self.invalidCaptcha()
+            else:
+                self.parseError('Download link')
+        else:
+            self.fail("No valid captcha code entered")
+
+        self.logDebug('Download link: ' + url)
+        self.download(url)
+
+getInfo = create_getInfo(NarodRu) \ No newline at end of file
diff --git a/pyload/plugins/hoster/NetloadIn.py b/pyload/plugins/hoster/NetloadIn.py
new file mode 100644
index 000000000..c59080158
--- /dev/null
+++ b/pyload/plugins/hoster/NetloadIn.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from time import sleep, time
+
+
+from module.utils import chunks
+from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+
+def getInfo(urls):
+ ## returns list of tuples (name, size (in bytes), status (see FileDatabase), url)
+
+
+ apiurl = "http://api.netload.in/info.php?auth=Zf9SnQh9WiReEsb18akjvQGqT0I830e8&bz=1&md5=1&file_id="
+ id_regex = re.compile(NetloadIn.__pattern__)
+ urls_per_query = 80
+
+ for chunk in chunks(urls, urls_per_query):
+ ids = ""
+ for url in chunk:
+ match = id_regex.search(url)
+ if match:
+ ids = ids + match.group(1) +";"
+
+ api = getURL(apiurl+ids, decode = True)
+
+ if api is None or len(api) < 10:
+ print "Netload prefetch: failed "
+ return
+ if api.find("unknown_auth") >= 0:
+ print "Netload prefetch: Outdated auth code "
+ return
+
+ result = []
+
+ for i, r in enumerate(api.splitlines()):
+ try:
+ tmp = r.split(";")
+ try:
+ size = int(tmp[2])
+ except:
+ size = 0
+ result.append( (tmp[1], size, 2 if tmp[3] == "online" else 1, chunk[i] ) )
+ except:
+ print "Netload prefetch: Error while processing response: "
+ print r
+
+ yield result
+
+class NetloadIn(Hoster):
+    """Netload.in download hoster.
+
+    Free downloads walk the html wait/captcha flow (download_html); premium
+    accounts skip straight to the direct download.  File metadata is first
+    requested from the info api and falls back to html scraping when the
+    api answer is unusable.
+    """
+    __name__ = "NetloadIn"
+    __type__ = "hoster"
+    __pattern__ = r"https?://.*netload\.in/(?:datei(.*?)(?:\.htm|/)|index.php?id=10&file_id=)"
+    __version__ = "0.43"
+    __description__ = """Netload.in Download Hoster"""
+    __author_name__ = ("spoob", "RaNaN", "Gregy")
+    __author_mail__ = ("spoob@pyload.org", "ranan@pyload.org", "gregy@gregy.cz")
+
+    def setup(self):
+        # free users: one download at a time; premium: parallel + resume
+        self.multiDL = False
+        if self.premium:
+            self.multiDL = True
+            self.chunkLimit = -1
+            self.resumeDownload = True
+
+    def process(self, pyfile):
+        """Entry point: prepare (api + html flow), then download."""
+        self.url = pyfile.url
+        self.prepare()
+        self.pyfile.setStatus("downloading")
+        self.proceed(self.url)
+
+    def prepare(self):
+        """Fetch api metadata and, for free users, run the html wait flow."""
+        self.download_api_data()
+
+        if self.api_data and self.api_data["filename"]:
+            self.pyfile.name = self.api_data["filename"]
+
+        if self.premium:
+            self.logDebug("Netload: Use Premium Account")
+            return True
+
+        if self.download_html():
+            return True
+        else:
+            self.fail("Failed")
+            return False
+
+    def download_api_data(self, n=0):
+        """Query the info api for this file; retries up to 3 times on an
+        empty answer.  Sets self.api_data to a dict, or False when the
+        answer is unusable and the html page must be scraped instead."""
+        url = self.url
+        id_regex = re.compile(self.__pattern__)
+        match = id_regex.search(url)
+
+        if match:
+            #normalize url
+            self.url = 'http://www.netload.in/datei%s.htm' % match.group(1)
+            self.logDebug("URL: %s" % self.url)
+        else:
+            self.api_data = False
+            return
+
+        apiurl = "http://api.netload.in/info.php"
+        src = self.load(apiurl, cookies=False, get={"file_id": match.group(1), "auth": "Zf9SnQh9WiReEsb18akjvQGqT0I830e8", "bz": "1", "md5": "1"}, decode = True).strip()
+        if not src and n <= 3:
+            # empty answer -- back off briefly and retry recursively
+            sleep(0.2)
+            self.download_api_data(n+1)
+            return
+
+        self.logDebug("Netload: APIDATA: "+src)
+        self.api_data = {}
+        if src and ";" in src and src not in ("unknown file_data", "unknown_server_data", "No input file specified."):
+            lines = src.split(";")
+            self.api_data["exists"] = True
+            self.api_data["fileid"] = lines[0]
+            self.api_data["filename"] = lines[1]
+            self.api_data["size"] = lines[2]
+            self.api_data["status"] = lines[3]
+            if self.api_data["status"] == "online":
+                self.api_data["checksum"] = lines[4].strip()
+            else:
+                self.api_data = False #check manually since api data is useless sometimes
+
+            if lines[0] == lines[1] and lines[2] == "0": #useless api data
+                self.api_data = False
+        else:
+            self.api_data = False
+
+    def final_wait(self, page):
+        """Honor the final countdown, then extract the real file url."""
+        wait_time = self.get_wait_time(page)
+        self.setWait(wait_time)
+        self.logDebug("Netload: final wait %d seconds" % wait_time)
+        self.wait()
+        self.url = self.get_file_url(page)
+
+    def download_html(self):
+        """Free-download flow: scrape metadata if needed, then loop up to 10
+        times through the wait/captcha pages until the download is prepared.
+
+        Returns True once final_wait() has resolved the file url, False when
+        all captcha attempts were used up.
+        """
+        self.logDebug("Netload: Entering download_html")
+        page = self.load(self.url, decode=True)
+        t = time() + 30
+
+        if "/share/templates/download_hddcrash.tpl" in page:
+            self.logError("Netload HDD Crash")
+            self.fail(_("File temporarily not available"))
+
+        if not self.api_data:
+            self.logDebug("API Data may be useless, get details from html page")
+
+            if "* The file was deleted" in page:
+                self.offline()
+
+            name = re.search(r'class="dl_first_filename">([^<]+)', page, re.MULTILINE)
+            # the found filename is not truncated
+            if name:
+                name = name.group(1).strip()
+                if not name.endswith(".."):
+                    self.pyfile.name = name
+
+        captchawaited = False
+        for i in range(10):
+
+            if not page:
+                page = self.load(self.url)
+                t = time() + 30
+
+            if "/share/templates/download_hddcrash.tpl" in page:
+                self.logError("Netload HDD Crash")
+                self.fail(_("File temporarily not available"))
+
+            self.logDebug("Netload: try number %d " % i)
+
+            if ">Your download is being prepared.<" in page:
+                self.logDebug("Netload: We will prepare your download")
+                self.final_wait(page)
+                return True
+            if ">An access request has been made from IP address <" in page:
+                # inter-download wait -- pause, then restart the whole flow
+                wait = self.get_wait_time(page)
+                if wait == 0:
+                    self.logDebug("Netload: Wait was 0 setting 30")
+                    wait = 30
+                self.logInfo(_("Netload: waiting between downloads %d s." % wait))
+                self.wantReconnect = True
+                self.setWait(wait)
+                self.wait()
+
+                return self.download_html()
+
+
+            self.logDebug("Netload: Trying to find captcha")
+
+            try:
+                url_captcha_html = "http://netload.in/" + re.search('(index.php\?id=10&amp;.*&amp;captcha=1)', page).group(1).replace("amp;", "")
+            except:
+                # no captcha link on this page -- force a reload next round
+                page = None
+                continue
+
+            try:
+                page = self.load(url_captcha_html, cookies=True)
+                captcha_url = "http://netload.in/" + re.search('(share/includes/captcha.php\?t=\d*)', page).group(1)
+            except:
+                self.logDebug("Netload: Could not find captcha, try again from beginning")
+                captchawaited = False
+                continue
+
+            file_id = re.search('<input name="file_id" type="hidden" value="(.*)" />', page).group(1)
+            if not captchawaited:
+                wait = self.get_wait_time(page)
+                if i == 0: self.pyfile.waitUntil = time() # don't wait contrary to time on web site
+                else: self.pyfile.waitUntil = t
+                self.logInfo(_("Netload: waiting for captcha %d s.") % (self.pyfile.waitUntil - time()))
+                #self.setWait(wait)
+                self.wait()
+                captchawaited = True
+
+            captcha = self.decryptCaptcha(captcha_url)
+            page = self.load("http://netload.in/index.php?id=10", post={"file_id": file_id, "captcha_check": captcha}, cookies=True)
+
+        return False
+
+
+    def get_file_url(self, page):
+        """Extract the direct download url from the final page; falls back
+        to a secondary link pattern, returns None if neither matches."""
+        try:
+            file_url_pattern = r"<a class=\"Orange_Link\" href=\"(http://.+)\".?>Or click here"
+            attempt = re.search(file_url_pattern, page)
+            if attempt is not None:
+                return attempt.group(1)
+            else:
+                self.logDebug("Netload: Backup try for final link")
+                file_url_pattern = r"<a href=\"(.+)\" class=\"Orange_Link\">Click here"
+                attempt = re.search(file_url_pattern, page)
+                return "http://netload.in/"+attempt.group(1)
+        except:
+            self.logDebug("Netload: Getting final link failed")
+            return None
+
+    def get_wait_time(self, page):
+        # the site's countdown() argument is in hundredths of a second
+        wait_seconds = int(re.search(r"countdown\((.+),'change\(\)'\)", page).group(1)) / 100
+        return wait_seconds
+
+
+    def proceed(self, url):
+        """Download the resolved url and sanity-check the result file."""
+        self.logDebug("Netload: Downloading..")
+
+        self.download(url, disposition=True)
+
+        check = self.checkDownload({"empty": re.compile(r"^$"), "offline": re.compile("The file was deleted")})
+
+        if check == "empty":
+            self.logInfo(_("Downloaded File was empty"))
+            self.retry()
+        elif check == "offline":
+            self.offline()
diff --git a/pyload/plugins/hoster/NovafileCom.py b/pyload/plugins/hoster/NovafileCom.py
new file mode 100644
index 000000000..dfd18761c
--- /dev/null
+++ b/pyload/plugins/hoster/NovafileCom.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+class NovafileCom(XFileSharingPro):
+    """novafile.com hoster plugin.
+
+    Only site-specific patterns are overridden here; the actual download
+    flow is inherited from the XFileSharingPro base plugin.
+    """
+    __name__ = "NovafileCom"
+    __type__ = "hoster"
+    __pattern__ = r"http://(?:\w*\.)*novafile\.com/\w{12}"
+    __version__ = "0.01"
+    __description__ = """novafile.com hoster plugin"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    FILE_SIZE_PATTERN = r'<div class="size">(?P<S>.+?)</div>'
+    #FILE_OFFLINE_PATTERN = '<b>&quot;File Not Found&quot;</b>|File has been removed due to Copyright Claim'
+    FORM_PATTERN = r'name="F\d+"'
+    ERROR_PATTERN = r'class="alert[^"]*alert-separate"[^>]*>\s*(?:<p>)?(.*?)\s*</'
+    DIRECT_LINK_PATTERN = r'<a href="(http://s\d+\.novafile\.com/.*?)" class="btn btn-green">Download File</a>'
+
+    HOSTER_NAME = "novafile.com"
+
+    def setup(self):
+        # downloads must run one at a time on this hoster
+        self.multiDL = False
+
+getInfo = create_getInfo(NovafileCom) \ No newline at end of file
diff --git a/pyload/plugins/hoster/NowDownloadEu.py b/pyload/plugins/hoster/NowDownloadEu.py
new file mode 100644
index 000000000..126ca3d89
--- /dev/null
+++ b/pyload/plugins/hoster/NowDownloadEu.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from random import random
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.utils import fixup
+
+class NowDownloadEu(SimpleHoster):
+ __name__ = "NowDownloadEu"
+ __type__ = "hoster"
+ __pattern__ = r"http://(www\.)?nowdownload\.(eu|co)/dl/(?P<ID>[a-z0-9]+)"
+ __version__ = "0.02"
+ __description__ = """NowDownloadEu"""
+ __author_name__ = ("godofdream")
+ FILE_INFO_PATTERN = r'Downloading</span> <br> (?P<N>.*) (?P<S>[0-9,.]+) (?P<U>[kKMG])i?B </h4>'
+ FILE_OFFLINE_PATTERN = r'(This file does not exist!)'
+ FILE_TOKEN_PATTERN = r'"(/api/token\.php\?token=[a-z0-9]+)"'
+ FILE_CONTINUE_PATTERN = r'"(/dl2/[a-z0-9]+/[a-z0-9]+)"'
+ FILE_WAIT_PATTERN = r'\.countdown\(\{until: \+(\d+),'
+ FILE_DOWNLOAD_LINK = r'"(http://f\d+\.nowdownload\.eu/dl/[a-z0-9]+/[a-z0-9]+/[^<>"]*?)"'
+
+ FILE_NAME_REPLACEMENTS = [("&#?\w+;", fixup), (r'<[^>]*>', '')]
+
+ def setup(self):
+ self.wantReconnect = False
+ self.multiDL = True
+ self.chunkLimit = -1
+ self.resumeDownload = True
+
+ def handleFree(self):
+ tokenlink = re.search(self.FILE_TOKEN_PATTERN, self.html)
+ continuelink = re.search(self.FILE_CONTINUE_PATTERN, self.html)
+ if (not tokenlink) or (not continuelink): self.fail('Plugin out of Date')
+
+ wait = 60
+ found = re.search(self.FILE_WAIT_PATTERN, self.html)
+ if found: wait = int(found.group(1))
+
+ self.html = self.load("http://www.nowdownload.eu" + str(tokenlink.group(1)))
+ self.setWait(wait)
+ self.wait()
+
+ self.html = self.load("http://www.nowdownload.eu" + str(continuelink.group(1)))
+
+ url = re.search(self.FILE_DOWNLOAD_LINK, self.html)
+ if not url: self.fail('Download Link not Found (Plugin out of Date?)')
+ self.logDebug('Download link: ' + str(url.group(1)))
+ self.download(str(url.group(1)))
+
+getInfo = create_getInfo(NowDownloadEu)
diff --git a/pyload/plugins/hoster/OneFichierCom.py b/pyload/plugins/hoster/OneFichierCom.py
new file mode 100644
index 000000000..c7c3384e9
--- /dev/null
+++ b/pyload/plugins/hoster/OneFichierCom.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+class OneFichierCom(SimpleHoster):
+ __name__ = "OneFichierCom"
+ __type__ = "hoster"
+ __pattern__ = r"(http://(\w+)\.((1fichier|d(es)?fichiers|pjointe)\.(com|fr|net|org)|(cjoint|mesfichiers|piecejointe|oi)\.(org|net)|tenvoi\.(com|org|net)|dl4free\.com|alterupload\.com|megadl.fr))"
+ __version__ = "0.47"
+ __description__ = """1fichier.com download hoster"""
+ __author_name__ = ("fragonib", "the-razer", "zoidberg","imclem")
+ __author_mail__ = ("fragonib[AT]yahoo[DOT]es", "daniel_ AT gmx DOT net", "zoidberg@mujmail.cz","imclem on github")
+
+ FILE_NAME_PATTERN = r'">File name :</th>\s*<td>(?P<N>[^<]+)</td>'
+ FILE_SIZE_PATTERN = r'<th>File size :</th>\s*<td>(?P<S>[^<]+)</td>'
+ FILE_OFFLINE_PATTERN = r'The (requested)? file (could not be found|has been deleted)'
+ FILE_URL_REPLACEMENTS = [(r'(http://[^/]*).*', r'\1/en/')]
+
+ DOWNLOAD_LINK_PATTERN = r'<br/>&nbsp;<br/>&nbsp;<br/>&nbsp;\s+<a href="(?P<url>http://.*?)"'
+ PASSWORD_PROTECTED_TOKEN = "protected by password"
+ WAITING_PATTERN = "Warning ! Without premium status, you can download only one file at a time and you must wait up to (\d+) minutes between each downloads."
+ def process(self, pyfile):
+ found = re.search(self.__pattern__, pyfile.url)
+ file_id = found.group(2)
+ url = "http://%s.%s/en/" % (found.group(2), found.group(3))
+ self.html = self.load(url, decode = True)
+
+ found = re.search(self.WAITING_PATTERN, self.html)
+ if found:
+ self.waitAndRetry(int(found.group(1)) * 60)
+
+ self.getFileInfo()
+
+ url, inputs = self.parseHtmlForm('action="http://%s' % file_id)
+ if not url or not inputs:
+ self.parseError("Download link not found")
+
+ # Check for protection
+ if "pass" in inputs:
+ inputs['pass'] = self.getPassword()
+
+ self.download(url, post = inputs)
+
+ # Check download
+ self.checkDownloadedFile()
+
+ def checkDownloadedFile(self):
+ check = self.checkDownload({"wait": self.WAITING_PATTERN})
+ if check == "wait":
+ self.waitAndRetry(int(self.lastcheck.group(1)) * 60)
+
+ def waitAndRetry(self, wait_time):
+ self.setWait(wait_time, True)
+ self.wait()
+ self.retry()
+
+getInfo = create_getInfo(OneFichierCom)
diff --git a/pyload/plugins/hoster/PornhostCom.py b/pyload/plugins/hoster/PornhostCom.py
new file mode 100644
index 000000000..ef7961d81
--- /dev/null
+++ b/pyload/plugins/hoster/PornhostCom.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster
+
+class PornhostCom(Hoster):
+ __name__ = "PornhostCom"
+ __type__ = "hoster"
+ __pattern__ = r'http://[\w\.]*?pornhost\.com/([0-9]+/[0-9]+\.html|[0-9]+)'
+ __version__ = "0.2"
+ __description__ = """Pornhost.com Download Hoster"""
+ __author_name__ = ("jeix")
+ __author_mail__ = ("jeix@hasnomail.de")
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+
+ ### old interface
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if self.html is None:
+ self.download_html()
+
+ file_url = re.search(r'download this file</label>.*?<a href="(.*?)"', self.html)
+ if not file_url:
+ file_url = re.search(r'"(http://dl[0-9]+\.pornhost\.com/files/.*?/.*?/.*?/.*?/.*?/.*?\..*?)"', self.html)
+ if not file_url:
+ file_url = re.search(r'width: 894px; height: 675px">.*?<img src="(.*?)"', self.html)
+ if not file_url:
+ file_url = re.search(r'"http://file[0-9]+\.pornhost\.com/[0-9]+/.*?"', self.html) # TODO: fix this one since it doesn't match
+
+ file_url = file_url.group(1).strip()
+
+ return file_url
+
+ def get_file_name(self):
+ if self.html is None:
+ self.download_html()
+
+ name = re.search(r'<title>pornhost\.com - free file hosting with a twist - gallery(.*?)</title>', self.html)
+ if not name:
+ name = re.search(r'id="url" value="http://www\.pornhost\.com/(.*?)/"', self.html)
+ if not name:
+ name = re.search(r'<title>pornhost\.com - free file hosting with a twist -(.*?)</title>', self.html)
+ if not name:
+ name = re.search(r'"http://file[0-9]+\.pornhost\.com/.*?/(.*?)"', self.html)
+
+ name = name.group(1).strip() + ".flv"
+
+ return name
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if self.html is None:
+ self.download_html()
+
+ if re.search(r'gallery not found', self.html) is not None \
+ or re.search(r'You will be redirected to', self.html) is not None:
+ return False
+ else:
+ return True
+
+
diff --git a/pyload/plugins/hoster/PornhubCom.py b/pyload/plugins/hoster/PornhubCom.py
new file mode 100644
index 000000000..c431004d8
--- /dev/null
+++ b/pyload/plugins/hoster/PornhubCom.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster
+
+class PornhubCom(Hoster):
+ __name__ = "PornhubCom"
+ __type__ = "hoster"
+ __pattern__ = r'http://[\w\.]*?pornhub\.com/view_video\.php\?viewkey=[\w\d]+'
+ __version__ = "0.5"
+ __description__ = """Pornhub.com Download Hoster"""
+ __author_name__ = ("jeix")
+ __author_mail__ = ("jeix@hasnomail.de")
+
+ def process(self, pyfile):
+ self.download_html()
+ if not self.file_exists():
+ self.offline()
+
+ pyfile.name = self.get_file_name()
+ self.download(self.get_file_url())
+
+ def download_html(self):
+ url = self.pyfile.url
+ self.html = self.load(url)
+
+ def get_file_url(self):
+ """ returns the absolute downloadable filepath
+ """
+ if self.html is None:
+ self.download_html()
+
+ url = "http://www.pornhub.com//gateway.php"
+ video_id = self.pyfile.url.split('=')[-1]
+ # thanks to jD team for this one v
+ post_data = "\x00\x03\x00\x00\x00\x01\x00\x0c\x70\x6c\x61\x79\x65\x72\x43\x6f\x6e\x66\x69\x67\x00\x02\x2f\x31\x00\x00\x00\x44\x0a\x00\x00\x00\x03\x02\x00"
+ post_data += chr(len(video_id))
+ post_data += video_id
+ post_data += "\x02\x00\x02\x2d\x31\x02\x00\x20"
+ post_data += "add299463d4410c6d1b1c418868225f7"
+
+ content = self.req.load(url, post=str(post_data))
+
+ new_content = ""
+ for x in content:
+ if ord(x) < 32 or ord(x) > 176:
+ new_content += '#'
+ else:
+ new_content += x
+
+ content = new_content
+
+ file_url = re.search(r'flv_url.*(http.*?)##post_roll', content).group(1)
+
+ return file_url
+
+ def get_file_name(self):
+ if self.html is None:
+ self.download_html()
+
+ match = re.search(r'<title[^>]+>([^<]+) - ', self.html)
+ if match:
+ name = match.group(1)
+ else:
+ matches = re.findall('<h1>(.*?)</h1>', self.html)
+ if len(matches) > 1:
+ name = matches[1]
+ else:
+ name = matches[0]
+
+ return name + '.flv'
+
+ def file_exists(self):
+ """ returns True or False
+ """
+ if self.html is None:
+ self.download_html()
+
+ if re.search(r'This video is no longer in our database or is in conversion', self.html) is not None:
+ return False
+ else:
+ return True
diff --git a/pyload/plugins/hoster/Premium4Me.py b/pyload/plugins/hoster/Premium4Me.py
new file mode 100644
index 000000000..0bdb22bd7
--- /dev/null
+++ b/pyload/plugins/hoster/Premium4Me.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from urllib import quote
+from os.path import exists
+from os import remove
+
+from pyload.plugins.Hoster import Hoster
+from pyload.utils.fs import fs_encode
+
+class Premium4Me(Hoster):
+ __name__ = "Premium4Me"
+ __version__ = "0.08"
+ __type__ = "hoster"
+
+ __pattern__ = r"http://premium.to/.*"
+ __description__ = """Premium.to hoster plugin"""
+ __author_name__ = ("RaNaN", "zoidberg", "stickell")
+ __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz", "l.stickell@yahoo.it")
+
+ def setup(self):
+ self.resumeDownload = True
+ self.chunkLimit = 1
+
+ def process(self, pyfile):
+ if not self.account:
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "premium.to")
+ self.fail("No premium.to account provided")
+
+ self.logDebug("premium.to: Old URL: %s" % pyfile.url)
+
+ tra = self.getTraffic()
+
+ #raise timeout to 2min
+ self.req.setOption("timeout", 120)
+
+ self.download(
+ "http://premium.to/api/getfile.php?authcode=%s&link=%s" % (self.account.authcode, quote(pyfile.url, "")),
+ disposition=True)
+
+ check = self.checkDownload({"nopremium": "No premium account available"})
+
+ if check == "nopremium":
+ self.retry(60, 300, 'No premium account available')
+
+ err = ''
+ if self.req.http.code == '420':
+ # Custom error code send - fail
+ lastDownload = fs_encode(self.lastDownload)
+
+ if exists(lastDownload):
+ f = open(lastDownload, "rb")
+ err = f.read(256).strip()
+ f.close()
+ remove(lastDownload)
+ else:
+ err = 'File does not exist'
+
+ trb = self.getTraffic()
+ self.logInfo("Filesize: %d, Traffic used %d, traffic left %d" % (pyfile.size, tra - trb, trb))
+
+ if err: self.fail(err)
+
+ def getTraffic(self):
+ try:
+ traffic = int(self.load("http://premium.to/api/traffic.php?authcode=%s" % self.account.authcode))
+ except:
+ traffic = 0
+ return traffic
diff --git a/pyload/plugins/hoster/PremiumizeMe.py b/pyload/plugins/hoster/PremiumizeMe.py
new file mode 100644
index 000000000..7f332e58b
--- /dev/null
+++ b/pyload/plugins/hoster/PremiumizeMe.py
@@ -0,0 +1,47 @@
+from pyload.plugins.Hoster import Hoster
+from pyload.utils import json_loads
+
+class PremiumizeMe(Hoster):
+ __name__ = "PremiumizeMe"
+ __version__ = "0.12"
+ __type__ = "hoster"
+ __description__ = """Premiumize.Me hoster plugin"""
+
+ # Since we want to allow the user to specify the list of hoster to use we let MultiHoster.coreReady create the regex patterns for us using getHosters in our PremiumizeMe hook.
+ __pattern__ = None
+
+ __author_name__ = ("Florian Franzen")
+ __author_mail__ = ("FlorianFranzen@gmail.com")
+
+ def process(self, pyfile):
+ # Check account
+ if not self.account or not self.account.isUsable():
+ self.logError(_("Please enter your %s account or deactivate this plugin") % "premiumize.me")
+ self.fail("No valid premiumize.me account provided")
+
+ # In some cases hosters do not supply us with a filename at download, so we are going to set a fall back filename (e.g. for freakshare or xfileshare)
+ self.pyfile.name = self.pyfile.name.split('/').pop() # Remove everything before last slash
+
+ # Correction for automatic assigned filename: Removing html at end if needed
+ suffix_to_remove = ["html", "htm", "php", "php3", "asp", "shtm", "shtml", "cfml", "cfm"]
+ temp = self.pyfile.name.split('.')
+ if temp.pop() in suffix_to_remove:
+ self.pyfile.name = ".".join(temp)
+
+
+ # Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api)
+ answer = self.load("https://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s" % (self.account.loginname, self.account.password, self.pyfile.url))
+ data = json_loads(answer)
+
+ # Check status and decide what to do
+ status = data['status']
+ if status == 200:
+ self.download(data['result']['location'], disposition=True)
+ elif status == 400:
+ self.fail("Invalid link")
+ elif status == 404:
+ self.offline()
+ elif status >= 500:
+ self.tempOffline()
+ else:
+ self.fail(data['statusmessage'])
diff --git a/pyload/plugins/hoster/PutlockerCom.py b/pyload/plugins/hoster/PutlockerCom.py
new file mode 100644
index 000000000..b2016472d
--- /dev/null
+++ b/pyload/plugins/hoster/PutlockerCom.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: jeix
+"""
+
+# http://www.putlocker.com/file/83C174C844583CF7
+
+import re
+
+from module.plugins.internal.SimpleHoster import SimpleHoster
+
+
+class PutlockerCom(SimpleHoster):
+ __name__ = "PutlockerCom"
+ __type__ = "hoster"
+ __pattern__ = r'http://(www\.)?putlocker\.com/(file|embed)/[A-Z0-9]+'
+ __version__ = "0.25"
+ __description__ = """Putlocker.Com"""
+ __author_name__ = ("jeix", "stickell")
+ __author_mail__ = ("l.stickell@yahoo.it")
+
+ FILE_OFFLINE_PATTERN = r"This file doesn't exist, or has been removed."
+ FILE_INFO_PATTERN = r'site-content">\s*<h1>(?P<N>.+)<strong>\( (?P<S>[^)]+) \)</strong></h1>'
+
+ def handleFree(self):
+ self.pyfile.url = re.sub(r'http://putlocker\.com', r'http://www.putlocker.com', self.pyfile.url)
+
+ self.html = self.load(self.pyfile.url, decode=True)
+
+ link = self._getLink()
+ if not link.startswith('http://'):
+ link = "http://www.putlocker.com" + link
+ self.download(link, disposition=True)
+
+ def _getLink(self):
+ hash_data = re.search(r'<input type="hidden" value="([a-z0-9]+)" name="hash">', self.html)
+ if not hash_data:
+ self.parseError('Unable to detect hash')
+
+ post_data = {"hash": hash_data.group(1), "confirm": "Continue+as+Free+User"}
+ self.html = self.load(self.pyfile.url, post=post_data)
+ if ">You have exceeded the daily stream limit for your country\\. You can wait until tomorrow" in self.html or \
+ "(>This content server has been temporarily disabled for upgrades|Try again soon\\. You can still download it below\\.<)" in self.html:
+ self.retry(wait_time=2 * 60 * 60, reason="Download limit exceeded or server disabled")
+
+ patterns = (r'(/get_file\.php\?id=[A-Z0-9]+&key=[A-Za-z0-9=]+&original=1)',
+ r"(/get_file\.php\?download=[A-Z0-9]+&key=[a-z0-9]+)",
+ r"(/get_file\.php\?download=[A-Z0-9]+&key=[a-z0-9]+&original=1)",
+ r'<a href="/gopro\.php">Tired of ads and waiting\? Go Pro!</a>[\t\n\rn ]+</div>[\t\n\rn ]+<a href="(/.*?)"')
+ for pattern in patterns:
+ link = re.search(pattern, self.html)
+ if link:
+ break
+ else:
+ link = re.search(r"playlist: '(/get_file\.php\?stream=[A-Za-z0-9=]+)'", self.html)
+ if link:
+ self.html = self.load("http://www.putlocker.com" + link.group(1))
+ link = re.search(r'media:content url="(http://.*?)"', self.html)
+ if not link:
+ link = re.search("\"(http://media\\-b\\d+\\.putlocker\\.com/download/\\d+/.*?)\"", self.html)
+ else:
+ self.parseError('Unable to detect a download link')
+
+ return link.group(1).replace("&amp;", "&")
diff --git a/pyload/plugins/hoster/QuickshareCz.py b/pyload/plugins/hoster/QuickshareCz.py
new file mode 100644
index 000000000..4932c4702
--- /dev/null
+++ b/pyload/plugins/hoster/QuickshareCz.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from pycurl import FOLLOWLOCATION
+
+class QuickshareCz(SimpleHoster):
+ __name__ = "QuickshareCz"
+ __type__ = "hoster"
+ __pattern__ = r"http://.*quickshare.cz/stahnout-soubor/.*"
+ __version__ = "0.54"
+ __description__ = """Quickshare.cz"""
+ __author_name__ = ("zoidberg")
+
+ FILE_NAME_PATTERN = r'<th width="145px">Název:</th>\s*<td style="word-wrap:break-word;">(?P<N>[^<]+)</td>'
+ FILE_SIZE_PATTERN = r'<th>Velikost:</th>\s*<td>(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</td>'
+ FILE_OFFLINE_PATTERN = r'<script type="text/javascript">location.href=\'/chyba\';</script>'
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url, decode = True)
+ self.getFileInfo()
+
+ # parse js variables
+ self.jsvars = dict((x, y.strip("'")) for x,y in re.findall(r"var (\w+) = ([0-9.]+|'[^']*')", self.html))
+ self.logDebug(self.jsvars)
+ pyfile.name = self.jsvars['ID3']
+
+ # determine download type - free or premium
+ if self.premium:
+ if 'UU_prihlasen' in self.jsvars:
+ if self.jsvars['UU_prihlasen'] == '0':
+ self.logWarning('User not logged in')
+ self.relogin(self.user)
+ self.retry()
+ elif float(self.jsvars['UU_kredit']) < float(self.jsvars['kredit_odecet']):
+ self.logWarning('Not enough credit left')
+ self.premium = False
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ check = self.checkDownload({"err": re.compile(r"\AChyba!")}, max_size=100)
+ if check == "err":
+ self.fail("File not found or plugin defect")
+
+ def handleFree(self):
+ # get download url
+ download_url = '%s/download.php' % self.jsvars['server']
+ data = dict((x, self.jsvars[x]) for x in self.jsvars if x in ('ID1', 'ID2', 'ID3', 'ID4'))
+ self.logDebug("FREE URL1:" + download_url, data)
+
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
+ self.load(download_url, post=data)
+ self.header = self.req.http.header
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+
+ found = re.search("Location\s*:\s*(.*)", self.header, re.I)
+ if not found: self.fail('File not found')
+ download_url = found.group(1)
+ self.logDebug("FREE URL2:" + download_url)
+
+ # check errors
+ found = re.search(r'/chyba/(\d+)', download_url)
+ if found:
+ if found.group(1) == '1':
+ self.retry(max_tries=60, wait_time=120, reason="This IP is already downloading")
+ elif found.group(1) == '2':
+ self.retry(max_tries=60, wait_time=60, reason="No free slots available")
+ else:
+ self.fail('Error %d' % found.group(1))
+
+ # download file
+ self.download(download_url)
+
+ def handlePremium(self):
+ download_url = '%s/download_premium.php' % self.jsvars['server']
+ data = dict((x, self.jsvars[x]) for x in self.jsvars if x in ('ID1', 'ID2', 'ID4', 'ID5'))
+ self.logDebug("PREMIUM URL:" + download_url, data)
+ self.download(download_url, get=data)
+
+getInfo = create_getInfo(QuickshareCz)
diff --git a/pyload/plugins/hoster/RapidgatorNet.py b/pyload/plugins/hoster/RapidgatorNet.py
new file mode 100644
index 000000000..3c4611446
--- /dev/null
+++ b/pyload/plugins/hoster/RapidgatorNet.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from pycurl import HTTPHEADER
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.internal.CaptchaService import ReCaptcha, SolveMedia, AdsCaptcha
+from module.common.json_layer import json_loads
+from module.network.HTTPRequest import BadHeader
+
+
+class RapidgatorNet(SimpleHoster):
+ __name__ = "RapidgatorNet"
+ __type__ = "hoster"
+ __pattern__ = r"http://(?:www\.)?(rapidgator.net)/file/(\w+)"
+ __version__ = "0.18"
+ __description__ = """rapidgator.net"""
+ __author_name__ = ("zoidberg", "chrox", "stickell")
+
+ API_URL = 'http://rapidgator.net/api/file'
+
+ FILE_INFO_PATTERN = r'Downloading:(\s*<[^>]*>)*\s*(?P<N>.*?)(\s*<[^>]*>)*\s*File size:\s*<strong>(?P<S>.*?)</strong>'
+ FILE_OFFLINE_PATTERN = r'<title>File not found</title>'
+
+ JSVARS_PATTERN = r"\s+var\s*(startTimerUrl|getDownloadUrl|captchaUrl|fid|secs)\s*=\s*'?(.*?)'?;"
+ DOWNLOAD_LINK_PATTERN = r"return '(http[^']+)';\s*}\s*}\s*}?\);"
+ RECAPTCHA_KEY_PATTERN = r'"http://api.recaptcha.net/challenge?k=(.*?)"'
+ ADSCAPTCHA_SRC_PATTERN = r'(http://api.adscaptcha.com/Get.aspx[^"\']*)'
+ SOLVEMEDIA_PATTERN = r'http:\/\/api\.solvemedia\.com\/papi\/challenge\.script\?k=(.*?)"'
+
+ def setup(self):
+ self.resumeDownload = False
+ self.multiDL = False
+ self.sid = None
+ self.chunkLimit = 1
+ self.req.setOption("timeout", 120)
+
+ def process(self, pyfile):
+ if self.account:
+ self.sid = self.account.getAccountData(self.user).get('SID', None)
+
+ if self.sid:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ def getAPIResponse(self, cmd):
+ try:
+ json = self.load('%s/%s' % (self.API_URL, cmd),
+ get={'sid': self.sid,
+ 'url': self.pyfile.url}, decode=True)
+ self.logDebug('API:%s' % cmd, json, "SID: %s" % self.sid)
+ json = json_loads(json)
+ status = json['response_status']
+ msg = json['response_details']
+ except BadHeader, e:
+ self.logError('API:%s' % cmd, e, "SID: %s" % self.sid)
+ status = e.code
+ msg = e
+
+ if status == 200:
+ return json['response']
+ elif status == 423:
+ self.account.empty(self.user)
+ self.retry()
+ else:
+ self.account.relogin(self.user)
+ self.retry(wait_time=60)
+
+ def handlePremium(self):
+ #self.logDebug("ACCOUNT_DATA", self.account.getAccountData(self.user))
+ self.api_data = self.getAPIResponse('info')
+ self.api_data['md5'] = self.api_data['hash']
+ self.pyfile.name = self.api_data['filename']
+ self.pyfile.size = self.api_data['size']
+ url = self.getAPIResponse('download')['url']
+ self.multiDL = True
+ self.download(url)
+
+ def handleFree(self):
+ self.html = self.load(self.pyfile.url, decode=True)
+ self.getFileInfo()
+
+ if "You can download files up to 500 MB in free mode" in self.html \
+ or "This file can be downloaded by premium only" in self.html:
+ self.fail("Premium account needed for download")
+
+ self.checkWait()
+
+ jsvars = dict(re.findall(self.JSVARS_PATTERN, self.html))
+ self.logDebug(jsvars)
+
+ self.req.http.lastURL = self.pyfile.url
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
+
+ url = "http://rapidgator.net%s?fid=%s" % (
+ jsvars.get('startTimerUrl', '/download/AjaxStartTimer'), jsvars["fid"])
+ jsvars.update(self.getJsonResponse(url))
+
+ self.setWait(int(jsvars.get('secs', 30)) + 1, False)
+ self.wait()
+
+ url = "http://rapidgator.net%s?sid=%s" % (
+ jsvars.get('getDownloadUrl', '/download/AjaxGetDownload'), jsvars["sid"])
+ jsvars.update(self.getJsonResponse(url))
+
+ self.req.http.lastURL = self.pyfile.url
+ self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With:"])
+
+ url = "http://rapidgator.net%s" % jsvars.get('captchaUrl', '/download/captcha')
+ self.html = self.load(url)
+ found = re.search(self.ADSCAPTCHA_SRC_PATTERN, self.html)
+ if found:
+ captcha_key = found.group(1)
+ captcha = AdsCaptcha(self)
+ else:
+ found = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
+ if found:
+ captcha_key = found.group(1)
+ captcha = ReCaptcha(self)
+
+ else:
+ found = re.search(self.SOLVEMEDIA_PATTERN, self.html)
+ if found:
+ captcha_key = found.group(1)
+ captcha = SolveMedia(self)
+ else:
+ self.parseError("Captcha")
+
+ for i in range(5):
+ self.checkWait()
+ captcha_challenge, captcha_response = captcha.challenge(captcha_key)
+
+ self.html = self.load(url, post={
+ "DownloadCaptchaForm[captcha]": "",
+ "adcopy_challenge": captcha_challenge,
+ "adcopy_response": captcha_response
+ })
+
+ if 'The verification code is incorrect' in self.html:
+ self.invalidCaptcha()
+ else:
+ self.correctCaptcha()
+ break
+ else:
+ self.fail("No valid captcha solution received")
+
+ found = re.search(self.DOWNLOAD_LINK_PATTERN, self.html)
+ if not found:
+ self.parseError("download link")
+ download_url = found.group(1)
+ self.logDebug(download_url)
+ self.download(download_url)
+
+ def checkWait(self):
+ found = re.search(r"(?:Delay between downloads must be not less than|Try again in)\s*(\d+)\s*(hour|min)",
+ self.html)
+ if found:
+ wait_time = int(found.group(1)) * {"hour": 60, "min": 1}[found.group(2)]
+ else:
+ found = re.search(r"You have reached your (daily|hourly) downloads limit", self.html)
+ if found:
+ wait_time = 60
+ else:
+ return
+
+ self.logDebug("Waiting %d minutes" % wait_time)
+ self.setWait(wait_time * 60, True)
+ self.wait()
+ self.retry(max_tries=24)
+
+ def getJsonResponse(self, url):
+ response = self.load(url, decode=True)
+ if not response.startswith('{'):
+ self.retry()
+ self.logDebug(url, response)
+ return json_loads(response)
+
+
+getInfo = create_getInfo(RapidgatorNet)
diff --git a/pyload/plugins/hoster/RapidshareCom.py b/pyload/plugins/hoster/RapidshareCom.py
new file mode 100644
index 000000000..150dd425d
--- /dev/null
+++ b/pyload/plugins/hoster/RapidshareCom.py
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# v1.36
+# * fixed call checkfiles subroutine
+# v1.35
+# * fixed rs-urls in handleFree(..) and freeWait(..)
+# * removed getInfo(..) function as it was not used anywhere (in this file)
+# * removed some (old?) comment blocks
+
+import re
+
+from module.network.RequestFactory import getURL
+from module.plugins.Hoster import Hoster
+
+def getInfo(urls):
+ ids = ""
+ names = ""
+
+ p = re.compile(RapidshareCom.__pattern__)
+
+ for url in urls:
+ r = p.search(url)
+ if r.group("name"):
+ ids+= ","+r.group("id")
+ names+= ","+r.group("name")
+ elif r.group("name_new"):
+ ids+= ","+r.group("id_new")
+ names+= ","+r.group("name_new")
+
+ url = "http://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=checkfiles&files=%s&filenames=%s" % (ids[1:], names[1:])
+
+ api = getURL(url)
+ result = []
+ i = 0
+ for res in api.split():
+ tmp = res.split(",")
+ if tmp[4] in ("0", "4", "5"): status = 1
+ elif tmp[4] == "1": status = 2
+ else: status = 3
+
+ result.append( (tmp[1], tmp[2], status, urls[i]) )
+ i += 1
+
+ yield result
+
+
+class RapidshareCom(Hoster):
+ __name__ = "RapidshareCom"
+ __type__ = "hoster"
+ __pattern__ = r"https?://[\w\.]*?rapidshare.com/(?:files/(?P<id>\d*?)/(?P<name>[^?]+)|#!download\|(?:\w+)\|(?P<id_new>\d+)\|(?P<name_new>[^|]+))"
+ __version__ = "1.39"
+ __description__ = """Rapidshare.com Download Hoster"""
+ __config__ = [("server", "Cogent;Deutsche Telekom;Level(3);Level(3) #2;GlobalCrossing;Level(3) #3;Teleglobe;GlobalCrossing #2;TeliaSonera #2;Teleglobe #2;TeliaSonera #3;TeliaSonera", "Preferred Server", "None")]
+ __author_name__ = ("spoob", "RaNaN", "mkaay")
+ __author_mail__ = ("spoob@pyload.org", "ranan@pyload.org", "mkaay@mkaay.de")
+
+ def setup(self):
+ self.html = None
+ self.no_download = True
+ self.api_data = None
+ self.offset = 0
+ self.dl_dict = {}
+
+ self.id = None
+ self.name = None
+
+ self.chunkLimit = -1 if self.premium else 1
+ self.multiDL = self.resumeDownload = self.premium
+
+ def process(self, pyfile):
+ self.url = self.pyfile.url
+ self.prepare()
+
+ def prepare(self):
+ m = re.search(self.__pattern__, self.url)
+
+ if m.group("name"):
+ self.id = m.group("id")
+ self.name = m.group("name")
+ else:
+ self.id = m.group("id_new")
+ self.name = m.group("name_new")
+
+ self.download_api_data()
+ if self.api_data["status"] == "1":
+ self.pyfile.name = self.get_file_name()
+
+ if self.premium:
+ self.handlePremium()
+ else:
+ self.handleFree()
+
+ elif self.api_data["status"] == "2":
+ self.logInfo(_("Rapidshare: Traffic Share (direct download)"))
+ self.pyfile.name = self.get_file_name()
+
+ self.download(self.pyfile.url, get={"directstart":1})
+
+ elif self.api_data["status"] in ("0","4","5"):
+ self.offline()
+ elif self.api_data["status"] == "3":
+ self.tempOffline()
+ else:
+ self.fail("Unknown response code.")
+
+ def handleFree(self):
+
+ while self.no_download:
+ self.dl_dict = self.freeWait()
+
+ #tmp = "#!download|%(server)s|%(id)s|%(name)s|%(size)s"
+ download = "http://%(host)s/cgi-bin/rsapi.cgi?sub=download&editparentlocation=0&bin=1&fileid=%(id)s&filename=%(name)s&dlauth=%(auth)s" % self.dl_dict
+
+ self.logDebug("RS API Request: %s" % download)
+ self.download(download, ref=False)
+
+ check = self.checkDownload({"ip" : "You need RapidPro to download more files from your IP address",
+ "auth" : "Download auth invalid"})
+ if check == "ip":
+ self.setWait(60)
+ self.logInfo(_("Already downloading from this ip address, waiting 60 seconds"))
+ self.wait()
+ self.handleFree()
+ elif check == "auth":
+ self.logInfo(_("Invalid Auth Code, download will be restarted"))
+ self.offset += 5
+ self.handleFree()
+
+ def handlePremium(self):
+ info = self.account.getAccountInfo(self.user, True)
+ self.logDebug("%s: Use Premium Account" % self.__name__)
+ url = self.api_data["mirror"]
+ self.download(url, get={"directstart":1})
+
+
+ def download_api_data(self, force=False):
+ """
+ http://images.rapidshare.com/apidoc.txt
+ """
+ if self.api_data and not force:
+ return
+ api_url_base = "http://api.rapidshare.com/cgi-bin/rsapi.cgi"
+ api_param_file = {"sub": "checkfiles", "incmd5": "1", "files": self.id, "filenames": self.name}
+ src = self.load(api_url_base, cookies=False, get=api_param_file).strip()
+ self.logDebug("RS INFO API: %s" % src)
+ if src.startswith("ERROR"):
+ return
+ fields = src.split(",")
+ """
+ status codes:
+ 0=File not found
+ 1=File OK (Anonymous downloading)
+ 3=Server down
+ 4=File marked as illegal
+ 5=Anonymous file locked, because it has more than 10 downloads already
+ 50+n=File OK (TrafficShare direct download type "n" without any logging.)
+ 100+n=File OK (TrafficShare direct download type "n" with logging. Read our privacy policy to see what is logged.)
+ """
+ self.api_data = {"fileid": fields[0], "filename": fields[1], "size": int(fields[2]), "serverid": fields[3],
+ "status": fields[4], "shorthost": fields[5], "checksum": fields[6].strip().lower()}
+
+ if int(self.api_data["status"]) > 100:
+ self.api_data["status"] = str(int(self.api_data["status"]) - 100)
+ elif int(self.api_data["status"]) > 50:
+ self.api_data["status"] = str(int(self.api_data["status"]) - 50)
+
+ self.api_data["mirror"] = "http://rs%(serverid)s%(shorthost)s.rapidshare.com/files/%(fileid)s/%(filename)s" % self.api_data
+
+ def freeWait(self):
+ """downloads html with the important information
+ """
+ self.no_download = True
+
+ id = self.id
+ name = self.name
+
+ prepare = "https://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=download&fileid=%(id)s&filename=%(name)s&try=1&cbf=RSAPIDispatcher&cbid=1" % {"name": name, "id" : id}
+
+ self.logDebug("RS API Request: %s" % prepare)
+ result = self.load(prepare, ref=False)
+ self.logDebug("RS API Result: %s" % result)
+
+ between_wait = re.search("You need to wait (\d+) seconds", result)
+
+ if "You need RapidPro to download more files from your IP address" in result:
+ self.setWait(60)
+ self.logInfo(_("Already downloading from this ip address, waiting 60 seconds"))
+ self.wait()
+ elif "Too many users downloading from this server right now" in result or "All free download slots are full" in result:
+ self.setWait(120)
+ self.logInfo(_("RapidShareCom: No free slots"))
+ self.wait()
+ elif "This file is too big to download it for free" in result:
+ self.fail(_("You need a premium account for this file"))
+ elif "Filename invalid." in result:
+ self.fail(_("Filename reported invalid"))
+ elif between_wait:
+ self.setWait(int(between_wait.group(1)))
+ self.wantReconnect = True
+ self.wait()
+ else:
+ self.no_download = False
+
+ tmp, info = result.split(":")
+ data = info.split(",")
+
+ dl_dict = {"id": id,
+ "name": name,
+ "host": data[0],
+ "auth": data[1],
+ "server": self.api_data["serverid"],
+ "size": self.api_data["size"]
+ }
+ self.setWait(int(data[2])+2+self.offset)
+ self.wait()
+
+ return dl_dict
+
+
+ def get_file_name(self):
+ if self.api_data["filename"]:
+ return self.api_data["filename"]
+ return self.url.split("/")[-1] \ No newline at end of file
diff --git a/pyload/plugins/hoster/RarefileNet.py b/pyload/plugins/hoster/RarefileNet.py
new file mode 100644
index 000000000..a0f5930b5
--- /dev/null
+++ b/pyload/plugins/hoster/RarefileNet.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+from module.utils import html_unescape
+
+
class RarefileNet(XFileSharingPro):
    """Rarefile.net hoster plugin built on the XFileSharingPro framework."""

    __name__ = "RarefileNet"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*rarefile.net/\w{12}"
    __version__ = "0.03"
    __description__ = """Rarefile.net hoster plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    FILE_NAME_PATTERN = r'<td><font color="red">(?P<N>.*?)</font></td>'
    FILE_SIZE_PATTERN = r'<td>Size : (?P<S>.+?)&nbsp;'
    DIRECT_LINK_PATTERN = r'<a href="(?P<link>[^"]+)">(?P=link)</a>'
    HOSTER_NAME = "rarefile.net"

    def setup(self):
        # Resume and parallel downloads are premium-only on this hoster.
        self.resumeDownload = self.multiDL = self.premium

    def handleCaptcha(self, inputs):
        # The code digits are <span>s whose CSS left-padding encodes their
        # visual order; sort by padding to reconstruct the code.
        captcha_div = re.search(r'<b>Enter code.*?<div.*?>(.*?)</div>', self.html, re.S).group(1)
        self.logDebug(captcha_div)
        numerals = re.findall('<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', html_unescape(captcha_div))
        ordered = sorted(numerals, key=lambda pair: int(pair[0]))
        inputs['code'] = "".join(pair[1] for pair in ordered)
        self.logDebug("CAPTCHA", inputs['code'], numerals)
        return 3

getInfo = create_getInfo(RarefileNet)
diff --git a/pyload/plugins/hoster/RealdebridCom.py b/pyload/plugins/hoster/RealdebridCom.py
new file mode 100644
index 000000000..73baff5b3
--- /dev/null
+++ b/pyload/plugins/hoster/RealdebridCom.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from time import time
+from urllib import quote, unquote
+from random import randrange
+
+from module.utils import parseFileSize, remove_chars
+from module.common.json_layer import json_loads
+from module.plugins.Hoster import Hoster
+
class RealdebridCom(Hoster):
    __name__ = "RealdebridCom"
    __version__ = "0.51"
    __type__ = "hoster"

    __pattern__ = r"https?://.*real-debrid\..*"
    __description__ = """Real-Debrid.com hoster plugin"""
    __author_name__ = ("Devirex, Hazzard")
    __author_mail__ = ("naibaf_11@yahoo.de")

    def getFilename(self, url):
        """Best-effort filename from the URL tail; random ``NNN.tmp`` fallback."""
        try:
            name = unquote(url.rsplit("/", 1)[1])
        except IndexError:
            name = "Unknown_Filename..."
        if not name or name.endswith(".."): #incomplete filename, append random stuff
            name += "%s.tmp" % randrange(100,999)
        return name

    def init(self):
        # tries appears unused within this class; kept for compatibility.
        self.tries = 0
        self.chunkLimit = 3
        self.resumeDownload = True


    def process(self, pyfile):
        """Unrestrict pyfile.url through the Real-Debrid API, then download.

        Requires a Real-Debrid account. URLs that already point at
        real-debrid are used directly; everything else goes through
        /ajax/unrestrict.php. Non-zero ``error`` marks the file offline or
        temporarily offline depending on the API message.
        """
        if not self.account:
            self.logError(_("Please enter your %s account or deactivate this plugin") % "Real-debrid")
            self.fail("No Real-debrid account provided")

        self.logDebug("Real-Debrid: Old URL: %s" % pyfile.url)
        if re.match(self.__pattern__, pyfile.url):
            new_url = pyfile.url
        else:
            # Only the first line of the package password is sent.
            password = self.getPassword().splitlines()
            if not password: password = ""
            else: password = password[0]

            url = "http://real-debrid.com/ajax/unrestrict.php?lang=en&link=%s&password=%s&time=%s" % (quote(pyfile.url, ""), password, int(time()*1000))
            page = self.load(url)
            data = json_loads(page)

            self.logDebug("Returned Data: %s" % data)

            if data["error"] != 0:
                if data["message"] == "Your file is unavailable on the hoster.":
                    self.offline()
                else:
                    self.logWarning(data["message"])
                    self.tempOffline()
            else:
                # Replace placeholder names (see getFilename) with API metadata.
                if self.pyfile.name is not None and self.pyfile.name.endswith('.tmp') and data["file_name"]:
                    self.pyfile.name = data["file_name"]
                self.pyfile.size = parseFileSize(data["file_size"])
                new_url = data['generated_links'][0][-1]

        # Force the scheme to match the user's "https" config choice.
        if self.getConfig("https"):
            new_url = new_url.replace("http://", "https://")
        else:
            new_url = new_url.replace("https://", "http://")

        self.logDebug("Real-Debrid: New URL: %s" % new_url)

        if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
            #only use when name wasnt already set
            pyfile.name = self.getFilename(new_url)

        self.download(new_url, disposition=True)

        check = self.checkDownload(
            {"error": "<title>An error occured while processing your request</title>"})

        if check == "error":
            #usual this download can safely be retried
            self.retry(reason="An error occured while generating link.", wait_time=60)
+
diff --git a/pyload/plugins/hoster/RedtubeCom.py b/pyload/plugins/hoster/RedtubeCom.py
new file mode 100644
index 000000000..c2083e679
--- /dev/null
+++ b/pyload/plugins/hoster/RedtubeCom.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster
+from module.unescape import unescape
+
class RedtubeCom(Hoster):
    __name__ = "RedtubeCom"
    __type__ = "hoster"
    __pattern__ = r'http://[\w\.]*?redtube\.com/\d+'
    __version__ = "0.2"
    __description__ = """Redtube.com Download Hoster"""
    __author_name__ = ("jeix")
    __author_mail__ = ("jeix@hasnomail.de")

    def process(self, pyfile):
        """Fetch the page, check availability, then download the video."""
        self.download_html()
        if not self.file_exists():
            self.offline()

        pyfile.name = self.get_file_name()
        self.download(self.get_file_url())

    def download_html(self):
        """Load the video page into self.html."""
        self.html = self.load(self.pyfile.url)

    def get_file_url(self):
        """Return the absolute downloadable file URL (the 'hashlink' parameter)."""
        if self.html is None:
            self.download_html()
        return unescape(re.search(r'hashlink=(http.*?)"', self.html).group(1))

    def get_file_name(self):
        """Derive an .flv file name from the page <title>."""
        if self.html is None:
            self.download_html()
        title = re.search('<title>(.*?)- RedTube - Free Porn Videos</title>', self.html)
        return title.group(1).strip() + ".flv"

    def file_exists(self):
        """Return False when the page reports the video as removed."""
        if self.html is None:
            self.download_html()
        return re.search(r'This video has been removed.', self.html) is None
+
diff --git a/pyload/plugins/hoster/RehostTo.py b/pyload/plugins/hoster/RehostTo.py
new file mode 100644
index 000000000..7d1b1c3ea
--- /dev/null
+++ b/pyload/plugins/hoster/RehostTo.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from urllib import quote, unquote
+from module.plugins.Hoster import Hoster
+
class RehostTo(Hoster):
    __name__ = "RehostTo"
    __version__ = "0.13"
    __type__ = "hoster"
    __pattern__ = r"https?://.*rehost.to\..*"
    __description__ = """rehost.com hoster plugin"""
    __author_name__ = ("RaNaN")
    __author_mail__ = ("RaNaN@pyload.org")

    def getFilename(self, url):
        """Filename = unquoted last path segment of the URL."""
        return unquote(url.rsplit("/", 1)[1])

    def setup(self):
        self.chunkLimit = 1
        self.resumeDownload = True

    def process(self, pyfile):
        """Route the download through rehost.to using the account session."""
        # An account is mandatory: the long_ses token comes from it.
        if not self.account:
            self.logError(_("Please enter your %s account or deactivate this plugin") % "rehost.to")
            self.fail("No rehost.to account provided")

        session = self.account.getAccountInfo(self.user)["long_ses"]

        self.logDebug("Rehost.to: Old URL: %s" % pyfile.url)
        new_url = "http://rehost.to/process_download.php?user=cookie&pass=%s&dl=%s" % (session, quote(pyfile.url, ""))

        #raise timeout to 2min
        self.req.setOption("timeout", 120)

        self.download(new_url, disposition=True)
diff --git a/pyload/plugins/hoster/ReloadCc.py b/pyload/plugins/hoster/ReloadCc.py
new file mode 100644
index 000000000..b39c2812c
--- /dev/null
+++ b/pyload/plugins/hoster/ReloadCc.py
@@ -0,0 +1,103 @@
+from module.plugins.Hoster import Hoster
+
+from module.common.json_layer import json_loads
+
+from module.network.HTTPRequest import BadHeader
+
class ReloadCc(Hoster):
    __name__ = "ReloadCc"
    __version__ = "0.5"
    __type__ = "hoster"
    __description__ = """Reload.Cc hoster plugin"""

    # Since we want to allow the user to specify the list of hoster to use we let MultiHoster.coreReady create the regex patterns for us using getHosters in our ReloadCc hook.
    __pattern__ = None

    __author_name__ = ("Reload Team")
    __author_mail__ = ("hello@reload.cc")

    def process(self, pyfile):
        """Download pyfile.url through the Reload.cc multi-hoster API.

        Maps the API's HTTP error codes to pyLoad actions (fail / retry /
        wait) for both the unrestrict call and the final download.
        """
        # Check account
        if not self.account or not self.account.canUse():
            self.logError(_("Please enter your %s account or deactivate this plugin") % "reload.cc")
            self.fail("No valid reload.cc account provided")

        # In some cases hostsers do not supply us with a filename at download, so we are going to set a fall back filename (e.g. for freakshare or xfileshare)
        self.pyfile.name = self.pyfile.name.split('/').pop() # Remove everthing before last slash

        # Correction for automatic assigned filename: Removing html at end if needed
        suffix_to_remove = ["html", "htm", "php", "php3", "asp", "shtm", "shtml", "cfml", "cfm"]
        temp = self.pyfile.name.split('.')
        if temp.pop() in suffix_to_remove:
            self.pyfile.name = ".".join(temp)

        # Get account data
        (user, data) = self.account.selectAccount()

        query_params = dict(
            via='pyload',
            v=1,
            user=user,
            uri=self.pyfile.url
        )

        # Prefer the cached password hash; fall back to the plain password.
        try:
            query_params.update(dict(hash=self.account.infos[user]['pwdhash']))
        except Exception:
            query_params.update(dict(pwd=data['password']))

        try:
            answer = self.load("http://api.reload.cc/dl", get=query_params)
        except BadHeader, e:
            if e.code == 400:
                self.fail("The URI is not supported by Reload.cc.")
            elif e.code == 401:
                self.fail("Wrong username or password")
            elif e.code == 402:
                self.fail("Your account is inactive. A payment is required for downloading!")
            elif e.code == 403:
                self.fail("Your account is disabled. Please contact the Reload.cc support!")
            elif e.code == 409:
                self.logWarning("The hoster seems to be a limited hoster and you've used your daily traffic for this hoster: %s" % self.pyfile.url)
                # Wait for 6 hours and retry up to 4 times => one day
                self.retry(max_retries=4, wait_time=(3600 * 6), reason="Limited hoster traffic limit exceeded")
            elif e.code == 429:
                self.retry(max_retries=5, wait_time=120, reason="Too many concurrent connections") # Too many connections, wait 2 minutes and try again
            elif e.code == 503:
                self.retry(wait_time=600, reason="Reload.cc is currently in maintenance mode! Please check again later.") # Retry in 10 minutes
            else:
                self.fail("Internal error within Reload.cc. Please contact the Reload.cc support for further information.")
            return

        data = json_loads(answer)

        # Check status and decide what to do
        status = data.get('status', None)
        if status == "ok":
            conn_limit = data.get('msg', 0)
            # API says these connections are limited
            # Make sure this limit is used - the download will fail if not
            if conn_limit > 0:
                try:
                    self.limitDL = int(conn_limit)
                except ValueError:
                    self.limitDL = 1
            else:
                self.limitDL = 0

            try:
                self.download(data['link'], disposition=True)
            except BadHeader, e:
                if e.code == 404:
                    self.fail("File Not Found")
                elif e.code == 412:
                    self.fail("File access password is wrong")
                elif e.code == 417:
                    self.fail("Password required for file access")
                elif e.code == 429:
                    self.retry(max_retries=5, wait_time=120, reason="Too many concurrent connections") # Too many connections, wait 2 minutes and try again
                else:
                    self.fail("Internal error within Reload.cc. Please contact the Reload.cc support for further information.")
                return
        else:
            self.fail("Internal error within Reload.cc. Please contact the Reload.cc support for further information.")
diff --git a/pyload/plugins/hoster/RyushareCom.py b/pyload/plugins/hoster/RyushareCom.py
new file mode 100644
index 000000000..7bfe4e8fe
--- /dev/null
+++ b/pyload/plugins/hoster/RyushareCom.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+import re
+
+
class RyushareCom(XFileSharingPro):
    """ryushare.com hoster plugin (XFileSharingPro-based).

    Free downloads need two form submissions and a per-attempt wait before
    the direct link is exposed.
    """

    __name__ = "RyushareCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*?ryushare.com/\w{11,}"
    __version__ = "0.11"
    __description__ = """ryushare.com hoster plugin"""
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    HOSTER_NAME = "ryushare.com"

    WAIT_PATTERN = r'(?:You have to|Please) wait (?:(?P<min>\d+) minutes, )?(?:<span id="[^"]+">)?(?P<sec>\d+)(?:</span>)? seconds'
    DIRECT_LINK_PATTERN = r'<a href="([^"]+)">Click here to download</a>'

    def setup(self):
        self.resumeDownload = self.multiDL = True
        if not self.premium:
            self.limitDL = 2
        # Up to 3 chunks allowed in free downloads. Unknown for premium
        self.chunkLimit = 3

    def getDownloadLink(self):
        """Submit the download forms, honouring wait times, until the direct
        link appears (at most 10 attempts); raises a parse error otherwise."""
        self.html = self.load(self.pyfile.url)
        action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})
        # `in` instead of the deprecated dict.has_key()
        if 'method_premium' in inputs:
            del inputs['method_premium']

        self.html = self.load(self.pyfile.url, post = inputs)
        action, inputs = self.parseHtmlForm('F1')

        for i in xrange(10):
            self.logInfo('Attempt to detect direct link #%d' % i)

            # Wait
            if 'You have reached the download-limit!!!' in self.html:
                self.setWait(3600, True)
            else:
                wait = re.search(self.WAIT_PATTERN, self.html)
                if wait is None:
                    # Fix: previously crashed with AttributeError when the
                    # wait timer was missing from the page.
                    self.parseError('Wait time not found')
                m = wait.groupdict('0')
                waittime = int(m['min']) * 60 + int(m['sec'])
                self.setWait(waittime)
            self.wait()

            self.html = self.load(self.pyfile.url, post = inputs)
            if 'Click here to download' in self.html:
                m = re.search(self.DIRECT_LINK_PATTERN, self.html)
                return m.group(1)

        self.parseError('No direct link within 10 retries')

getInfo = create_getInfo(RyushareCom)
diff --git a/pyload/plugins/hoster/SecureUploadEu.py b/pyload/plugins/hoster/SecureUploadEu.py
new file mode 100644
index 000000000..b9a900d96
--- /dev/null
+++ b/pyload/plugins/hoster/SecureUploadEu.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
class SecureUploadEu(XFileSharingPro):
    # Pure-configuration plugin: all behavior comes from XFileSharingPro.
    __name__ = "SecureUploadEu"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?secureupload\.eu/(\w){12}(/\w+)"
    __version__ = "0.01"
    __description__ = """SecureUpload.eu hoster plugin"""
    __author_name__ = ("z00nx")
    __author_mail__ = ("z00nx0@gmail.com")

    HOSTER_NAME = "secureupload.eu"
    # Name (N) and size (S) scraped from the download page heading.
    FILE_INFO_PATTERN = '<h3>Downloading (?P<N>[^<]+) \((?P<S>[^<]+)\)</h3>'
    FILE_OFFLINE_PATTERN = 'The file was removed|File Not Found'

getInfo = create_getInfo(SecureUploadEu)
diff --git a/pyload/plugins/hoster/SendmywayCom.py b/pyload/plugins/hoster/SendmywayCom.py
new file mode 100644
index 000000000..fcbac850a
--- /dev/null
+++ b/pyload/plugins/hoster/SendmywayCom.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
class SendmywayCom(XFileSharingPro):
    # Pure-configuration plugin: all behavior comes from XFileSharingPro.
    __name__ = "SendmywayCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*?sendmyway.com/\w{12}"
    __version__ = "0.01"
    __description__ = """SendMyWay hoster plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Name (N) and byte size (S) scraped from the download page.
    FILE_NAME_PATTERN = r'<p class="file-name" ><.*?>\s*(?P<N>.+)'
    FILE_SIZE_PATTERN = r'<small>\((?P<S>\d+) bytes\)</small>'
    HOSTER_NAME = "sendmyway.com"

getInfo = create_getInfo(SendmywayCom)
diff --git a/pyload/plugins/hoster/SendspaceCom.py b/pyload/plugins/hoster/SendspaceCom.py
new file mode 100644
index 000000000..22abaff56
--- /dev/null
+++ b/pyload/plugins/hoster/SendspaceCom.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
class SendspaceCom(SimpleHoster):
    """sendspace.com plugin - free downloads only.

    Polls the download page up to three times, answering the two-image
    captcha when shown, until the direct download button appears.
    """

    __name__ = "SendspaceCom"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?sendspace.com/file/.*"
    __version__ = "0.13"
    __description__ = """sendspace.com plugin - free only"""
    __author_name__ = ("zoidberg")

    DOWNLOAD_URL_PATTERN = r'<a id="download_button" href="([^"]+)"'
    FILE_NAME_PATTERN = r'<h2 class="bgray">\s*<(?:b|strong)>(?P<N>[^<]+)</'
    FILE_SIZE_PATTERN = r'<div class="file_description reverse margin_center">\s*<b>File Size:</b>\s*(?P<S>[0-9.]+)(?P<U>[kKMG])i?B\s*</div>'
    FILE_OFFLINE_PATTERN = r'<div class="msg error" style="cursor: default">Sorry, the file you requested is not available.</div>'
    # Fix: '?' is a regex quantifier and must be escaped, otherwise the
    # literal "captcha.php?..." URL can never match.
    CAPTCHA_PATTERN = r'<td><img src="(/captchas/captcha.php\?captcha=([^"]+))"></td>'
    # Fix: restored the missing opening parenthesis - the pattern was
    # unbalanced (raised re.error) and group(2), the captcha hash, was absent.
    USER_CAPTCHA_PATTERN = r'<td><img src="(/captchas/captcha.php\?user=([^"]+))"></td>'

    def handleFree(self):
        """Loop up to 3 page submissions: detect the download button, else
        solve/resubmit the captcha or request a regular download."""
        params = {}
        for i in range(3):
            found = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
            if found:
                # A captcha answered in a previous round was accepted.
                if 'captcha_hash' in params: self.correctCaptcha()
                download_url = found.group(1)
                break

            found = re.search(self.CAPTCHA_PATTERN, self.html)
            if found:
                if 'captcha_hash' in params: self.invalidCaptcha()
                captcha_url1 = "http://www.sendspace.com/" + found.group(1)
                found = re.search(self.USER_CAPTCHA_PATTERN, self.html)
                captcha_url2 = "http://www.sendspace.com/" + found.group(1)
                params = {'captcha_hash' : found.group(2),
                          'captcha_submit': 'Verify',
                          'captcha_answer': self.decryptCaptcha(captcha_url1) + " " + self.decryptCaptcha(captcha_url2)
                }
            else:
                params = {'download': "Regular Download"}

            self.logDebug(params)
            self.html = self.load(self.pyfile.url, post = params)
        else:
            self.fail("Download link not found")

        self.logDebug("Download URL: %s" % download_url)
        self.download(download_url)

# Fix: the factory result must be bound to module-level `getInfo` so the
# plugin manager can use it for link checking.
getInfo = create_getInfo(SendspaceCom)
diff --git a/pyload/plugins/hoster/Share4webCom.py b/pyload/plugins/hoster/Share4webCom.py
new file mode 100644
index 000000000..ef9c2acf8
--- /dev/null
+++ b/pyload/plugins/hoster/Share4webCom.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from module.plugins.hoster.UnibytesCom import UnibytesCom
+from module.plugins.internal.SimpleHoster import create_getInfo
+
class Share4webCom(UnibytesCom):
    """Share4web.com hoster: reuses the UnibytesCom logic on another domain."""

    __name__ = "Share4webCom"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?share4web\.com/get/\w+"
    __version__ = "0.1"
    __description__ = """Share4web.com"""
    __author_name__ = ("zoidberg")

    DOMAIN = 'http://www.share4web.com'

# Fix: getInfo must be generated for THIS plugin class; passing UnibytesCom
# made link checking use the parent's pattern and DOMAIN.
getInfo = create_getInfo(Share4webCom)
diff --git a/pyload/plugins/hoster/Share76Com.py b/pyload/plugins/hoster/Share76Com.py
new file mode 100644
index 000000000..db850cb73
--- /dev/null
+++ b/pyload/plugins/hoster/Share76Com.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
class Share76Com(XFileSharingPro):
    # Thin configuration subclass of XFileSharingPro.
    __name__ = "Share76Com"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*?share76.com/\w{12}"
    __version__ = "0.03"
    __description__ = """share76.com hoster plugin"""
    __author_name__ = ("me")

    # Name (N), size (S) and unit (U) scraped from the page heading.
    FILE_INFO_PATTERN = r'<h2>\s*File:\s*<font[^>]*>(?P<N>[^>]+)</font>\s*\[<font[^>]*>(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</font>\]</h2>'
    HOSTER_NAME = "share76.com"

    def setup(self):
        # Single-chunk transfers; resume/parallel downloads premium-only.
        self.resumeDownload = self.multiDL = self.premium
        self.chunkLimit = 1

getInfo = create_getInfo(Share76Com)
diff --git a/pyload/plugins/hoster/ShareFilesCo.py b/pyload/plugins/hoster/ShareFilesCo.py
new file mode 100644
index 000000000..ee44b0a1f
--- /dev/null
+++ b/pyload/plugins/hoster/ShareFilesCo.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+import re
+
class ShareFilesCo(XFileSharingPro):
    __name__ = "ShareFilesCo"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?sharefiles\.co/\w{12}"
    __version__ = "0.01"
    __description__ = """Sharefiles.co hoster plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    HOSTER_NAME = "sharefiles.co"

    def startDownload(self, link):
        """Strip adf.ly redirect wrappers before handing the link to download()."""
        url = link.strip()
        if url.startswith('http://adf.ly'):
            url = re.sub('http://adf.ly/\d+/', '', url)
        if self.captcha:
            self.correctCaptcha()
        self.logDebug('DIRECT LINK: %s' % url)
        self.download(url)

getInfo = create_getInfo(ShareFilesCo)
diff --git a/pyload/plugins/hoster/ShareRapidCom.py b/pyload/plugins/hoster/ShareRapidCom.py
new file mode 100644
index 000000000..5a08fed1f
--- /dev/null
+++ b/pyload/plugins/hoster/ShareRapidCom.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from pycurl import HTTPHEADER
+from module.network.RequestFactory import getRequest, getURL
+from module.network.HTTPRequest import BadHeader
+from module.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+from module.common.json_layer import json_loads
+
def checkFile(url):
    """Query share-rapid's checkfiles API for one URL and normalise the reply.

    Adds a pyLoad status code: 2 = online, 1 = offline, 6 = temp. offline.
    """
    raw = getURL("http://share-rapid.com/checkfiles.php", post = {"files": url}, decode = True)
    info = json_loads(raw)

    if "error" in info:
        if info['error'] == False:
            # No error reported: file exists; expose its name under 'name'.
            info['name'] = info['filename']
            info['status'] = 2
        elif info['msg'] == "Not found":
            info['status'] = 1 #offline
        elif info['msg'] == "Service Unavailable":
            info['status'] = 6 #temp.offline

    return info
+
def getInfo(urls):
    """Link-check generator: yields (name, size, status, url) per URL.

    Prefers the JSON API (checkFile); when the API response lacks a
    filename, falls back to fetching and parsing the page itself.
    """
    for url in urls:
        info = checkFile(url)
        if "filename" in info:
            yield info['name'], info['size'], info['status'], url
        else:
            # Default: status 3 (unknown) unless the page parse succeeds.
            file_info = (url, 0, 3, url)
            h = getRequest()
            try:
                h.c.setopt(HTTPHEADER, ["Accept: text/html"])
                html = h.load(url, cookies = True, decode = True)
                file_info = parseFileInfo(ShareRapidCom, url, html)
            finally:
                # Always release the request object, even on parse failure.
                h.close()
            yield file_info
+
class ShareRapidCom(SimpleHoster):
    """Share-rapid.com plugin - premium only."""

    __name__ = "ShareRapidCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?((share(-?rapid\.(biz|com|cz|info|eu|net|org|pl|sk)|-(central|credit|free|net)\.cz|-ms\.net)|(s-?rapid|rapids)\.(cz|sk))|(e-stahuj|mediatack|premium-rapidshare|rapidshare-premium|qiuck)\.cz|kadzet\.com|stahuj-zdarma\.eu|strelci\.net|universal-share\.com)/stahuj/(\w+)"
    __version__ = "0.52"
    __description__ = """Share-rapid.com plugin - premium only"""
    __author_name__ = ("MikyWoW", "zoidberg")
    __author_mail__ = ("MikyWoW@seznam.cz", "zoidberg@mujmail.cz")

    FILE_NAME_PATTERN = r'<h1[^>]*><span[^>]*>(?:<a[^>]*>)?(?P<N>[^<]+)'
    FILE_SIZE_PATTERN = r'<td class="i">Velikost:</td>\s*<td class="h"><strong>\s*(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</strong></td>'
    FILE_OFFLINE_PATTERN = ur'Nastala chyba 404|Soubor byl smazán'

    DOWNLOAD_URL_PATTERN = r'<a href="([^"]+)" title="Stahnout">([^<]+)</a>'
    ERR_LOGIN_PATTERN = ur'<div class="error_div"><strong>Stahování je přístupné pouze přihlášenÃœm uÅŸivatelům'
    ERR_CREDIT_PATTERN = ur'<div class="error_div"><strong>Stahování zdarma je moÅŸné jen přes náš'

    # Normalise every mirror domain to the canonical share-rapid.com URL.
    FILE_URL_REPLACEMENTS = [(__pattern__, r'http://share-rapid.com/stahuj/\1')]

    def setup(self):
        self.chunkLimit = 1
        self.resumeDownload = True

    def process(self, pyfile):
        """Check the file via the API, then fetch the premium download link.

        Requires a logged-in account; maps API status codes onto pyLoad
        offline / tempOffline / fail actions before downloading.
        """
        if not self.account: self.fail("User not logged in")

        self.info = checkFile(pyfile.url)
        self.logDebug(self.info)

        pyfile.status = self.info['status']

        if pyfile.status == 2:
            pyfile.name = self.info['name']
            pyfile.size = self.info['size']
        elif pyfile.status == 1:
            self.offline()
        elif pyfile.status == 6:
            self.tempOffline()
        else:
            self.fail("Unexpected file status")

        url = "http://share-rapid.com/stahuj/%s" % self.info['filepath']
        try:
            self.html = self.load(url, decode=True)
        except BadHeader, e:
            # HTTP error usually means a stale session; relogin and retry.
            self.account.relogin(self.user)
            self.retry(3, 0, str(e))

        found = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
        if found is not None:
            link = found.group(1)
            self.logDebug("Premium link: %s" % link)

            # Verify the downloaded size against the API-reported size.
            self.check_data = {"size": pyfile.size}
            self.download(link)
        else:
            if re.search(self.ERR_LOGIN_PATTERN, self.html):
                # NOTE(review): probably should be self.account.relogin as
                # above - verify that Hoster defines relogin directly.
                self.relogin(self.user)
                self.retry(3,0,"User login failed")
            elif re.search(self.ERR_CREDIT_PATTERN, self.html):
                self.fail("Not enough credit left")
            else:
                self.fail("Download link not found")
diff --git a/pyload/plugins/hoster/SharebeesCom.py b/pyload/plugins/hoster/SharebeesCom.py
new file mode 100644
index 000000000..f5bacc5b0
--- /dev/null
+++ b/pyload/plugins/hoster/SharebeesCom.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
+
class SharebeesCom(XFileSharingPro):
    # Pure-configuration plugin: all behavior comes from XFileSharingPro.
    __name__ = "SharebeesCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*?sharebees.com/\w{12}"
    __version__ = "0.01"
    __description__ = """ShareBees hoster plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # Name (N) and byte size (S) scraped from the download page.
    FILE_NAME_PATTERN = r'<p class="file-name" ><.*?>\s*(?P<N>.+)'
    FILE_SIZE_PATTERN = r'<small>\((?P<S>\d+) bytes\)</small>'
    # This hoster's download form is named F1.
    FORM_PATTERN = 'F1'
    HOSTER_NAME = "sharebees.com"

getInfo = create_getInfo(SharebeesCom)
diff --git a/pyload/plugins/hoster/ShareonlineBiz.py b/pyload/plugins/hoster/ShareonlineBiz.py
new file mode 100644
index 000000000..8a4bcfba8
--- /dev/null
+++ b/pyload/plugins/hoster/ShareonlineBiz.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from base64 import b64decode
+import hashlib
+import random
+from time import time, sleep
+
+from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+from module.plugins.Plugin import chunks
+from module.plugins.internal.CaptchaService import ReCaptcha as _ReCaptcha
+
def getInfo(urls):
    """Bulk link check against the share-online API, 90 links per request.

    Yields one list of (name, size, status, url) tuples per chunk.
    """
    api_url_base = "http://api.share-online.biz/linkcheck.php"

    for chunk in chunks(urls, 90):
        #api only supports old style links
        ids = "\n".join(x.replace("http://www.share-online.biz/dl/","").rstrip("/") for x in chunk)
        src = getURL(api_url_base, post={"links": ids}, decode=True)

        result = []
        for i, line in enumerate(src.split("\n")):
            if not line:
                continue
            fields = line.split(";")

            if fields[1] == "OK":
                status = 2                              # online
            elif fields[1] in ("DELETED", "NOT FOUND"):
                status = 1                              # offline
            else:
                status = 3                              # unknown

            result.append((fields[2], int(fields[3]), status, chunk[i]))
        yield result
+
+#suppress ocr plugin
class ReCaptcha(_ReCaptcha):
    def result(self, server, challenge):
        """Always ask the user (forceUser) instead of running the OCR plugin."""
        img_url = "%simage" % server
        return self.plugin.decryptCaptcha(img_url, get={"c": challenge}, cookies=True, forceUser=True, imgtype="jpg")
+
class ShareonlineBiz(Hoster):
    """Shareonline.biz download hoster.

    Premium downloads go through the account API; free downloads scrape the
    website and solve a ReCaptcha. Downloads are md5-verified when the API
    supplies a checksum.
    """

    __name__ = "ShareonlineBiz"
    __type__ = "hoster"
    __pattern__ = r"http://[\w\.]*?(share\-online\.biz|egoshare\.com)/(download.php\?id\=|dl/)[\w]+"
    __version__ = "0.36"
    __description__ = """Shareonline.biz Download Hoster"""
    __author_name__ = ("spoob", "mkaay", "zoidberg")
    __author_mail__ = ("spoob@pyload.org", "mkaay@mkaay.de", "zoidberg@mujmail.cz")

    ERROR_INFO_PATTERN = r'<p class="b">Information:</p>\s*<div>\s*<strong>(.*?)</strong>'

    def setup(self):
        # range request not working?
        # api supports resume, only one chunk
        # website isn't supporting resuming in first place
        self.file_id = re.search(r"(id\=|/dl/)([a-zA-Z0-9]+)", self.pyfile.url).group(2)
        self.pyfile.url = "http://www.share-online.biz/dl/" + self.file_id

        self.resumeDownload = self.premium
        self.multiDL = False
        #self.chunkLimit = 1

        self.check_data = None

    def process(self, pyfile):
        """Dispatch to premium API download or free website download, then
        record size/md5 for the post-download check."""
        if self.premium:
            self.handleAPIPremium()
            #web-download fallback removed - didn't work anyway
        else:
            self.handleFree()

        if self.api_data:
            self.check_data = {"size": int(self.api_data['size']), "md5": self.api_data['md5']}

    def downloadAPIData(self):
        """Fill self.api_data from the linkcheck API; offline() if not OK."""
        api_url_base = "http://api.share-online.biz/linkcheck.php?md5=1"
        api_param_file = {"links": self.pyfile.url.replace("http://www.share-online.biz/dl/","")} #api only supports old style links
        src = self.load(api_url_base, cookies=False, post=api_param_file, decode=True)

        fields = src.split(";")
        self.api_data = {"fileid": fields[0],
                         "status": fields[1]}
        if not self.api_data["status"] == "OK":
            self.offline()
        self.api_data["filename"] = fields[2]
        self.api_data["size"] = fields[3] # in bytes
        self.api_data["md5"] = fields[4].strip().lower().replace("\n\n", "") # md5

    def handleFree(self):
        """Free website download: wait, solve ReCaptcha (max 5 tries), then
        download the base64-encoded link the captcha endpoint returns."""
        self.downloadAPIData()
        self.pyfile.name = self.api_data["filename"]
        self.pyfile.size = int(self.api_data["size"])

        self.html = self.load(self.pyfile.url, cookies = True) #refer, stuff
        self.setWait(3)
        self.wait()

        self.html = self.load("%s/free/" % self.pyfile.url, post={"dl_free":"1", "choice": "free"}, decode = True)
        self.checkErrors()

        found = re.search(r'var wait=(\d+);', self.html)

        recaptcha = ReCaptcha(self)
        for i in range(5):
            challenge, response = recaptcha.challenge("6LdatrsSAAAAAHZrB70txiV5p-8Iv8BtVxlTtjKX")
            self.setWait(int(found.group(1)) if found else 30)
            # The captcha endpoint answers '0' on failure, else the link.
            response = self.load("%s/free/captcha/%d" % (self.pyfile.url, int(time() * 1000)), post = {
                'dl_free': '1',
                'recaptcha_challenge_field': challenge,
                'recaptcha_response_field': response})

            if not response == '0':
                break

        else: self.fail("No valid captcha solution received")

        download_url = response.decode("base64")
        self.logDebug(download_url)
        if not download_url.startswith("http://"):
            self.parseError("download url")

        self.wait()
        self.download(download_url)
        # check download
        check = self.checkDownload({
            "cookie": re.compile(r'<div id="dl_failure"'),
            "fail": re.compile(r"<title>Share-Online")
        })
        if check == "cookie":
            self.retry(5, 60, "Cookie failure")
        elif check == "fail":
            self.retry(5, 300, "Download failed")

    def checkErrors(self):
        """Map the /failure/<err>/1 redirect codes to fail or wait+retry."""
        found = re.search(r"/failure/(.*?)/1", self.req.lastEffectiveURL)
        if found:
            err = found.group(1)
            found = re.search(self.ERROR_INFO_PATTERN, self.html)
            msg = found.group(1) if found else ""
            self.logError(err, msg or "Unknown error occurred")

            if err in ('freelimit', 'size', 'proxy'):
                self.fail(msg or "Premium account needed")
            # Fix: ('invalid') etc. are plain strings, so `err in (...)` was a
            # substring test; one-element tuples restore real membership tests.
            if err in ('invalid',):
                self.fail(msg or "File not available")
            elif err in ('server',):
                self.setWait(600, False)
            elif err in ('expired',):
                self.setWait(30, False)
            else:
                self.setWait(300, True)

            self.wait()
            self.retry(max_tries=25, reason = msg)

    def handleAPIPremium(self): #should be working better
        """Premium download via the account API; also updates name/size."""
        self.account.getAccountInfo(self.user, True)
        src = self.load("http://api.share-online.biz/account.php",
                        {"username": self.user, "password": self.account.accounts[self.user]["password"], "act": "download", "lid": self.file_id})

        # Response is "Key: value" lines.
        self.api_data = dlinfo = {}
        for line in src.splitlines():
            key, value = line.split(": ")
            dlinfo[key.lower()] = value

        self.logDebug(dlinfo)
        if not dlinfo["status"] == "online":
            self.offline()

        self.pyfile.name = dlinfo["name"]
        self.pyfile.size = int(dlinfo["size"])

        dlLink = dlinfo["url"]
        if dlLink == "server_under_maintenance":
            # Fix: was self.tempoffline() - the Hoster method is tempOffline().
            self.tempOffline()
        else:
            self.multiDL = True
            self.download(dlLink)

    def checksum(self, local_file):
        """Verify the download against the API md5; (ok, reason-code) tuple."""
        if self.api_data and "md5" in self.api_data and self.api_data["md5"]:
            h = hashlib.md5()
            f = open(local_file, "rb")
            h.update(f.read())
            f.close()
            hexd = h.hexdigest()
            if hexd == self.api_data["md5"]:
                return True, 0
            else:
                return False, 1
        else:
            self.logWarning("MD5 checksum missing")
            return True, 5
diff --git a/pyload/plugins/hoster/ShareplaceCom.py b/pyload/plugins/hoster/ShareplaceCom.py
new file mode 100644
index 000000000..c55f6703a
--- /dev/null
+++ b/pyload/plugins/hoster/ShareplaceCom.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+import urllib
+from module.plugins.Hoster import Hoster
+
class ShareplaceCom(Hoster):
    """Shareplace.com free hoster: scrapes the page for the file name, the
    forced wait time and an obfuscated direct link, then downloads."""
    __name__ = "ShareplaceCom"
    __type__ = "hoster"
    __pattern__ = r"(http://)?(www\.)?shareplace\.(com|org)/\?[a-zA-Z0-9]+"
    __version__ = "0.11"
    __description__ = """Shareplace.com Download Hoster"""
    __author_name__ = ("ACCakut, based on YourfilesTo by jeix and skydancer")
    __author_mail__ = ("none")

    def setup(self):
        # page HTML is fetched lazily by download_html()
        self.html = None
        self.multiDL = True

    def process(self,pyfile):
        """Entry point: verify availability, honor the wait, then download."""
        self.pyfile = pyfile
        self.prepare()
        self.download(self.get_file_url())

    def prepare(self):
        """Mark offline files, set the file name and sit out the countdown."""
        if not self.file_exists():
            self.offline()

        self.pyfile.name = self.get_file_name()

        wait_time = self.get_waiting_time()
        self.setWait(wait_time)
        self.logDebug("%s: Waiting %d seconds." % (self.__name__,wait_time))
        self.wait()

    def get_waiting_time(self):
        """Return the countdown seconds scraped from the page (0 if absent)."""
        if self.html is None:
            self.download_html()

        # the page embeds e.g.: var zzipitime = 15;
        m = re.search(r'var zzipitime = (\d+);', self.html)
        if m:
            sec = int(m.group(1))
        else:
            sec = 0

        return sec

    def download_html(self):
        # rewrite the share link to the index1.php form the site serves;
        # NOTE(review): substitution yields a double slash — looks deliberate
        # (site accepts it) but confirm before "fixing"
        url = re.sub("shareplace.com\/\?", "shareplace.com//index1.php/?a=", self.pyfile.url)
        self.html = self.load(url, decode=True)

    def get_file_url(self):
        """ returns the absolute downloadable filepath
        """
        url = re.search(r"var beer = '(.*?)';", self.html)
        if url:
            url = url.group(1)
            # strip the site's junk/obfuscation tokens, then URL-decode
            url = urllib.unquote(url.replace("http://http:/", "").replace("vvvvvvvvv", "").replace("lllllllll", "").replace("teletubbies", ""))
            self.logDebug("URL: %s" % url)
            return url
        else:
            self.fail("absolute filepath could not be found. offline? ")

    def get_file_name(self):
        # NOTE(review): raises AttributeError if <title> is missing — relies
        # on file_exists() having been checked first by prepare()
        if self.html is None:
            self.download_html()

        return re.search("<title>\s*(.*?)\s*</title>", self.html).group(1)

    def file_exists(self):
        """ returns True or False
        """
        if self.html is None:
            self.download_html()

        if re.search(r"HTTP Status 404", self.html) is not None:
            return False
        else:
            return True
+
+
+
diff --git a/pyload/plugins/hoster/ShragleCom.py b/pyload/plugins/hoster/ShragleCom.py
new file mode 100644
index 000000000..f21ad213d
--- /dev/null
+++ b/pyload/plugins/hoster/ShragleCom.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from pycurl import FOLLOWLOCATION
+
+from module.plugins.Hoster import Hoster
+from module.plugins.internal.SimpleHoster import parseHtmlForm
+from module.plugins.internal.CaptchaService import ReCaptcha
+from module.network.RequestFactory import getURL
+
+API_KEY = "078e5ca290d728fd874121030efb4a0d"
+
def parseFileInfo(self, url):
    """Query the cloudnator.com status API for a file.

    :param self: plugin class or instance (uses __pattern__, and check_data
                 when called on an instance)
    :param url: file URL matching __pattern__
    :return: (name, size, status, url); status 2 = online, 1 = offline
    """
    file_id = re.match(self.__pattern__, url).group('ID')

    data = getURL(
        "http://www.cloudnator.com/api.php?key=%s&action=getStatus&fileID=%s" % (API_KEY, file_id),
        decode = True
    ).split()

    if len(data) == 4:
        name, size, md5, status = data
        size = int(size)

        if hasattr(self, "check_data"):
            # fix: was assigned to the misspelled attribute `checkdata`, so
            # the `check_data` slot declared in setup() was never populated
            self.check_data = {"size": size, "md5": md5}

        return name, size, 2 if status == "0" else 1, url
    else:
        # unexpected reply — report unknown size, offline status
        return url, 0, 1, url
+
def getInfo(urls):
    """Yield a (name, size, status, url) tuple for every given URL."""
    for link in urls:
        yield parseFileInfo(ShragleCom, link)
+
class ShragleCom(Hoster):
    """Cloudnator.com (ex Shragle.com) hoster: solves a ReCaptcha, posts the
    download form and follows the Location header to the file."""
    __name__ = "ShragleCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www.)?(cloudnator|shragle).com/files/(?P<ID>.*?)/"
    __version__ = "0.21"
    __description__ = """Cloudnator.com (Shragle.com) Download PLugin"""
    __author_name__ = ("RaNaN", "zoidberg")
    __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz")

    def setup(self):
        self.html = None
        self.multiDL = False
        # filled by parseFileInfo() with {"size": ..., "md5": ...}
        self.check_data = None

    def process(self, pyfile):
        #get file status and info via the JSON-less status API
        self.pyfile.name, self.pyfile.size, status = parseFileInfo(self, pyfile.url)[:3]
        if status != 2:
            self.offline()

        self.handleFree()

    def handleFree(self):
        """Free download: wait, solve captcha, post form, follow redirect."""
        self.html = self.load(self.pyfile.url)

        #get wait time
        found = re.search('\s*var\sdownloadWait\s=\s(\d+);', self.html)
        self.setWait(int(found.group(1)) if found else 30)

        #parse download form
        action, inputs = parseHtmlForm('id="download', self.html)

        #solve captcha
        # NOTE(review): this pattern looks broken (no escaped '?k=' and a
        # trailing lazy group that matches a single char) — in practice the
        # hard-coded fallback key below is what gets used; verify on the site
        found = re.search('recaptcha/api/(?:challenge|noscript)?k=(.+?)', self.html)
        captcha_key = found.group(1) if found else "6LdEFb0SAAAAAAwM70vnYo2AkiVkCx-xmfniatHz"

        recaptcha = ReCaptcha(self)

        inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(captcha_key)
        self.wait()

        #validate: disable redirects so the Location header can be read back
        self.req.http.c.setopt(FOLLOWLOCATION, 0)
        self.html = self.load(action, post = inputs)

        found = re.search(r"Location\s*:\s*(\S*)", self.req.http.header, re.I)
        if found:
            self.correctCaptcha()
            download_url = found.group(1)
        else:
            # "Sicherheitscode falsch" = German for "security code wrong"
            if "Sicherheitscode falsch" in self.html:
                self.invalidCaptcha()
                self.retry(max_tries = 5, reason = "Invalid captcha")
            else:
                self.fail("Invalid session")

        #download (redirect following restored for the actual transfer)
        self.req.http.c.setopt(FOLLOWLOCATION, 1)
        self.download(download_url)

        check = self.checkDownload({
            "ip_blocked": re.compile(r'<div class="error".*IP.*loading')
        })
        if check == "ip_blocked":
            self.setWait(1800, True)
            self.wait()
            self.retry()
+
+
diff --git a/pyload/plugins/hoster/SpeedLoadOrg.py b/pyload/plugins/hoster/SpeedLoadOrg.py
new file mode 100644
index 000000000..32e7baf13
--- /dev/null
+++ b/pyload/plugins/hoster/SpeedLoadOrg.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
class SpeedLoadOrg(XFileSharingPro):
    """Speedload.org hoster — a thin XFileSharingPro specialization: only the
    info patterns, hoster name and the premium handler differ from the base."""
    __name__ = "SpeedLoadOrg"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?speedload\.org/(?P<ID>\w+)"
    __version__ = "1.01"
    __description__ = """Speedload.org hoster plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    FILE_NAME_PATTERN = r'Filename:</b></td><td nowrap>(?P<N>[^<]+)</td></tr>'
    FILE_SIZE_PATTERN = r'Size:</b></td><td>[\w. ]+<small>\((?P<S>\d+) bytes\)</small>'

    HOSTER_NAME = "speedload.org"

    def handlePremium(self):
        # premium accounts post the form parameters straight to the file URL
        self.download(self.pyfile.url, post = self.getPostParameters())

getInfo = create_getInfo(SpeedLoadOrg)
diff --git a/pyload/plugins/hoster/SpeedfileCz.py b/pyload/plugins/hoster/SpeedfileCz.py
new file mode 100644
index 000000000..bfd316dfa
--- /dev/null
+++ b/pyload/plugins/hoster/SpeedfileCz.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
class SpeedfileCz(SimpleHoster):
    """speedfile.cz hoster: scrapes name and request link, honors the
    requestedAt/allowedAt wait window, then downloads."""
    # NOTE(review): __name__ ("SpeedFileCz") does not match the class/file
    # name (SpeedfileCz); left unchanged since config keys may depend on it.
    __name__ = "SpeedFileCz"
    __type__ = "hoster"
    __pattern__ = r"http://speedfile.cz/.*"
    __version__ = "0.31"
    __description__ = """speedfile.cz"""
    __author_name__ = ("zoidberg")

    FILE_NAME_PATTERN = r'<meta property="og:title" content="(?P<N>[^"]+)" />'
    FILE_SIZE_PATTERN = r'<strong><big>(?P<S>[0-9.]+) (?P<U>[kKMG])i?B'
    URL_PATTERN = r'<a id="request" class="caps" href="([^"]+)" rel="nofollow">'
    FILE_OFFLINE_PATTERN = r'<title>Speedfile \| 404'
    WAIT_PATTERN = r'"requestedAt":(\d+),"allowedAt":(\d+),"adUri"'

    def setup(self):
        self.multiDL = False

    def process(self, pyfile):
        """Load the page, extract name and link, wait, then download."""
        self.html = self.load(pyfile.url, decode=True)

        if re.search(self.FILE_OFFLINE_PATTERN, self.html):
            self.offline()

        found = re.search(self.FILE_NAME_PATTERN, self.html)
        if found is None:
            self.fail("Parse error (NAME)")
        pyfile.name = found.group(1)

        found = re.search(self.URL_PATTERN, self.html)
        if found is None:
            self.fail("Parse error (URL)")
        download_url = "http://speedfile.cz/" + found.group(1)

        self.html = self.load(download_url)
        self.logDebug(self.html)
        found = re.search(self.WAIT_PATTERN, self.html)
        if found is None:
            self.fail("Parse error (WAIT)")
        # wait the difference between allowedAt and requestedAt (seconds)
        self.setWait(int(found.group(2)) - int(found.group(1)))
        self.wait()

        self.download(download_url)

# fix: the return value was previously discarded, so the module never
# exported `getInfo` and bulk online-checking of speedfile.cz links broke
getInfo = create_getInfo(SpeedfileCz)
diff --git a/pyload/plugins/hoster/StreamCz.py b/pyload/plugins/hoster/StreamCz.py
new file mode 100644
index 000000000..ca1033502
--- /dev/null
+++ b/pyload/plugins/hoster/StreamCz.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+
def getInfo(urls):
    """Yield one list of (name, size, status, url) tuples for all URLs.

    Status 1 = offline, 2 = online. stream.cz exposes no cheap name/size,
    so the URL doubles as the name and size stays 0.
    """
    result = []

    for url in urls:

        html = getURL(url)
        if re.search(StreamCz.FILE_OFFLINE_PATTERN, html):
            # File offline
            result.append((url, 0, 1, url))
        else:
            result.append((url, 0, 2, url))

    # fix: yield once after the loop — previously the partially filled list
    # was yielded on every iteration, giving the caller duplicated entries
    yield result
+
class StreamCz(Hoster):
    """stream.cz video hoster: picks the best available CDN quality
    (HD > HQ > LQ) and downloads the mp4 via the CDN dispatcher."""
    __name__ = "StreamCz"
    __type__ = "hoster"
    __pattern__ = r"http://www.stream.cz/[^/]+/\d+.*"
    __version__ = "0.1"
    __description__ = """stream.cz"""
    __author_name__ = ("zoidberg")

    # Czech "page could not be found (404)" heading, accents wildcarded
    FILE_OFFLINE_PATTERN = r'<h1 class="commonTitle">Str.nku nebylo mo.n. nal.zt \(404\)</h1>'
    FILE_NAME_PATTERN = r'<link rel="video_src" href="http://www.stream.cz/\w+/(\d+)-([^"]+)" />'
    CDN_PATTERN = r'<param name="flashvars" value="[^"]*&id=(?P<ID>\d+)(?:&cdnLQ=(?P<cdnLQ>\d*))?(?:&cdnHQ=(?P<cdnHQ>\d*))?(?:&cdnHD=(?P<cdnHD>\d*))?&'

    def setup(self):
        self.multiDL = True
        self.resumeDownload = True

    def process(self, pyfile):
        """Resolve the best CDN id from the flashvars and download it."""

        self.html = self.load(pyfile.url, decode=True)

        if re.search(self.FILE_OFFLINE_PATTERN, self.html):
            self.offline()

        found = re.search(self.CDN_PATTERN, self.html)
        if found is None: self.fail("Parse error (CDN)")
        cdn = found.groupdict()
        self.logDebug(cdn)
        # prefer the highest quality whose CDN id is present and non-empty;
        # for/else: the else fires only if no quality matched
        for cdnkey in ("cdnHD", "cdnHQ", "cdnLQ"):
            if cdn.has_key(cdnkey) and cdn[cdnkey] > '':
                cdnid = cdn[cdnkey]
                break
        else:
            self.fail("Stream URL not found")

        found = re.search(self.FILE_NAME_PATTERN, self.html)
        if found is None: self.fail("Parse error (NAME)")
        # name like <title>-<id>.<HD|HQ|LQ>.mp4; quality tag from cdnkey
        pyfile.name = "%s-%s.%s.mp4" % (found.group(2), found.group(1), cdnkey[-2:])

        download_url = "http://cdn-dispatcher.stream.cz/?id=" + cdnid
        self.logInfo("STREAM (%s): %s" % (cdnkey[-2:], download_url))
        self.download(download_url)
diff --git a/pyload/plugins/hoster/StreamcloudEu.py b/pyload/plugins/hoster/StreamcloudEu.py
new file mode 100644
index 000000000..73c0465f8
--- /dev/null
+++ b/pyload/plugins/hoster/StreamcloudEu.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+from module.network.HTTPRequest import HTTPRequest
+from time import sleep
+import re
+
class StreamcloudEu(XFileSharingPro):
    """Streamcloud.eu hoster: XFileSharingPro variant that extracts the
    direct mp4 link from the player config, retrying the form post with a
    fresh request object until a link or redirect appears."""
    __name__ = "StreamcloudEu"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?streamcloud\.eu/\S+"
    __version__ = "0.02"
    __description__ = """Streamcloud.eu hoster plugin"""
    __author_name__ = ("seoester")
    __author_mail__ = ("seoester@googlemail.com")

    HOSTER_NAME = "streamcloud.eu"
    DIRECT_LINK_PATTERN = r'file: "(http://(stor|cdn)\d+\.streamcloud.eu:?\d*/.*/video\.mp4)",'

    def setup(self):
        super(StreamcloudEu, self).setup()
        self.multiDL = True

    def getDownloadLink(self):
        """Return the direct mp4 URL, posting the wait form up to 5 times."""
        found = re.search(self.DIRECT_LINK_PATTERN, self.html, re.S)
        if found:
            return found.group(1)

        for i in range(5):
            self.logDebug("Getting download link: #%d" % i)
            data = self.getPostParameters()
            # a fresh HTTPRequest sharing our cookie jar, so headers of the
            # raw response (possible redirect) can be inspected
            httpRequest = HTTPRequest(options=self.req.options)
            httpRequest.cj = self.req.cj
            sleep(10)
            self.html = httpRequest.load(self.pyfile.url, post = data, referer=False, cookies=True, decode = True)
            self.header = httpRequest.header

            found = re.search("Location\s*:\s*(.*)", self.header, re.I)
            if found:
                break

            found = re.search(self.DIRECT_LINK_PATTERN, self.html, re.S)
            if found:
                break

        else:
            # loop exhausted without a link
            if self.errmsg and 'captcha' in self.errmsg:
                self.fail("No valid captcha code entered")
            else:
                self.fail("Download link not found")

        return found.group(1)

    def getPostParameters(self):
        """Collect the download form inputs (free or premium), handling
        waits, passwords and captcha; adapted from XFileSharingPro."""
        for i in range(3):
            if not self.errmsg: self.checkErrors()

            if hasattr(self,"FORM_PATTERN"):
                action, inputs = self.parseHtmlForm(self.FORM_PATTERN)
            else:
                action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})

            if not inputs:
                action, inputs = self.parseHtmlForm('F1')
                if not inputs:
                    if self.errmsg:
                        self.retry()
                    else:
                        self.parseError("Form not found")

            self.logDebug(self.HOSTER_NAME, inputs)

            if 'op' in inputs and inputs['op'] in ('download1', 'download2', 'download3'):
                if "password" in inputs:
                    if self.passwords:
                        inputs['password'] = self.passwords.pop(0)
                    else:
                        self.fail("No or invalid passport")

                if not self.premium:
                    found = re.search(self.WAIT_PATTERN, self.html)
                    if found:
                        wait_time = int(found.group(1)) + 1
                        self.setWait(wait_time, False)
                    else:
                        wait_time = 0

                    self.captcha = self.handleCaptcha(inputs)

                    if wait_time: self.wait()

                self.errmsg = None
                self.logDebug("getPostParameters {0}".format(i))
                return inputs

            else:
                # first form only selects free/premium; post it and loop
                inputs['referer'] = self.pyfile.url

                if self.premium:
                    inputs['method_premium'] = "Premium Download"
                    if 'method_free' in inputs: del inputs['method_free']
                else:
                    inputs['method_free'] = "Free Download"
                    if 'method_premium' in inputs: del inputs['method_premium']

                self.html = self.load(self.pyfile.url, post = inputs, ref = False)
                self.errmsg = None

        else: self.parseError('FORM: %s' % (inputs['op'] if 'op' in inputs else 'UNKNOWN'))


getInfo = create_getInfo(StreamcloudEu)
diff --git a/pyload/plugins/hoster/TurbobitNet.py b/pyload/plugins/hoster/TurbobitNet.py
new file mode 100644
index 000000000..4e7eb81c2
--- /dev/null
+++ b/pyload/plugins/hoster/TurbobitNet.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+"""
+ Copyright (C) 2012 pyLoad team
+ Copyright (C) 2012 JD-Team support@jdownloader.org
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+import random
+from urllib import quote
+from binascii import hexlify, unhexlify
+from Crypto.Cipher import ARC4
+import time
+
+from module.network.RequestFactory import getURL
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, timestamp
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+from pycurl import HTTPHEADER
+
class TurbobitNet(SimpleHoster):
    """Turbobit.net hoster: free mode emulates the site's timeout JavaScript
    (fetched as an RC4-encrypted jdownloader update and run via self.js) to
    compute the download URL; premium mode just scrapes the redirect link."""
    __name__ = "TurbobitNet"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)?(turbobit.net|unextfiles.com)/(?:download/free/)?(?P<ID>\w+).*"
    __version__ = "0.09"
    __description__ = """Turbobit.net plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    FILE_INFO_PATTERN = r"<span class='file-icon1[^>]*>(?P<N>[^<]+)</span>\s*\((?P<S>[^\)]+)\)\s*</h1>" #long filenames are shortened
    FILE_NAME_PATTERN = r'<meta name="keywords" content="\s+(?P<N>[^,]+)' #full name but missing on page2
    FILE_OFFLINE_PATTERN = r'<h2>File Not Found</h2>|html\(\'File (?:was )?not found'
    FILE_URL_REPLACEMENTS = [(r"http://(?:\w*\.)?(turbobit.net|unextfiles.com)/(?:download/free/)?(?P<ID>\w+).*", "http://turbobit.net/\g<ID>.html")]
    SH_COOKIES = [("turbobit.net", "user_lang", "en")]

    CAPTCHA_KEY_PATTERN = r'src="http://api\.recaptcha\.net/challenge\?k=([^"]+)"'
    DOWNLOAD_URL_PATTERN = r'(?P<url>/download/redirect/[^"\']+)'
    LIMIT_WAIT_PATTERN = r'<div id="time-limit-text">\s*.*?<span id=\'timeout\'>(\d+)</span>'
    CAPTCHA_SRC_PATTERN = r'<img alt="Captcha" src="(.*?)"'

    def handleFree(self):
        """Free download: captcha, timeout-JS emulation, then the real link."""
        self.url = "http://turbobit.net/download/free/%s" % self.file_info['ID']
        self.html = self.load(self.url)

        rtUpdate = self.getRtUpdate()

        self.solveCaptcha()
        # the timeout endpoint expects an AJAX-style request
        self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
        self.url = self.getDownloadUrl(rtUpdate)

        self.wait()
        self.html = self.load(self.url)
        self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With:"])
        self.downloadFile()

    def solveCaptcha(self):
        """Try up to 5 times to solve the captcha (ReCaptcha or image)."""
        for i in range(5):
            found = re.search(self.LIMIT_WAIT_PATTERN, self.html)
            if found:
                # free-download limit reached: wait it out and start over
                wait_time = int(found.group(1))
                self.setWait(wait_time, wait_time > 60)
                self.wait()
                self.retry()

            action, inputs = self.parseHtmlForm("action='#'")
            if not inputs: self.parseError("captcha form")
            self.logDebug(inputs)

            if inputs['captcha_type'] == 'recaptcha':
                recaptcha = ReCaptcha(self)
                found = re.search(self.CAPTCHA_KEY_PATTERN, self.html)
                captcha_key = found.group(1) if found else '6LcTGLoSAAAAAHCWY9TTIrQfjUlxu6kZlTYP50_c'
                inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(captcha_key)
            else:
                found = re.search(self.CAPTCHA_SRC_PATTERN, self.html)
                if not found: self.parseError('captcha')
                captcha_url = found.group(1)
                inputs['captcha_response'] = self.decryptCaptcha(captcha_url)

            self.logDebug(inputs)
            self.html = self.load(self.url, post = inputs)

            # the timer page only appears after a correct captcha
            if not "<div class='download-timer-header'>" in self.html:
                self.invalidCaptcha()
            else:
                self.correctCaptcha()
                break
        else: self.fail("Invalid captcha")

    def getRtUpdate(self):
        """Return the cached timeout-JS payload, refreshing it daily."""
        rtUpdate = self.getStorage("rtUpdate")
        if not rtUpdate:
            if self.getStorage("version") != self.__version__ or int(self.getStorage("timestamp", 0)) + 86400000 < timestamp():
                # that's right, we are even using jdownloader updates
                rtUpdate = getURL("http://update0.jdownloader.org/pluginstuff/tbupdate.js")
                rtUpdate = self.decrypt(rtUpdate.splitlines()[1])
                # but we still need to fix the syntax to work with other engines than rhino
                rtUpdate = re.sub(r'for each\(var (\w+) in(\[[^\]]+\])\)\{',r'zza=\2;for(var zzi=0;zzi<zza.length;zzi++){\1=zza[zzi];',rtUpdate)
                rtUpdate = re.sub(r"for\((\w+)=",r"for(var \1=", rtUpdate)

                self.logDebug("rtUpdate")
                self.setStorage("rtUpdate", rtUpdate)
                self.setStorage("timestamp", timestamp())
                self.setStorage("version", self.__version__)
            else:
                self.logError("Unable to download, wait for update...")
                self.tempOffline()

        return rtUpdate

    def getDownloadUrl(self, rtUpdate):
        """Run the site's timeout JS with our variables; return the link."""
        self.req.http.lastURL = self.url

        found = re.search("(/\w+/timeout\.js\?\w+=)([^\"\'<>]+)", self.html)
        url = "http://turbobit.net%s%s" % (found.groups() if found else ('/files/timeout.js?ver=', ''.join(random.choice('0123456789ABCDEF') for x in range(32))))
        fun = self.load(url)

        self.setWait(65, False)

        for b in [1,3]:
            self.jscode = "var id = \'%s\';var b = %d;var inn = \'%s\';%sout" % (self.file_info['ID'], b, quote(fun), rtUpdate)

            try:
                out = self.js.eval(self.jscode)
                self.logDebug("URL", self.js.engine, out)
                if out.startswith('/download/'):
                    return "http://turbobit.net%s" % out.strip()
            except Exception, e:
                self.logError(e)
        else:
            # neither b value produced a link
            if self.retries >= 2:
                # retry with updated js
                self.delStorage("rtUpdate")
            self.retry()

    def decrypt(self, data):
        # RC4 with a fixed hex-ascii key; payload is hex-encoded twice
        cipher = ARC4.new(hexlify('E\x15\xa1\x9e\xa3M\xa0\xc6\xa0\x84\xb6H\x83\xa8o\xa0'))
        return unhexlify(cipher.encrypt(unhexlify(data)))

    def getLocalTimeString(self):
        """Return local time formatted like JavaScript's Date.toString()."""
        lt = time.localtime()
        tz = time.altzone if lt.tm_isdst else time.timezone
        return "%s GMT%+03d%02d" % (time.strftime("%a %b %d %Y %H:%M:%S", lt), -tz // 3600, tz % 3600)

    def handlePremium(self):
        self.logDebug("Premium download as user %s" % self.user)
        self.downloadFile()

    def downloadFile(self):
        """Extract the /download/redirect/ link from self.html and fetch it."""
        found = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
        if not found: self.parseError("download link")
        self.url = "http://turbobit.net" + found.group('url')
        self.logDebug(self.url)
        self.download(self.url)

getInfo = create_getInfo(TurbobitNet)
diff --git a/pyload/plugins/hoster/TurbouploadCom.py b/pyload/plugins/hoster/TurbouploadCom.py
new file mode 100644
index 000000000..6e81c6319
--- /dev/null
+++ b/pyload/plugins/hoster/TurbouploadCom.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.DeadHoster import DeadHoster as EasybytezCom, create_getInfo
+#from module.plugins.internal.SimpleHoster import create_getInfo
+#from module.plugins.hoster.EasybytezCom import EasybytezCom
+
class TurbouploadCom(EasybytezCom):
    """turboupload.com — the hoster is dead: the import above aliases
    DeadHoster as EasybytezCom, so handleFree below is unreachable legacy
    code kept for reference."""
    __name__ = "TurbouploadCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)?turboupload.com/(\w+).*"
    __version__ = "0.02"
    __description__ = """turboupload.com"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # shares code with EasybytezCom

    DIRECT_LINK_PATTERN = r'<a href="(http://turboupload.com/files/[^"]+)">\1</a>'

    def handleFree(self):
        # legacy free-download path from the EasybytezCom-based implementation
        self.html = self.load(self.pyfile.url, post = self.getPostParameters(), ref = True, cookies = True)
        found = re.search(self.DIRECT_LINK_PATTERN, self.html)
        if not found: self.parseError('Download Link')
        url = found.group(1)
        self.logDebug('URL: ' + url)
        self.download(url)

getInfo = create_getInfo(TurbouploadCom)
diff --git a/pyload/plugins/hoster/TusfilesNet.py b/pyload/plugins/hoster/TusfilesNet.py
new file mode 100644
index 000000000..517df8561
--- /dev/null
+++ b/pyload/plugins/hoster/TusfilesNet.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
class TusfilesNet(XFileSharingPro):
    """Tusfiles.net hoster — pure XFileSharingPro configuration: only the
    info/offline patterns and the hoster name differ from the base class."""
    __name__ = "TusfilesNet"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?tusfiles\.net/\w{12}"
    __version__ = "0.01"
    __description__ = """Tusfiles.net hoster plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")

    FILE_INFO_PATTERN = r'<li>(?P<N>[^<]+)</li>\s+<li><b>Size:</b> <small>(?P<S>[\d.]+) (?P<U>\w+)</small></li>'
    FILE_OFFLINE_PATTERN = r'The file you were looking for could not be found'

    HOSTER_NAME = "tusfiles.net"

getInfo = create_getInfo(TusfilesNet)
diff --git a/pyload/plugins/hoster/TwoSharedCom.py b/pyload/plugins/hoster/TwoSharedCom.py
new file mode 100644
index 000000000..8401e0cb0
--- /dev/null
+++ b/pyload/plugins/hoster/TwoSharedCom.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+import re
+
class TwoSharedCom(SimpleHoster):
    """2shared.com hoster: SimpleHoster does the info parsing; the free
    handler just lifts the direct link out of a window.location script."""
    __name__ = "TwoSharedCom"
    __type__ = "hoster"
    __pattern__ = r"http://[\w\.]*?2shared.com/(account/)?(download|get|file|document|photo|video|audio)/.*"
    __version__ = "0.11"
    __description__ = """2Shared Download Hoster"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    FILE_NAME_PATTERN = r'<h1>(?P<N>.*)</h1>'
    FILE_SIZE_PATTERN = r'<span class="dtitle">File size:</span>\s*(?P<S>[0-9,.]+) (?P<U>[kKMG])i?B'
    FILE_OFFLINE_PATTERN = r'The file link that you requested is not valid\.|This file was deleted\.'
    DOWNLOAD_URL_PATTERN = r"window.location ='([^']+)';"

    def setup(self):
        # resumable, parallel-friendly downloads
        self.multiDL = True
        self.resumeDownload = True

    def handleFree(self):
        """Extract the direct link from the page and start the download."""
        match = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
        if match is None:
            self.parseError('Download link')
        target = match.group(1)
        self.logDebug("Download URL %s" % target)

        self.download(target)

getInfo = create_getInfo(TwoSharedCom)
+ \ No newline at end of file
diff --git a/pyload/plugins/hoster/UlozTo.py b/pyload/plugins/hoster/UlozTo.py
new file mode 100644
index 000000000..1c3891eb1
--- /dev/null
+++ b/pyload/plugins/hoster/UlozTo.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
def convertDecimalPrefix(m):
    """Collapse a '<number> <k|M|G>' regex match into a plain digit string.

    Used as a re.sub callback on file sizes / traffic figures, e.g.
    '1.5 k' -> '1500' (pads to the prefix's decimal places, drops the dot).
    """
    places = {'k': 3, 'M': 6, 'G': 9}[m.group(2)]
    return ("%.*f" % (places, float(m.group(1)))).replace('.', '')
+
class UlozTo(SimpleHoster):
    """uloz.to hoster: handles password-protected links, the VIP disclaimer
    page, free (image captcha) and premium downloads, plus post-download
    error-page classification."""
    __name__ = "UlozTo"
    __type__ = "hoster"
    __pattern__ = r"http://(\w*\.)?(uloz\.to|ulozto\.(cz|sk|net)|bagruj.cz|zachowajto.pl)/(?:live/)?(?P<id>\w+/[^/?]*)"
    __version__ = "0.93"
    __description__ = """uloz.to"""
    __author_name__ = ("zoidberg")

    FILE_NAME_PATTERN = r'<a href="#download" class="jsShowDownload">(?P<N>[^<]+)</a>'
    FILE_SIZE_PATTERN = r'<span id="fileSize">.*?(?P<S>[0-9.]+\s[kMG]?B)</span>'
    # matches the password-protected page, where only the name is visible
    FILE_INFO_PATTERN = r'<p>File <strong>(?P<N>[^<]+)</strong> is password protected</p>'
    FILE_OFFLINE_PATTERN = r'<title>404 - Page not found</title>|<h1 class="h1">File (has been deleted|was banned)</h1>'
    FILE_SIZE_REPLACEMENTS = [('([0-9.]+)\s([kMG])B', convertDecimalPrefix)]
    FILE_URL_REPLACEMENTS = [(r"(?<=http://)([^/]+)", "www.ulozto.net")]

    PASSWD_PATTERN = r'<div class="passwordProtectedFile">'
    VIPLINK_PATTERN = r'<a href="[^"]*\?disclaimer=1" class="linkVip">'
    FREE_URL_PATTERN = r'<div class="freeDownloadForm"><form action="([^"]+)"'
    PREMIUM_URL_PATTERN = r'<div class="downloadForm"><form action="([^"]+)"'

    def setup(self):
        # parallel downloads only for premium accounts
        self.multiDL = self.premium
        self.resumeDownload = True

    def process(self, pyfile):
        """Normalize the URL, pass password/disclaimer gates, download, check."""
        pyfile.url = re.sub(r"(?<=http://)([^/]+)", "www.ulozto.net", pyfile.url)
        self.html = self.load(pyfile.url, decode = True, cookies = True)

        # try each user-supplied password (one per line) in turn
        passwords = self.getPassword().splitlines()
        while self.PASSWD_PATTERN in self.html:
            if passwords:
                password = passwords.pop(0)
                self.logInfo("Password protected link, trying " + password)
                self.html = self.load(pyfile.url, get = {"do": "passwordProtectedForm-submit"},
                    post={"password": password, "password_send": 'Send'}, cookies=True)
            else:
                self.fail("No or incorrect password")

        if re.search(self.VIPLINK_PATTERN, self.html):
            self.html = self.load(pyfile.url, get={"disclaimer": "1"})

        self.file_info = self.getFileInfo()

        if self.premium and self.checkTrafficLeft():
            self.handlePremium()
        else:
            self.handleFree()

        self.doCheckDownload()

    def handleFree(self):
        """Free download: solve the image captcha and post the form."""
        action, inputs = self.parseHtmlForm('id="frm-downloadDialog-freeDownloadForm"')
        if not action or not inputs:
            self.parseError("free download form")

        # get and decrypt captcha; the form's field names vary, so locate
        # the inputs whose names look like the captcha id / captcha text
        captcha_id_field = captcha_text_field = None

        for key in inputs.keys():
            found = re.match("captcha.*(id|text|value)", key)
            if found:
                if found.group(1) == "id":
                    captcha_id_field = key
                else:
                    captcha_text_field = key

        if not captcha_id_field or not captcha_text_field:
            self.parseError("CAPTCHA form changed")

        """
        captcha_id = self.getStorage("captcha_id")
        captcha_text = self.getStorage("captcha_text")

        if not captcha_id or not captcha_text:
        """
        captcha_id = inputs[captcha_id_field]
        captcha_text = self.decryptCaptcha("http://img.uloz.to/captcha/%s.png" % captcha_id)

        self.logDebug(' CAPTCHA ID:' + captcha_id + ' CAPTCHA TEXT:' + captcha_text)

        """
        self.setStorage("captcha_id", captcha_id)
        self.setStorage("captcha_text", captcha_text)
        """
        self.multiDL = True

        inputs.update({captcha_id_field: captcha_id, captcha_text_field: captcha_text})

        self.download("http://www.ulozto.net" + action, post=inputs, cookies=True, disposition=True)

    def handlePremium(self):
        self.download(self.pyfile.url + "?do=directDownload", disposition=True)
        #parsed_url = self.findDownloadURL(premium=True)
        #self.download(parsed_url, post={"download": "Download"})

    def findDownloadURL(self, premium=False):
        """Scrape the free/premium form action (legacy path, kept for the
        commented-out premium flow above)."""
        msg = "%s link" % ("Premium" if premium else "Free")
        found = re.search(self.PREMIUM_URL_PATTERN if premium else self.FREE_URL_PATTERN, self.html)
        if not found: self.parseError(msg)
        parsed_url = "http://www.ulozto.net" + found.group(1)
        self.logDebug("%s: %s" % (msg, parsed_url))
        return parsed_url

    def doCheckDownload(self):
        """Classify the downloaded payload and react to known error pages."""
        check = self.checkDownload({
            "wrong_captcha": re.compile(r'<ul class="error">\s*<li>Error rewriting the text.</li>'),
            "offline": re.compile(self.FILE_OFFLINE_PATTERN),
            "passwd": self.PASSWD_PATTERN,
            "server_error": 'src="http://img.ulozto.cz/error403/vykricnik.jpg"', #paralell dl, server overload etc.
            "not_found": "<title>UloÅŸ.to</title>"
        })

        if check == "wrong_captcha":
            self.delStorage("captcha_id")
            self.delStorage("captcha_text")
            self.invalidCaptcha()
            self.retry(reason="Wrong captcha code")
        elif check == "offline":
            self.offline()
        elif check == "passwd":
            self.fail("Wrong password")
        elif check == "server_error":
            self.logError("Server error, try downloading later")
            self.multiDL = False
            self.setWait(3600, True)
            self.wait()
            self.retry()
        elif check == "not_found":
            self.fail("Server error - file not downloadable")

getInfo = create_getInfo(UlozTo)
diff --git a/pyload/plugins/hoster/UloziskoSk.py b/pyload/plugins/hoster/UloziskoSk.py
new file mode 100644
index 000000000..c607e7a5b
--- /dev/null
+++ b/pyload/plugins/hoster/UloziskoSk.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, PluginParseError
+
+class UloziskoSk(SimpleHoster):
+ __name__ = "UloziskoSk"
+ __type__ = "hoster"
+ __pattern__ = r"http://(\w*\.)?ulozisko.sk/.*"
+ __version__ = "0.23"
+ __description__ = """Ulozisko.sk"""
+ __author_name__ = ("zoidberg")
+
+ URL_PATTERN = r'<form name = "formular" action = "([^"]+)" method = "post">'
+ ID_PATTERN = r'<input type = "hidden" name = "id" value = "([^"]+)" />'
+ FILE_NAME_PATTERN = r'<div class="down1">(?P<N>[^<]+)</div>'
+ FILE_SIZE_PATTERN = ur'Veğkosť súboru: <strong>(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</strong><br />'
+ CAPTCHA_PATTERN = r'<img src="(/obrazky/obrazky.php\?fid=[^"]+)" alt="" />'
+ FILE_OFFLINE_PATTERN = ur'<span class = "red">ZadanÜ súbor neexistuje z jedného z nasledujúcich dÎvodov:</span>'
+ IMG_PATTERN = ur'<strong>PRE ZVÄČŠENIE KLIKNITE NA OBRÁZOK</strong><br /><a href = "([^"]+)">'
+
+ def process(self, pyfile):
+ self.html = self.load(pyfile.url, decode=True)
+ self.getFileInfo()
+
+ found = re.search(self.IMG_PATTERN, self.html)
+ if found:
+ url = "http://ulozisko.sk" + found.group(1)
+ self.download(url)
+ else:
+ self.handleFree()
+
+ def handleFree(self):
+ found = re.search(self.URL_PATTERN, self.html)
+ if found is None: raise PluginParseError('URL')
+ parsed_url = 'http://www.ulozisko.sk' + found.group(1)
+
+ found = re.search(self.ID_PATTERN, self.html)
+ if found is None: raise PluginParseError('ID')
+ id = found.group(1)
+
+ self.logDebug('URL:' + parsed_url + ' ID:' + id)
+
+ found = re.search(self.CAPTCHA_PATTERN, self.html)
+ if found is None: raise PluginParseError('CAPTCHA')
+ captcha_url = 'http://www.ulozisko.sk' + found.group(1)
+
+ captcha = self.decryptCaptcha(captcha_url, cookies=True)
+
+ self.logDebug('CAPTCHA_URL:' + captcha_url + ' CAPTCHA:' + captcha)
+
+ self.download(parsed_url, post={
+ "antispam": captcha,
+ "id": id,
+ "name": self.pyfile.name,
+ "but": "++++STIAHNI+S%DABOR++++"
+ })
+
+getInfo = create_getInfo(UloziskoSk)
diff --git a/pyload/plugins/hoster/UnibytesCom.py b/pyload/plugins/hoster/UnibytesCom.py
new file mode 100644
index 000000000..3c8552271
--- /dev/null
+++ b/pyload/plugins/hoster/UnibytesCom.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from pycurl import FOLLOWLOCATION
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
class UnibytesCom(SimpleHoster):
    """UniBytes.com hoster plugin: walks the site's multi-step free
    download wizard until a redirect or a final download link appears."""
    __name__ = "UnibytesCom"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?unibytes\.com/[a-zA-Z0-9-._ ]{11}B"
    __version__ = "0.1"
    __description__ = """UniBytes.com"""
    __author_name__ = ("zoidberg")

    FILE_INFO_PATTERN = r'<span[^>]*?id="fileName"[^>]*>(?P<N>[^>]+)</span>\s*\((?P<S>\d.*?)\)'
    DOMAIN = 'http://www.unibytes.com'

    WAIT_PATTERN = r'Wait for <span id="slowRest">(\d+)</span> sec'
    DOWNLOAD_LINK_PATTERN = r'<a href="([^"]+)">Download</a>'

    def handleFree(self):
        """Submit the start form, then follow up to 8 wizard steps
        (timer / captcha / last) until a download URL is obtained."""
        action, post_data = self.parseHtmlForm('id="startForm"')
        # Redirects are disabled so the Location header can be read manually.
        self.req.http.c.setopt(FOLLOWLOCATION, 0)

        for i in range(8):
            self.logDebug(action, post_data)
            self.html = self.load(self.DOMAIN + action, post=post_data)

            # A Location header means the server redirected straight to the file.
            found = re.search(r'location:\s*(\S+)', self.req.http.header, re.I)
            if found:
                url = found.group(1)
                break

            if '>Somebody else is already downloading using your IP-address<' in self.html:
                self.setWait(600, True)
                self.wait()
                self.retry()

            # NOTE(review): assumes every wizard form carries a 'step'
            # field — a missing key would raise KeyError here; confirm.
            if post_data['step'] == 'last':
                found = re.search(self.DOWNLOAD_LINK_PATTERN, self.html)
                if found:
                    url = found.group(1)
                    self.correctCaptcha()
                    break
                else:
                    self.invalidCaptcha()

            last_step = post_data['step']
            action, post_data = self.parseHtmlForm('id="stepForm"')

            if last_step == 'timer':
                # Honor the countdown shown on the page (fallback 60 s).
                found = re.search(self.WAIT_PATTERN, self.html)
                self.setWait(int(found.group(1)) if found else 60, False)
                self.wait()
            elif last_step in ('captcha', 'last'):
                post_data['captcha'] = self.decryptCaptcha(self.DOMAIN + '/captcha.jpg')
            else:
                self.fail("No valid captcha code entered")

        self.logDebug('Download link: ' + url)
        self.req.http.c.setopt(FOLLOWLOCATION, 1)
        self.download(url)

getInfo = create_getInfo(UnibytesCom)
diff --git a/pyload/plugins/hoster/UploadStationCom.py b/pyload/plugins/hoster/UploadStationCom.py
new file mode 100644
index 000000000..96dc7b577
--- /dev/null
+++ b/pyload/plugins/hoster/UploadStationCom.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.FileserveCom import FileserveCom, checkFile
+from module.plugins.Plugin import chunks
+
class UploadStationCom(FileserveCom):
    """UploadStation.com hoster — reuses FileserveCom's download logic
    with site-specific endpoints and scraping patterns."""
    __name__ = "UploadStationCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?uploadstation\.com/file/(?P<id>[A-Za-z0-9]+)"
    __version__ = "0.51"
    __description__ = """UploadStation.Com File Download Hoster"""
    __author_name__ = ("fragonib", "zoidberg")
    __author_mail__ = ("fragonib[AT]yahoo[DOT]es", "zoidberg@mujmail.cz")

    # [file page base, link-check endpoint, recaptcha-check endpoint]
    URLS = ['http://www.uploadstation.com/file/', 'http://www.uploadstation.com/check-links.php', 'http://www.uploadstation.com/checkReCaptcha.php']
    # Row / cell patterns consumed by FileserveCom's link checker.
    LINKCHECK_TR = r'<div class="details (?:white|grey)">(.*?)\t{9}</div>'
    LINKCHECK_TD = r'<div class="(?:col )?col\d">(?:<[^>]*>|&nbsp;)*([^<]*)'

    LONG_WAIT_PATTERN = r'<h1>You have to wait (\d+) (\w+) to download the next file\.</h1>'
+
def getInfo(urls):
    """Check link availability in batches of 100 via the shared helper."""
    for chunk in chunks(urls, 100):
        yield checkFile(UploadStationCom, chunk)
diff --git a/pyload/plugins/hoster/UploadedTo.py b/pyload/plugins/hoster/UploadedTo.py
new file mode 100644
index 000000000..f38336773
--- /dev/null
+++ b/pyload/plugins/hoster/UploadedTo.py
@@ -0,0 +1,239 @@
+# -*- coding: utf-8 -*-
+
+# Test links (random.bin):
+# http://ul.to/044yug9o
+# http://ul.to/gzfhd0xs
+
+import re
+from time import sleep
+
+from module.utils import html_unescape, parseFileSize
+
+from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+from module.plugins.Plugin import chunks
+from module.plugins.internal.CaptchaService import ReCaptcha
+
+key = "bGhGMkllZXByd2VEZnU5Y2NXbHhYVlZ5cEE1bkEzRUw=".decode('base64')
+
+
def getID(url):
    """Return the file ID extracted from an uploaded.net/ul.to URL."""
    return re.match(UploadedTo.__pattern__, url).group('ID')
+
+
def getAPIData(urls):
    """Query the uploaded.net ``filemultiple`` API for a batch of URLs.

    Returns a dict mapping file id -> (status, size, name, field3, url).
    The call is retried up to 5 times while the API answers
    "can't find request".
    """
    post = {"apikey": key}
    idMap = {}

    for i, url in enumerate(urls):
        # Renamed from `id` to avoid shadowing the builtin.
        file_id = getID(url)
        post["id_%s" % i] = file_id
        idMap[file_id] = url

    for i in xrange(5):
        # The API answers latin-1 text, one comma-separated line per file.
        api = unicode(getURL("http://uploaded.net/api/filemultiple", post=post, decode=False), 'iso-8859-1')
        if api != "can't find request":
            break
        else:
            sleep(3)

    result = {}

    if api:
        for line in api.splitlines():
            data = line.split(",", 4)
            if data[1] in idMap:
                # NOTE(review): field data[3]'s meaning isn't visible here —
                # presumably a checksum; confirm against the API docs.
                result[data[1]] = (data[0], data[2], data[4], data[3], idMap[data[1]])

    return result
+
+
def parseFileInfo(self, url='', html=''):
    """Parse (name, size, status, file id) from an uploaded.net file page.

    Status values follow pyload conventions: 1 = offline, 2 = online,
    3 = unknown (default). Falls back to ``self.html`` when no html is given.
    """
    if not html and hasattr(self, "html"): html = self.html
    name, size, status, found, fileid = url, 0, 3, None, None

    if re.search(self.FILE_OFFLINE_PATTERN, html):
        # File offline
        status = 1
    else:
        found = re.search(self.FILE_INFO_PATTERN, html)
        if found:
            name, fileid = html_unescape(found.group('N')), found.group('ID')
            size = parseFileSize(found.group('S'))
            status = 2

    return name, size, status, fileid
+
+
def getInfo(urls):
    """Yield pyload link-check tuples (name, size, status, url) for
    batches of up to 80 URLs, resolved via the uploaded.net API."""
    for chunk in chunks(urls, 80):
        result = []

        api = getAPIData(chunk)

        for data in api.itervalues():
            if data[0] == "online":
                result.append((html_unescape(data[2]), data[1], 2, data[4]))

            elif data[0] == "offline":
                result.append((data[4], 0, 1, data[4]))

        yield result
+
+
class UploadedTo(Hoster):
    """Uploaded.net (ul.to) download hoster.

    Availability is checked via the filemultiple API with a scraping
    fallback; supports premium (direct and indirect) and free
    (recaptcha ticket) downloads.
    """
    __name__ = "UploadedTo"
    __type__ = "hoster"
    __pattern__ = r"https?://[\w\.-]*?(uploaded\.(to|net)|ul\.to)(/file/|/?\?id=|.*?&id=|/)(?P<ID>\w+)"
    __version__ = "0.71"
    __description__ = """Uploaded.net Download Hoster"""
    __author_name__ = ("spoob", "mkaay", "zoidberg", "netpok", "stickell")
    __author_mail__ = ("spoob@pyload.org", "mkaay@mkaay.de", "zoidberg@mujmail.cz", "netpok@gmail.com", "l.stickell@yahoo.it")

    FILE_INFO_PATTERN = r'<a href="file/(?P<ID>\w+)" id="filename">(?P<N>[^<]+)</a> &nbsp;\s*<small[^>]*>(?P<S>[^<]+)</small>'
    FILE_OFFLINE_PATTERN = r'<small class="cL">Error: 404</small>'
    DL_LIMIT_PATTERN = "You have reached the max. number of possible free downloads for this hour"

    def setup(self):
        self.html = None
        self.multiDL = False
        self.resumeDownload = False
        self.url = False
        self.chunkLimit = 1  # critical problems with more chunks
        if self.account:
            self.premium = self.account.getAccountInfo(self.user)["premium"]
            if self.premium:
                self.multiDL = True
                self.resumeDownload = True

        self.fileID = getID(self.pyfile.url)
        # Normalize every supported URL form to the canonical file page.
        self.pyfile.url = "http://uploaded.net/file/%s" % self.fileID

    def process(self, pyfile):
        # Force English so the scraping patterns match.
        self.load("http://uploaded.net/language/en", just_header=True)

        api = getAPIData([pyfile.url])

        # TODO: fallback to parse from site, because api sometimes delivers wrong status codes

        if not api:
            # API unreachable: fall back to scraping the file page.
            self.logWarning("No response for API call")

            self.html = unicode(self.load(pyfile.url, decode=False), 'iso-8859-1')
            name, size, status, self.fileID = parseFileInfo(self)
            self.logDebug(name, size, status, self.fileID)
            if status == 1:
                self.offline()
            elif status == 2:
                pyfile.name, pyfile.size = name, size
            else:
                self.fail('Parse error - file info')
        elif api == 'Access denied':
            self.fail(_("API key invalid"))

        else:
            if self.fileID not in api:
                self.offline()

            self.data = api[self.fileID]
            if self.data[0] != "online":
                self.offline()

            pyfile.name = html_unescape(self.data[2])

        if self.premium:
            self.handlePremium()
        else:
            self.handleFree()

    def handlePremium(self):
        """Premium download: prefer the direct Location header, otherwise
        scrape the premium form action. Fails early on exhausted traffic."""
        info = self.account.getAccountInfo(self.user, True)
        self.logDebug("%(name)s: Use Premium Account (%(left)sGB left)" % {"name": self.__name__,
                                                                           "left": info["trafficleft"] / 1024 / 1024})
        if int(self.data[1]) / 1024 > info["trafficleft"]:
            self.logInfo(_("%s: Not enough traffic left" % self.__name__))
            self.account.empty(self.user)
            self.resetAccount()
            self.fail(_("Traffic exceeded"))

        header = self.load("http://uploaded.net/file/%s" % self.fileID, just_header=True)
        if "location" in header:
            # Direct download enabled for the account.
            # BUGFIX: was a stray `print` in library code.
            self.logDebug("Direct Download: " + header['location'])
            self.download(header['location'])
        else:
            # Indirect download: scrape the premium form action.
            self.html = self.load("http://uploaded.net/file/%s" % self.fileID)
            found = re.search(r'<div class="tfree".*\s*<form method="post" action="(.*?)"', self.html)
            if not found:
                self.fail("Download URL not found. Try to enable direct downloads.")
            url = found.group(1)
            # BUGFIX: was a stray `print` in library code.
            self.logDebug("Premium URL: " + url)
            self.download(url, post={})

    def handleFree(self):
        """Free download: wait out the countdown, then solve recaptcha
        (up to 5 attempts) against the ticket endpoint."""
        self.html = self.load(self.pyfile.url, decode=True)

        if 'var free_enabled = false;' in self.html:
            self.logError("Free-download capacities exhausted.")
            self.retry(24, 300)

        found = re.search(r"Current waiting period: <span>(\d+)</span> seconds", self.html)
        if not found:
            self.fail("File not downloadable for free users")
        self.setWait(int(found.group(1)))

        js = self.load("http://uploaded.net/js/download.js", decode=True)

        # NOTE(review): assumes the recaptcha key is always present in
        # download.js — a missing match raises AttributeError below; confirm.
        challengeId = re.search(r'Recaptcha\.create\("([^"]+)', js)

        url = "http://uploaded.net/io/ticket/captcha/%s" % self.fileID
        downloadURL = ""

        for i in range(5):
            re_captcha = ReCaptcha(self)
            challenge, result = re_captcha.challenge(challengeId.group(1))
            options = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": result}
            self.wait()

            result = self.load(url, post=options)
            self.logDebug("result: %s" % result)

            if "limit-size" in result:
                self.fail("File too big for free download")
            elif "limit-slot" in result:  # Temporary restriction so just wait a bit
                self.setWait(30 * 60, True)
                self.wait()
                self.retry()
            elif "limit-parallel" in result:
                self.fail("Cannot download in parallel")
            elif self.DL_LIMIT_PATTERN in result:  # limit-dl
                self.setWait(3 * 60 * 60, True)
                self.wait()
                self.retry()
            elif 'err:"captcha"' in result:
                self.logError("ul.net captcha is disabled")
                self.invalidCaptcha()
            elif "type:'download'" in result:
                self.correctCaptcha()
                downloadURL = re.search("url:'([^']+)", result).group(1)
                break
            else:
                # BUGFIX: the '%s' placeholder was previously left unfilled;
                # include the server response in the failure message.
                self.fail("Unknown error '%s'" % result)

        if not downloadURL:
            self.fail("No Download url retrieved/all captcha attempts failed")

        self.download(downloadURL, disposition=True)
        check = self.checkDownload({"limit-dl": self.DL_LIMIT_PATTERN})
        if check == "limit-dl":
            self.setWait(3 * 60 * 60, True)
            self.wait()
            self.retry()
diff --git a/pyload/plugins/hoster/UploadheroCom.py b/pyload/plugins/hoster/UploadheroCom.py
new file mode 100644
index 000000000..65d6cc4e9
--- /dev/null
+++ b/pyload/plugins/hoster/UploadheroCom.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+# Test link (random.bin):
+# http://uploadhero.co/dl/wQBRAVSM
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
class UploadheroCom(SimpleHoster):
    """UploadHero.com hoster plugin: captcha-gated free downloads and
    direct premium downloads, with handling for the IP-block page."""
    __name__ = "UploadheroCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?uploadhero\.com?/dl/\w+"
    __version__ = "0.14"
    __description__ = """UploadHero.com plugin"""
    __author_name__ = ("mcmyst", "zoidberg")
    __author_mail__ = ("mcmyst@hotmail.fr", "zoidberg@mujmail.cz")

    # Force the English site so the patterns below match.
    SH_COOKIES = [("http://uploadhero.com", "lang", "en")]
    FILE_NAME_PATTERN = r'<div class="nom_de_fichier">(?P<N>.*?)</div>'
    FILE_SIZE_PATTERN = r'Taille du fichier : </span><strong>(?P<S>.*?)</strong>'
    FILE_OFFLINE_PATTERN = r'<p class="titre_dl_2">|<div class="raison"><strong>Le lien du fichier ci-dessus n\'existe plus.'

    DOWNLOAD_URL_PATTERN = r'<a href="([^"]+)" id="downloadnow"'

    # IP-based rate-limit page and its minutes/seconds countdown.
    IP_BLOCKED_PATTERN = r'href="(/lightbox_block_download.php\?min=.*?)"'
    IP_WAIT_PATTERN = r'<span id="minutes">(\d+)</span>.*\s*<span id="seconds">(\d+)</span>'

    CAPTCHA_PATTERN = r'"(/captchadl\.php\?[a-z0-9]+)"'
    FREE_URL_PATTERN = r'var magicomfg = \'<a href="(http://[^<>"]*?)"|"(http://storage\d+\.uploadhero\.com/\?d=[A-Za-z0-9]+/[^<>"/]+)"'

    def handleFree(self):
        """Free download: solve the captcha (up to 5 attempts) and follow
        the extracted storage link."""
        self.checkErrors()

        found = re.search(self.CAPTCHA_PATTERN, self.html)
        if not found: self.parseError("Captcha URL")
        captcha_url = "http://uploadhero.com" + found.group(1)

        for i in range(5):
            captcha = self.decryptCaptcha(captcha_url)
            self.html = self.load(self.pyfile.url, get = {"code": captcha})
            found = re.search(self.FREE_URL_PATTERN, self.html)
            if found:
                self.correctCaptcha()
                # Either alternative of FREE_URL_PATTERN may have matched.
                download_url = found.group(1) or found.group(2)
                break
            else:
                self.invalidCaptcha()
        else:
            # for/else: all 5 captcha attempts failed.
            self.fail("No valid captcha code entered")

        self.download(download_url)

    def handlePremium(self):
        self.logDebug("%s: Use Premium Account" % self.__name__)
        self.html = self.load(self.pyfile.url)
        link = re.search(self.DOWNLOAD_URL_PATTERN, self.html).group(1)
        self.logDebug("Downloading link : '%s'" % link)
        self.download(link)

    def checkErrors(self):
        """If the IP-block page is shown, wait out the advertised delay
        (fallback 300 s) and retry."""
        found = re.search(self.IP_BLOCKED_PATTERN, self.html)
        if found:
            self.html = self.load("http://uploadhero.com%s" % found.group(1))

            found = re.search(self.IP_WAIT_PATTERN, self.html)
            wait_time = (int(found.group(1)) * 60 + int(found.group(2))) if found else 300
            self.setWait(wait_time, True)
            self.wait()
            self.retry()

getInfo = create_getInfo(UploadheroCom)
diff --git a/pyload/plugins/hoster/UploadingCom.py b/pyload/plugins/hoster/UploadingCom.py
new file mode 100644
index 000000000..a98c3bf71
--- /dev/null
+++ b/pyload/plugins/hoster/UploadingCom.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: jeix
+"""
+
+import re
+from pycurl import HTTPHEADER
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, timestamp
+from module.common.json_layer import json_loads
+
class UploadingCom(SimpleHoster):
    """Uploading.com hoster plugin: AJAX-driven free flow and a
    JSON link endpoint for premium accounts."""
    __name__ = "UploadingCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?uploading\.com/files/(?:get/)?(?P<ID>[\w\d]+)"
    __version__ = "0.33"
    __description__ = """Uploading.Com File Download Hoster"""
    __author_name__ = ("jeix", "mkaay", "zoidberg")
    __author_mail__ = ("jeix@hasnomail.de", "mkaay@mkaay.de", "zoidberg@mujmail.cz")

    FILE_NAME_PATTERN = r'<title>Download (?P<N>.*?) for free on uploading.com</title>'
    FILE_SIZE_PATTERN = r'<span>File size: (?P<S>.*?)</span>'
    FILE_OFFLINE_PATTERN = r'<h2.*?>The requested file is not found</h2>'

    def process(self, pyfile):
        # set lang to english
        self.req.cj.setCookie("uploading.com", "lang", "1")
        self.req.cj.setCookie("uploading.com", "language", "1")
        self.req.cj.setCookie("uploading.com", "setlang", "en")
        self.req.cj.setCookie("uploading.com", "_lang", "en")

        # Normalize to the /files/get/ URL form.
        if not "/get/" in self.pyfile.url:
            self.pyfile.url = self.pyfile.url.replace("/files", "/files/get")

        self.html = self.load(pyfile.url, decode = True)
        self.file_info = self.getFileInfo()

        if self.premium:
            self.handlePremium()
        else:
            self.handleFree()

    def handlePremium(self):
        """Ask the JSON endpoint for the premium link and download it."""
        postData = {'action': 'get_link',
                    'code': self.file_info['ID'],
                    'pass': 'undefined'}

        self.html = self.load('http://uploading.com/files/get/?JsHttpRequest=%d-xml' % timestamp(), post=postData)
        url = re.search(r'"link"\s*:\s*"(.*?)"', self.html)
        if url:
            url = url.group(1).replace("\\/", "/")
            self.download(url)

        # Reached only when no premium link was found in the response.
        raise Exception("Plugin defect.")

    def handleFree(self):
        """Free flow: honor download limits, then perform the two AJAX
        calls (wait time, link) and scrape the final form action."""
        found = re.search('<h2>((Daily )?Download Limit)</h2>', self.html)
        if found:
            self.pyfile.error = found.group(1)
            self.logWarning(self.pyfile.error)
            # Daily limit: retry in 6 h; plain limit: retry in 15 min.
            self.retry(max_tries=6, wait_time = 21600 if found.group(2) else 900, reason = self.pyfile.error)

        ajax_url = "http://uploading.com/files/get/?ajax"
        self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
        self.req.http.lastURL = self.pyfile.url

        # Step 1: request the wait time.
        response = json_loads(self.load(ajax_url, post = {'action': 'second_page', 'code': self.file_info['ID']}))
        if 'answer' in response and 'wait_time' in response['answer']:
            wait_time = int(response['answer']['wait_time'])
            self.logInfo("%s: Waiting %d seconds." % (self.__name__, wait_time))
            self.setWait(wait_time)
            self.wait()
        else:
            self.pluginParseError("AJAX/WAIT")

        # Step 2: request the intermediate link.
        response = json_loads(self.load(ajax_url, post = {'action': 'get_link', 'code': self.file_info['ID'], 'pass': 'false'}))
        if 'answer' in response and 'link' in response['answer']:
            url = response['answer']['link']
        else:
            self.pluginParseError("AJAX/URL")

        # Step 3: the intermediate page holds the real form action URL.
        self.html = self.load(url)
        found = re.search(r'<form id="file_form" action="(.*?)"', self.html)
        if found:
            url = found.group(1)
        else:
            self.pluginParseError("URL")

        self.download(url)

        check = self.checkDownload({"html" : re.compile("\A<!DOCTYPE html PUBLIC")})
        if check == "html":
            self.logWarning("Redirected to a HTML page, wait 10 minutes and retry")
            self.setWait(600, True)
            self.wait()

getInfo = create_getInfo(UploadingCom)
diff --git a/pyload/plugins/hoster/UptoboxCom.py b/pyload/plugins/hoster/UptoboxCom.py
new file mode 100644
index 000000000..60a93c1e5
--- /dev/null
+++ b/pyload/plugins/hoster/UptoboxCom.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+
class UptoboxCom(XFileSharingPro):
    """Uptobox.com hoster — thin specialization of the XFileSharingPro base."""
    __name__ = "UptoboxCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*?uptobox.com/\w{12}"
    __version__ = "0.06"
    __description__ = """Uptobox.com hoster plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    FILE_INFO_PATTERN = r'<h2>\s*Download File\s*<span[^>]*>(?P<N>[^>]+)</span></h2>\s*[^\(]*\((?P<S>[^\)]+)\)</h2>'
    FILE_OFFLINE_PATTERN = r'<center>File Not Found</center>'
    HOSTER_NAME = "uptobox.com"

    def setup(self):
        # Resume and parallel downloads only for premium accounts.
        self.resumeDownload = self.multiDL = self.premium
        self.chunkLimit = 1

getInfo = create_getInfo(UptoboxCom)
diff --git a/pyload/plugins/hoster/VeehdCom.py b/pyload/plugins/hoster/VeehdCom.py
new file mode 100644
index 000000000..4486eb84a
--- /dev/null
+++ b/pyload/plugins/hoster/VeehdCom.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import re
+from module.plugins.Hoster import Hoster
+
class VeehdCom(Hoster):
    """Veehd.com video hoster: scrapes the embedded DivX source URL and
    derives a sanitized .avi filename from the page title."""
    __name__ = 'VeehdCom'
    __type__ = 'hoster'
    __pattern__ = r'http://veehd\.com/video/\d+_\S+'
    __config__ = [
        ('filename_spaces', 'bool', "Allow spaces in filename", 'False'),
        ('replacement_char', 'str', "Filename replacement character", '_'),
    ]
    __version__ = '0.22'
    __description__ = """Veehd.com Download Hoster"""
    __author_name__ = ('cat')
    __author_mail__ = ('cat@pyload')

    def _debug(self, msg):
        """Log a debug message tagged with the plugin name."""
        self.logDebug('[%s] %s' % (self.__name__, msg))

    def setup(self):
        self.html = None
        self.multiDL = True
        self.req.canContinue = True

    def process(self, pyfile):
        """Entry point: fetch the page, verify availability, download."""
        self.download_html()
        if not self.file_exists():
            self.offline()
        pyfile.name = self.get_file_name()
        self.download(self.get_file_url())

    def download_html(self):
        page_url = self.pyfile.url
        self._debug("Requesting page: %s" % (repr(page_url),))
        self.html = self.load(page_url)

    def file_exists(self):
        if self.html is None:
            self.download_html()
        # The generic "<title>Veehd</title>" page means the video is gone.
        return '<title>Veehd</title>' not in self.html

    def get_file_name(self):
        """Build a sanitized filename (always .avi) from the page title."""
        if self.html is None:
            self.download_html()

        m = re.search(r'<title[^>]*>([^<]+) on Veehd</title>', self.html)
        if not m:
            self.fail("video title not found")

        # Strip characters not allowed by the user's filename settings.
        if self.getConfig('filename_spaces'):
            pattern = '[^0-9A-Za-z\.\ ]+'
        else:
            pattern = '[^0-9A-Za-z\.]+'
        cleaned = re.sub(pattern, self.getConfig('replacement_char'), m.group(1))
        return cleaned + '.avi'

    def get_file_url(self):
        """Return the absolute downloadable file URL from the embed tag."""
        if self.html is None:
            self.download_html()

        m = re.search(r'<embed type="video/divx" '
                      r'src="(http://([^/]*\.)?veehd\.com/dl/[^"]+)"',
                      self.html)
        if not m:
            self.fail("embedded video url not found")
        return m.group(1)
diff --git a/pyload/plugins/hoster/WarserverCz.py b/pyload/plugins/hoster/WarserverCz.py
new file mode 100644
index 000000000..b256f8d1b
--- /dev/null
+++ b/pyload/plugins/hoster/WarserverCz.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+#similar to coolshare.cz (down)
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.network.HTTPRequest import BadHeader
+from module.utils import html_unescape
+
+class WarserverCz(SimpleHoster):
+ __name__ = "WarserverCz"
+ __type__ = "hoster"
+ __pattern__ = r"http://(?:\w*\.)?warserver.cz/stahnout/(?P<ID>\d+)/.+"
+ __version__ = "0.12"
+ __description__ = """Warserver.cz"""
+ __author_name__ = ("zoidberg")
+
+ FILE_NAME_PATTERN = r'<h1.*?>(?P<N>[^<]+)</h1>'
+ FILE_SIZE_PATTERN = r'<li>Velikost: <strong>(?P<S>[^<]+)</strong>'
+ FILE_OFFLINE_PATTERN = r'<h1>Soubor nenalezen</h1>'
+
+ PREMIUM_URL_PATTERN = r'href="(http://[^/]+/dwn-premium.php.*?)"'
+ DOMAIN = "http://csd01.coolshare.cz"
+
+ DOMAIN = "http://s01.warserver.cz"
+
+ def handleFree(self):
+ try:
+ self.download("%s/dwn-free.php?fid=%s" % (self.DOMAIN, self.file_info['ID']))
+ except BadHeader, e:
+ self.logError(e)
+ if e.code == 403:
+ self.longWait(60,60)
+ else: raise
+ self.checkDownloadedFile()
+
+ def handlePremium(self):
+ found = re.search(self.PREMIUM_URL_PATTERN, self.html)
+ if not found: self.parseError("Premium URL")
+ url = html_unescape(found.group(1))
+ self.logDebug("Premium URL: " + url)
+ if not url.startswith("http://"): self.resetAccount()
+ self.download(url)
+ self.checkDownloadedFile()
+
+ def checkDownloadedFile(self):
+ check = self.checkDownload({
+ "offline": ">404 Not Found<"
+ })
+
+ if check == "offline":
+ self.offline()
+
+getInfo = create_getInfo(WarserverCz) \ No newline at end of file
diff --git a/pyload/plugins/hoster/WebshareCz.py b/pyload/plugins/hoster/WebshareCz.py
new file mode 100644
index 000000000..195e65a93
--- /dev/null
+++ b/pyload/plugins/hoster/WebshareCz.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.network.HTTPRequest import BadHeader
+
class WebshareCz(SimpleHoster):
    """WebShare.cz hoster plugin — the download link is decoded by
    evaluating the page's own JavaScript."""
    __name__ = "WebshareCz"
    __type__ = "hoster"
    __pattern__ = r"http://(\w+\.)?webshare.cz/(stahnout/)?(?P<ID>\w{10})-.+"
    __version__ = "0.12"
    __description__ = """WebShare.cz"""
    __author_name__ = ("zoidberg")

    FILE_NAME_PATTERN = r'<h3>Stahujete soubor: </h3>\s*<div class="textbox">(?P<N>[^<]+)</div>'
    FILE_SIZE_PATTERN = r'<h3>Velikost souboru je: </h3>\s*<div class="textbox">(?P<S>[^<]+)</div>'
    FILE_OFFLINE_PATTERN = r'<h3>Soubor ".*?" nebyl nalezen.</h3>'

    DOWNLOAD_LINK_PATTERN = r'id="download_link" href="(?P<url>.*?)"'

    def setup(self):
        self.multiDL = True

    def handleFree(self):
        # The page embeds two JS snippets: `var l...` (encoded link) and
        # `var keyStr...` (decoding table); run them and call dec(l).
        encoded_link_js = re.search(r"(var l.*)", self.html).group(1)
        key_table_js = re.search(r"(var keyStr.*)", self.html).group(1)
        url = self.js.eval("%s\n%s\ndec(l)" % (encoded_link_js, key_table_js))

        self.logDebug('Download link: ' + url)
        self.download(url)

getInfo = create_getInfo(WebshareCz)
diff --git a/pyload/plugins/hoster/WrzucTo.py b/pyload/plugins/hoster/WrzucTo.py
new file mode 100644
index 000000000..4a5e89f22
--- /dev/null
+++ b/pyload/plugins/hoster/WrzucTo.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from pycurl import HTTPHEADER
+
class WrzucTo(SimpleHoster):
    """Wrzuc.to hoster plugin (free download flow only)."""
    __name__ = "WrzucTo"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w+\.)*?wrzuc\.to/([a-zA-Z0-9]+(\.wt|\.html)|(\w+/?linki/[a-zA-Z0-9]+))"
    __version__ = "0.01"
    __description__ = """Wrzuc.to plugin - free only"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    SH_COOKIES = [("http://www.wrzuc.to", "language", "en")]
    FILE_SIZE_PATTERN = r'class="info">\s*<tr>\s*<td>(?P<S>.*?)</td>'
    FILE_NAME_PATTERN = r'id="file_info">\s*<strong>(?P<N>.*?)</strong>'

    def setup(self):
        self.multiDL = True

    def handleFree(self):
        """Replicate the site's two-call AJAX handshake (prepair, then
        download_link) and download from the returned server."""
        # The page embeds both tokens as `md5: "..."` and `file: "..."`.
        data = dict(re.findall(r'(md5|file): "(.*?)"', self.html))
        if len(data) != 2: self.parseError('File ID')

        self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
        self.req.http.lastURL = self.pyfile.url
        self.load("http://www.wrzuc.to/ajax/server/prepair", post = {"md5": data['md5']})

        self.req.http.lastURL = self.pyfile.url
        self.html = self.load("http://www.wrzuc.to/ajax/server/download_link", post = {"file": data['file']})

        # The response adds download_link and server_id to the token dict.
        data.update(re.findall(r'"(download_link|server_id)":"(.*?)"', self.html))
        if len(data) != 4: self.parseError('Download URL')

        download_url = "http://%s.wrzuc.to/pobierz/%s" % (data['server_id'], data['download_link'])
        self.logDebug("Download URL: %s" % download_url)
        self.download(download_url)

getInfo = create_getInfo(WrzucTo)
+
diff --git a/pyload/plugins/hoster/WuploadCom.py b/pyload/plugins/hoster/WuploadCom.py
new file mode 100644
index 000000000..3dab1b1bb
--- /dev/null
+++ b/pyload/plugins/hoster/WuploadCom.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+import string
+from urllib import unquote
+
+from module.plugins.Hoster import Hoster
+from module.plugins.internal.CaptchaService import ReCaptcha
+from module.plugins.Plugin import chunks
+
+from module.network.RequestFactory import getURL
+from module.common.json_layer import json_loads
+
+
def getInfo(urls):
    """Yield online-check results for *urls* in batches of 20.

    Each yielded item is a list of tuples ``(name, size, status, url)``;
    URLs that do not match the hoster pattern are reported with status 1
    and never sent to the API. For matching URLs the wupload JSON API is
    queried once per batch; status 2 is used for AVAILABLE files.
    """
    for chunk in chunks(urls, 20):
        result = []
        ids = dict()
        for url in chunk:
            # renamed from 'id' to avoid shadowing the builtin id()
            file_id = getId(url)
            if file_id:
                ids[file_id] = url
            else:
                # unparsable URL -> report as offline without an API call
                result.append((None, 0, 1, url))

        if ids:
            check_url = "http://api.wupload.com/link?method=getInfo&format=json&ids=" + ",".join(ids.keys())
            response = json_loads(getURL(check_url).decode("utf8", "ignore"))
            for item in response["FSApi_Link"]["getInfo"]["response"]["links"]:
                if item["status"] != "AVAILABLE":
                    result.append((None, 0, 1, ids[str(item["id"])]))
                else:
                    result.append((unquote(item["filename"]), item["size"], 2, ids[str(item["id"])]))
        yield result
+
+
def getId(url):
    """Extract the wupload file id from *url*, or return None on no match.

    Slashes inside the id are replaced with dashes, matching the id form
    the wupload JSON API expects.
    """
    match = re.search(WuploadCom.FILE_ID_PATTERN, url)
    if match:
        # str.replace instead of the long-deprecated string.replace()
        # module-level function (removed in Python 3)
        return match.group("id").replace("/", "-")
    return None
+
+
class WuploadCom(Hoster):
    """Hoster plugin for wupload.com.

    Premium downloads go through the wupload JSON API; free downloads
    scrape the HTML page, honour countdown waits and solve ReCaptcha
    challenges (up to 5 attempts).
    """
    __name__ = "WuploadCom"
    __type__ = "hoster"
    __pattern__ = r"http://[\w\.]*?wupload\..*?/file/(([a-z][0-9]+/)?[0-9]+)(/.*)?"
    __version__ = "0.21"
    __description__ = """Wupload com"""
    __author_name__ = ("jeix", "paulking")
    __author_mail__ = ("jeix@hasnomail.de", "")

    # Base endpoint of the wupload JSON API (used by checkFile/getDomain/downloadPremium)
    API_ADDRESS = "http://api.wupload.com"
    # Splits a URL around the regional ".wupload.xx" domain part
    URL_DOMAIN_PATTERN = r'(?P<prefix>.*?)(?P<domain>.wupload\..+?)(?P<suffix>/.*)'
    FILE_ID_PATTERN = r'/file/(?P<id>([a-z][0-9]+/)?[0-9]+)(/.*)?'
    FILE_LINK_PATTERN = r'<p><a href="(http://.+?\.wupload\..+?)"><span>Download Now'
    WAIT_TIME_PATTERN = r'countDownDelay = (?P<wait>\d+)'
    WAIT_TM_PATTERN = r"name='tm' value='(.*?)' />"
    WAIT_TM_HASH_PATTERN = r"name='tm_hash' value='(.*?)' />"
    CAPTCHA_TYPE1_PATTERN = r'Recaptcha.create\("(.*?)",'
    CAPTCHA_TYPE2_PATTERN = r'id="recaptcha_image"><img style="display: block;" src="(.+)image?c=(.+?)"'

    def init(self):
        """Read premium status from the account; free accounts are limited
        to one chunk and no parallel downloads."""
        if self.account:
            self.premium = self.account.getAccountInfo(self.user)["premium"]
        if not self.premium:
            self.chunkLimit = 1
            self.multiDL = False

    def process(self, pyfile):
        """Entry point: normalize/validate the URL via the API, then
        dispatch to the premium or free download flow."""
        self.pyfile = pyfile

        self.pyfile.url = self.checkFile(self.pyfile.url)

        if self.premium:
            self.downloadPremium()
        else:
            self.downloadFree()

    def checkFile(self, url):
        """Check availability through the JSON API and return the URL
        rewritten to the correct regional domain.

        Aborts via self.offline()/self.fail() when the file is missing,
        password protected, or the URL does not match the id pattern.
        """
        id = getId(url)
        self.logDebug("file id is %s" % id)
        if id:
            # Use the api to check the current status of the file and fixup data
            check_url = self.API_ADDRESS + "/link?method=getInfo&format=json&ids=%s" % id
            result = json_loads(self.load(check_url, decode=True))
            item = result["FSApi_Link"]["getInfo"]["response"]["links"][0]
            self.logDebug("api check returns %s" % item)

            if item["status"] != "AVAILABLE":
                self.offline()
            if item["is_password_protected"] != 0:
                self.fail("This file is password protected")

            # ignored this check due to false api information
            #if item["is_premium_only"] != 0 and not self.premium:
            #    self.fail("need premium account for file")

            self.pyfile.name = unquote(item["filename"])

            # Fix the url and resolve the domain to the correct regional variation
            url = item["url"]
            urlparts = re.search(self.URL_DOMAIN_PATTERN, url)
            if urlparts:
                url = urlparts.group("prefix") + self.getDomain() + urlparts.group("suffix")
                self.logDebug("localised url is %s" % url)
            return url
        else:
            self.fail("Invalid URL")

    def getDomain(self):
        """Ask the API which wupload domain serves the current IP."""
        result = json_loads(
            self.load(self.API_ADDRESS + "/utility?method=getWuploadDomainForCurrentIp&format=json", decode=True))
        self.logDebug("%s: response to get domain %s" % (self.__name__, result))
        return result["FSApi_Utility"]["getWuploadDomainForCurrentIp"]["response"]

    def downloadPremium(self):
        """Fetch the direct link through the authenticated API and download it."""
        self.logDebug("Premium download")

        # '%%s' keeps user/password placeholders for the second format below
        api = self.API_ADDRESS + "/link?method=getDownloadLink&u=%%s&p=%%s&ids=%s" % getId(self.pyfile.url)

        result = json_loads(self.load(api % (self.user, self.account.getAccountData(self.user)["password"])))
        links = result["FSApi_Link"]["getDownloadLink"]["response"]["links"]

        #wupload seems to return list and no dicts
        if type(links) == dict:
            info = links.values()[0]
        else:
            info = links[0]

        if "status" in info and info["status"] == "NOT_AVAILABLE":
            self.tempOffline()

        self.download(info["url"])

    def downloadFree(self):
        """Free flow: load the page, wait out countdowns, solve captcha
        challenges (max 5 tries) and download the scraped link."""
        self.logDebug("Free download")
        # Get initial page
        self.html = self.load(self.pyfile.url)
        url = self.pyfile.url + "?start=1"
        self.html = self.load(url)
        self.handleErrors()

        finalUrl = re.search(self.FILE_LINK_PATTERN, self.html)

        if not finalUrl:
            self.doWait(url)

            chall = re.search(self.CAPTCHA_TYPE1_PATTERN, self.html)
            chall2 = re.search(self.CAPTCHA_TYPE2_PATTERN, self.html)
            if chall or chall2:
                for i in range(5):
                    re_captcha = ReCaptcha(self)
                    if chall:
                        self.logDebug("Captcha type1")
                        challenge, result = re_captcha.challenge(chall.group(1))
                    else:
                        self.logDebug("Captcha type2")
                        server = chall2.group(1)
                        challenge = chall2.group(2)
                        result = re_captcha.result(server, challenge)

                    postData = {"recaptcha_challenge_field": challenge,
                                "recaptcha_response_field": result}

                    self.html = self.load(url, post=postData)
                    self.handleErrors()
                    # captcha is solved when neither pattern reappears
                    chall = re.search(self.CAPTCHA_TYPE1_PATTERN, self.html)
                    chall2 = re.search(self.CAPTCHA_TYPE2_PATTERN, self.html)

                    if chall or chall2:
                        self.invalidCaptcha()
                    else:
                        self.correctCaptcha()
                        break

            finalUrl = re.search(self.FILE_LINK_PATTERN, self.html)

            if not finalUrl:
                self.fail("Couldn't find free download link")

        self.logDebug("got download url %s" % finalUrl.group(1))
        self.download(finalUrl.group(1))

    def doWait(self, url):
        """Honour the page's countdown timer(s), reloading until none remain.

        There may be more than one wait period: an extended wait when
        download limits are exceeded (in which case a reconnect is
        requested) and a short wait before every download. Visually these
        are the same; the short one carries tm/tm_hash codes that unlock
        the next page.
        """
        waitSearch = re.search(self.WAIT_TIME_PATTERN, self.html)
        while waitSearch:
            wait = int(waitSearch.group("wait"))
            if wait > 300:
                self.wantReconnect = True

            self.setWait(wait)
            self.logDebug("Waiting %d seconds." % wait)
            self.wait()

            tm = re.search(self.WAIT_TM_PATTERN, self.html)
            tm_hash = re.search(self.WAIT_TM_HASH_PATTERN, self.html)

            if tm and tm_hash:
                tm = tm.group(1)
                tm_hash = tm_hash.group(1)
                self.html = self.load(url, post={"tm": tm, "tm_hash": tm_hash})
                self.handleErrors()
                break
            else:
                self.html = self.load(url)
                self.handleErrors()
                waitSearch = re.search(self.WAIT_TIME_PATTERN, self.html)

    def handleErrors(self):
        """Map known error strings on the page to fail()/offline() calls."""
        if "This file is available for premium users only." in self.html:
            self.fail("need premium account for file")

        if "The file that you're trying to download is larger than" in self.html:
            self.fail("need premium account for file")

        if "Free users may only download 1 file at a time" in self.html:
            self.fail("only 1 file at a time for free users")

        if "Free user can not download files" in self.html:
            self.fail("need premium account for file")

        if "Download session in progress" in self.html:
            self.fail("already downloading")

        if "This file is password protected" in self.html:
            self.fail("This file is password protected")

        if "An Error Occurred" in self.html:
            self.fail("A server error occured.")

        if "This file was deleted" in self.html:
            self.offline()
diff --git a/pyload/plugins/hoster/X7To.py b/pyload/plugins/hoster/X7To.py
new file mode 100644
index 000000000..965d84543
--- /dev/null
+++ b/pyload/plugins/hoster/X7To.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+import re
+
+from module.plugins.Hoster import Hoster
+
+from module.network.RequestFactory import getURL
+
def getInfo(urls):
    """Yield a single batch reporting every URL with size 0 and status 1."""
    batch = []
    for link in urls:
        batch.append((link, 0, 1, link))
    yield batch
+
+
class X7To(Hoster):
    """X7.to download hoster.

    The service has shut down: process() aborts immediately. The
    premium/free handlers below are kept for reference.
    """
    __name__ = "X7To"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www.)?x7.to/"
    __version__ = "0.3"
    __description__ = """X7.To File Download Hoster"""
    __author_name__ = ("ernieb")
    __author_mail__ = ("ernieb")

    FILE_INFO_PATTERN = r'<meta name="description" content="Download: (.*?) \(([0-9,.]+) (KB|MB|GB)\)'

    def init(self):
        """Set connection limits and extract/normalize the file id from the URL."""
        if self.premium:
            self.multiDL = False
            self.resumeDownload = False
            self.chunkLimit = 1
        else:
            self.multiDL = False

        self.file_id = re.search(r"http://x7.to/([a-zA-Z0-9]+)", self.pyfile.url).group(1)
        self.logDebug("file id is %s" % self.file_id)
        self.pyfile.url = "http://x7.to/" + self.file_id

    def process(self, pyfile):
        # fixed grammar in the user-visible failure reason ("not longer")
        self.fail("Hoster no longer available")

    def handlePremium(self):
        """Premium download; falls back to the free handler when the
        traffic limit has been reached."""
        # check if over limit first
        overLimit = re.search(r'<a onClick="cUser.buyTraffic\(\)" id="DL">', self.html)
        if overLimit:
            self.logDebug("over limit, falling back to free")
            self.handleFree()
        else:
            realurl = re.search(r'<a href="(http://stor.*?)" id="DL">', self.html)
            if realurl:
                realurl = realurl.group(1)
                self.logDebug("premium url found %s" % realurl)
                self.download(realurl)
            else:
                # fixed: previously fell through and called download(None)
                self.logDebug("premium link not found")
                self.fail("Premium download link not found")

    def handleFree(self):
        """Free download: resolve the ticket id, honour waits/limits and
        download the returned URL."""
        # find file id
        file_id = re.search(r"var dlID = '(.*?)'", self.html)
        if not file_id:
            self.fail("Free download id not found")

        file_url = "http://x7.to/james/ticket/dl/" + file_id.group(1)
        self.logDebug("download id %s" % file_id.group(1))

        self.html = self.load(file_url, ref=False, decode=True)

        # deal with errors
        if "limit-dl" in self.html:
            self.logDebug("Limit reached ... waiting")
            self.setWait(900, True)
            self.wait()
            self.retry()

        if "limit-parallel" in self.html:
            self.fail("Cannot download in parallel")

        # no waiting required, go to download
        waitCheck = re.search(r"wait:(\d*),", self.html)
        if waitCheck:
            waitCheck = int(waitCheck.group(1))
            self.setWait(waitCheck)
            self.wait()

        urlCheck = re.search(r"url:'(.*?)'", self.html)
        url = None
        if urlCheck:
            url = urlCheck.group(1)
            self.logDebug("free url found %s" % url)

        if url:
            try:
                self.download(url)
            except Exception:
                # narrowed from a bare except; keep the best-effort logging
                self.logDebug("downloading url failed: %s" % url)
        else:
            # fixed: message previously claimed the URL *was* found
            self.fail("No free download url found")
diff --git a/pyload/plugins/hoster/XFileSharingPro.py b/pyload/plugins/hoster/XFileSharingPro.py
new file mode 100644
index 000000000..d5a32dd16
--- /dev/null
+++ b/pyload/plugins/hoster/XFileSharingPro.py
@@ -0,0 +1,318 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from random import random
+from urllib import unquote
+from urlparse import urlparse
+from pycurl import FOLLOWLOCATION, LOW_SPEED_TIME
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, PluginParseError
+from module.plugins.internal.CaptchaService import ReCaptcha, SolveMedia
+from module.utils import html_unescape
+
class XFileSharingPro(SimpleHoster):
    """
    Common base for XFileSharingPro hosters like EasybytezCom, CramitIn, FiledinoCom...
    Some hosters may work straight away when added to __pattern__
    However, most of them will NOT work because they are either down or running a customized version
    """
    __name__ = "XFileSharingPro"
    __type__ = "hoster"
    __pattern__ = r"^unmatchable$"
    __version__ = "0.18"
    __description__ = """XFileSharingPro common hoster base"""
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    FILE_NAME_PATTERN = r'<input type="hidden" name="fname" value="(?P<N>[^"]+)"'
    FILE_SIZE_PATTERN = r'You have requested <font color="red">[^<]+</font> \((?P<S>[^<]+)\)</font>'
    FILE_INFO_PATTERN = r'<tr><td align=right><b>Filename:</b></td><td nowrap>(?P<N>[^<]+)</td></tr>\s*.*?<small>\((?P<S>[^<]+)\)</small>'
    FILE_OFFLINE_PATTERN = r'<(b|h[1-6])>File Not Found</(b|h[1-6])>'

    WAIT_PATTERN = r'<span id="countdown_str">.*?>(\d+)</span>'
    LONG_WAIT_PATTERN = r'(?P<H>\d+(?=\s*hour))?.*?(?P<M>\d+(?=\s*minute))?.*?(?P<S>\d+(?=\s*second))?'
    OVR_DOWNLOAD_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)'
    OVR_KILL_LINK_PATTERN = r'<h2>Delete Link</h2>\s*<textarea[^>]*>([^<]+)'
    CAPTCHA_URL_PATTERN = r'(http://[^"\']+?/captchas?/[^"\']+)'
    RECAPTCHA_URL_PATTERN = r'http://[^"\']+?recaptcha[^"\']+?\?k=([^"\']+)"'
    CAPTCHA_DIV_PATTERN = r'<b>Enter code.*?<div.*?>(.*?)</div>'
    SOLVEMEDIA_PATTERN = r'http:\/\/api\.solvemedia\.com\/papi\/challenge\.script\?k=(.*?)"'
    ERROR_PATTERN = r'class=["\']err["\'][^>]*>(.*?)</'

    def setup(self):
        """When running as the generic base, take the pattern from the
        plugin manager; subclasses get resume/multiDL only with premium."""
        if self.__name__ == "XFileSharingPro":
            self.__pattern__ = self.core.pluginManager.hosterPlugins[self.__name__]['pattern']
            self.multiDL = True
        else:
            self.resumeDownload = self.multiDL = self.premium

        self.chunkLimit = 1

    def process(self, pyfile):
        """Entry point: try direct download first, then premium or free flow.

        URLs that do not match __pattern__ are handled through the
        "overriden" leech flow (premium only).
        """
        self.prepare()

        if not re.match(self.__pattern__, self.pyfile.url):
            if self.premium:
                self.handleOverriden()
            else:
                self.fail("Only premium users can download from other hosters with %s" % self.HOSTER_NAME)
        else:
            try:
                self.html = self.load(pyfile.url, cookies = False, decode = True)
                self.file_info = self.getFileInfo()
            except PluginParseError:
                self.file_info = None

            self.location = self.getDirectDownloadLink()

            if not self.file_info:
                # fall back to deriving the name from the URL path
                pyfile.name = html_unescape(unquote(urlparse(self.location if self.location else pyfile.url).path.split("/")[-1]))

            if self.location:
                self.startDownload(self.location)
            elif self.premium:
                self.handlePremium()
            else:
                self.handleFree()

    def prepare(self):
        """ Initialize important variables """
        if not hasattr(self, "HOSTER_NAME"):
            self.HOSTER_NAME = re.search(self.__pattern__, self.pyfile.url).group(1)
        if not hasattr(self, "DIRECT_LINK_PATTERN"):
            self.DIRECT_LINK_PATTERN = r'(http://([^/]*?%s|\d+\.\d+\.\d+\.\d+)(:\d+/d/|/files/\d+/\w+/)[^"\'<]+)' % self.HOSTER_NAME

        self.captcha = self.errmsg = None
        self.passwords = self.getPassword().splitlines()

    def getDirectDownloadLink(self):
        """ Get download link for premium users with direct download enabled """
        self.req.http.lastURL = self.pyfile.url

        # load without following redirects so the Location header is visible
        self.req.http.c.setopt(FOLLOWLOCATION, 0)
        self.html = self.load(self.pyfile.url, cookies = True, decode = True)
        self.header = self.req.http.header
        self.req.http.c.setopt(FOLLOWLOCATION, 1)

        location = None
        found = re.search("Location\s*:\s*(.*)", self.header, re.I)
        if found and re.match(self.DIRECT_LINK_PATTERN, found.group(1)):
            location = found.group(1).strip()

        return location

    def handleFree(self):
        """Free-user download: resolve the link via the form flow."""
        url = self.getDownloadLink()
        self.logDebug("Download URL: %s" % url)
        self.startDownload(url)

    def getDownloadLink(self):
        """Submit the download form up to 5 times until a Location header
        or a direct link appears in the page; return the link."""
        for i in range(5):
            self.logDebug("Getting download link: #%d" % i)
            data = self.getPostParameters()

            self.req.http.c.setopt(FOLLOWLOCATION, 0)
            self.html = self.load(self.pyfile.url, post = data, ref = True, decode = True)
            self.header = self.req.http.header
            self.req.http.c.setopt(FOLLOWLOCATION, 1)

            found = re.search("Location\s*:\s*(.*)", self.header, re.I)
            if found:
                break

            found = re.search(self.DIRECT_LINK_PATTERN, self.html, re.S)
            if found:
                break

        else:
            # for-else: all attempts exhausted without finding a link
            if self.errmsg and 'captcha' in self.errmsg:
                self.fail("No valid captcha code entered")
            else:
                self.fail("Download link not found")

        return found.group(1)

    def handlePremium(self):
        """Premium download via a single form post and direct-link scrape."""
        self.html = self.load(self.pyfile.url, post = self.getPostParameters())
        found = re.search(self.DIRECT_LINK_PATTERN, self.html)
        if not found:
            self.parseError('DIRECT LINK')
        self.startDownload(found.group(1))

    def handleOverriden(self):
        """Leech a foreign URL through the hoster's remote-upload form,
        then download the resulting hoster link."""
        #only tested with easybytez.com
        self.html = self.load("http://www.%s/" % self.HOSTER_NAME)
        action, inputs = self.parseHtmlForm('')
        upload_id = "%012d" % int(random()*10**12)
        action += upload_id + "&js_on=1&utype=prem&upload_type=url"
        inputs['tos'] = '1'
        inputs['url_mass'] = self.pyfile.url
        inputs['up1oad_type'] = 'url'

        self.logDebug(self.HOSTER_NAME, action, inputs)
        #wait for file to upload to easybytez.com
        self.req.http.c.setopt(LOW_SPEED_TIME, 600)
        self.html = self.load(action, post = inputs)

        action, inputs = self.parseHtmlForm('F1')
        if not inputs: self.parseError('TEXTAREA')
        self.logDebug(self.HOSTER_NAME, inputs)
        if inputs['st'] == 'OK':
            self.html = self.load(action, post = inputs)
        elif inputs['st'] == 'Can not leech file':
            self.retry(max_tries=20, wait_time=180, reason=inputs['st'])
        else:
            self.fail(inputs['st'])

        #get easybytez.com link for uploaded file
        found = re.search(self.OVR_DOWNLOAD_LINK_PATTERN, self.html)
        if not found: self.parseError('DIRECT LINK (OVR)')
        self.pyfile.url = found.group(1)
        header = self.load(self.pyfile.url, just_header=True)
        if 'location' in header: # Direct link
            self.startDownload(self.pyfile.url)
        else:
            self.retry()

    def startDownload(self, link):
        """Report a correct captcha (if one was solved) and download *link*."""
        link = link.strip()
        if self.captcha: self.correctCaptcha()
        self.logDebug('DIRECT LINK: %s' % link)
        self.download(link, disposition=True)

    def checkErrors(self):
        """Scan self.html for the hoster's error box and react accordingly
        (wait, retry, fail, invalidate captcha). Returns the message or None."""
        found = re.search(self.ERROR_PATTERN, self.html)
        if found:
            self.errmsg = found.group(1)
            self.logWarning(re.sub(r"<.*?>"," ",self.errmsg))

            if 'wait' in self.errmsg:
                # sum "X hour Y minute Z second"-style durations
                # NOTE(review): a bare number with no unit would produce an
                # empty-string key and raise KeyError here -- confirm all
                # 'wait' messages always carry a unit.
                wait_time = sum([int(v) * {"hour": 3600, "minute": 60, "second": 1}[u] for v, u in re.findall('(\d+)\s*(hour|minute|second)?', self.errmsg)])
                self.setWait(wait_time, True)
                self.wait()
            elif 'captcha' in self.errmsg:
                self.invalidCaptcha()
            elif 'premium' in self.errmsg and 'require' in self.errmsg:
                self.fail("File can be downloaded by premium users only")
            elif 'limit' in self.errmsg:
                self.setWait(3600, True)
                self.wait()
                self.retry(25)
            elif 'countdown' in self.errmsg or 'Expired session' in self.errmsg:
                self.retry(3)
            elif 'maintenance' in self.errmsg:
                self.tempOffline()
            elif 'download files up to' in self.errmsg:
                self.fail("File too large for free download")
            else:
                self.fail(self.errmsg)

        else:
            self.errmsg = None

        return self.errmsg

    def getPostParameters(self):
        """Walk the download form chain (up to 3 steps) and return the
        input dict for the final download2/download3 submission, waiting
        and solving captchas for free users along the way."""
        for i in range(3):
            if not self.errmsg: self.checkErrors()

            if hasattr(self,"FORM_PATTERN"):
                action, inputs = self.parseHtmlForm(self.FORM_PATTERN)
            else:
                action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})

            if not inputs:
                action, inputs = self.parseHtmlForm('F1')
                if not inputs:
                    if self.errmsg:
                        self.retry()
                    else:
                        self.parseError("Form not found")

            self.logDebug(self.HOSTER_NAME, inputs)

            if 'op' in inputs and inputs['op'] in ('download2', 'download3'):
                if "password" in inputs:
                    if self.passwords:
                        inputs['password'] = self.passwords.pop(0)
                    else:
                        self.fail("No or invalid passport")

                if not self.premium:
                    found = re.search(self.WAIT_PATTERN, self.html)
                    if found:
                        wait_time = int(found.group(1)) + 1
                        self.setWait(wait_time, False)
                    else:
                        wait_time = 0

                    # solve the captcha before waiting so the wait overlaps
                    self.captcha = self.handleCaptcha(inputs)

                    if wait_time: self.wait()

                self.errmsg = None
                return inputs

            else:
                # intermediate form: pick the free/premium method and resubmit
                inputs['referer'] = self.pyfile.url

                if self.premium:
                    inputs['method_premium'] = "Premium Download"
                    if 'method_free' in inputs: del inputs['method_free']
                else:
                    inputs['method_free'] = "Free Download"
                    if 'method_premium' in inputs: del inputs['method_premium']

                self.html = self.load(self.pyfile.url, post = inputs, ref = True)
                self.errmsg = None

        else: self.parseError('FORM: %s' % (inputs['op'] if 'op' in inputs else 'UNKNOWN'))

    def handleCaptcha(self, inputs):
        """Detect and solve whichever captcha the page uses, filling the
        answer into *inputs*. Returns a type code: 1 ReCaptcha, 2 image,
        3 styled-div digits, 4 SolveMedia, 0 none."""
        found = re.search(self.RECAPTCHA_URL_PATTERN, self.html)
        if found:
            recaptcha_key = unquote(found.group(1))
            self.logDebug("RECAPTCHA KEY: %s" % recaptcha_key)
            recaptcha = ReCaptcha(self)
            inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(recaptcha_key)
            return 1
        else:
            found = re.search(self.CAPTCHA_URL_PATTERN, self.html)
            if found:
                captcha_url = found.group(1)
                inputs['code'] = self.decryptCaptcha(captcha_url)
                return 2
            else:
                found = re.search(self.CAPTCHA_DIV_PATTERN, self.html, re.S)
                if found:
                    captcha_div = found.group(1)
                    self.logDebug(captcha_div)
                    # digits are absolutely positioned spans; order by padding-left
                    numerals = re.findall('<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', html_unescape(captcha_div))
                    inputs['code'] = "".join([a[1] for a in sorted(numerals, key = lambda num: int(num[0]))])
                    self.logDebug("CAPTCHA", inputs['code'], numerals)
                    return 3
                else:
                    found = re.search(self.SOLVEMEDIA_PATTERN, self.html)
                    if found:
                        captcha_key = found.group(1)
                        captcha = SolveMedia(self)
                        inputs['adcopy_challenge'], inputs['adcopy_response'] = captcha.challenge(captcha_key)
                        return 4
        return 0

getInfo = create_getInfo(XFileSharingPro)
diff --git a/pyload/plugins/hoster/XHamsterCom.py b/pyload/plugins/hoster/XHamsterCom.py
new file mode 100644
index 000000000..866c5da45
--- /dev/null
+++ b/pyload/plugins/hoster/XHamsterCom.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster
+from urllib import unquote
+from module.common.json_layer import json_loads
+
def clean_json(json_expr):
    """Normalize a JavaScript flashvars literal into parseable JSON:
    strip CR/LF, remove runs of spaces and swap single for double quotes."""
    cleaned = re.sub(r'[\n\r]', '', json_expr)
    cleaned = re.sub(r' +', '', cleaned)
    return cleaned.replace("'", '"')
+
class XHamsterCom(Hoster):
    """xHamster.com video downloader.

    Parses the page's flashvars JSON to build either the .mp4 or .flv
    download URL, depending on the configured preferred type.
    """
    __name__ = "XHamsterCom"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?xhamster\.com/movies/.+"
    __version__ = "0.11"
    __config__ = [("type", ".mp4;.flv", "Preferred type", ".mp4")]
    __description__ = """XHamster.com Video Download Hoster"""

    def setup(self):
        # page HTML cache, filled lazily by download_html()
        self.html = None

    def process(self, pyfile):
        """Check availability, resolve name and format, then download."""
        self.pyfile = pyfile

        if not self.file_exists():
            self.offline()

        # Fall back to the declared config default so desired_fmt is always
        # set (previously a falsy config value left it undefined and the
        # line below raised AttributeError).
        self.desired_fmt = self.getConfig("type") or ".mp4"

        self.pyfile.name = self.get_file_name() + self.desired_fmt
        self.download(self.get_file_url())

    def download_html(self):
        """Load the video page into self.html."""
        url = self.pyfile.url
        self.html = self.load(url)

    def get_file_url(self):
        """Return the absolute downloadable file URL.

        Aborts via self.fail() when the flashvars block or any required
        field cannot be parsed.
        """
        if self.html is None:
            self.download_html()

        flashvar_pattern = re.compile('flashvars = ({.*?});', re.DOTALL)
        json_flashvar = flashvar_pattern.search(self.html)

        if json_flashvar is None:
            self.fail("Parse error (flashvars)")

        j = clean_json(json_flashvar.group(1))
        flashvars = json_loads(j)

        if flashvars["srv"]:
            srv_url = flashvars["srv"] + '/'
        else:
            self.fail("Parse error (srv_url)")

        if flashvars["url_mode"]:
            url_mode = flashvars["url_mode"]
        else:
            self.fail("Parse error (url_mode)")

        if self.desired_fmt == ".mp4":
            # mp4 links are plain anchors pointing at the media server
            file_url = re.search(r"<a href=\"" + srv_url + "(.+?)\"", self.html)
            if file_url is None:
                self.fail("Parse error (file_url)")
            file_url = file_url.group(1)
            long_url = srv_url + file_url
            self.logDebug(_("long_url: %s") % long_url)
        else:
            if flashvars["file"]:
                file_url = unquote(flashvars["file"])
            else:
                self.fail("Parse error (file_url)")

            if url_mode == '3':
                # mode 3: flashvars already carry a complete URL
                long_url = file_url
                self.logDebug(_("long_url: %s") % long_url)
            else:
                long_url = srv_url + "key=" + file_url
                self.logDebug(_("long_url: %s") % long_url)

        return long_url

    def get_file_name(self):
        """Derive the video title, falling back through several page
        patterns and finally to "Unknown"."""
        if self.html is None:
            self.download_html()

        file_name_pattern = r"<title>(.*?) - xHamster\.com</title>"
        file_name = re.search(file_name_pattern, self.html)
        if file_name is None:
            file_name_pattern = r"<h1 >(.*)</h1>"
            file_name = re.search(file_name_pattern, self.html)
            if file_name is None:
                # fixed: '[www.]+' was a character class requiring at least
                # one 'w' or '.'; '(?:www\.)?' matches an optional www. part
                file_name_pattern = r"http://(?:www\.)?xhamster\.com/movies/.*/(.*?)\.html?"
                file_name = re.search(file_name_pattern, self.pyfile.url)
                if file_name is None:
                    file_name_pattern = r"<div id=\"element_str_id\" style=\"display:none;\">(.*)</div>"
                    file_name = re.search(file_name_pattern, self.html)
                    if file_name is None:
                        return "Unknown"

        return file_name.group(1)

    def file_exists(self):
        """Return True unless the page reports 'Video not found'."""
        if self.html is None:
            self.download_html()
        return re.search(r"(.*Video not found.*)", self.html) is None
diff --git a/pyload/plugins/hoster/XVideosCom.py b/pyload/plugins/hoster/XVideosCom.py
new file mode 100644
index 000000000..b7f3f7b58
--- /dev/null
+++ b/pyload/plugins/hoster/XVideosCom.py
@@ -0,0 +1,19 @@
+
+import re
+import urllib
+
+from module.plugins.Hoster import Hoster
+
class XVideosCom(Hoster):
    __name__ = "XVideos.com"
    __version__ = "0.1"
    __pattern__ = r"http://www\.xvideos\.com/video([0-9]+)/.*"
    __config__ = []

    def process(self, pyfile):
        """Load the page, name the file after title and video id, and
        download the unquoted flv URL."""
        page = self.load(pyfile.url)
        title = re.search(r"<h2>([^<]+)<span", page).group(1)
        video_id = re.search(self.__pattern__, pyfile.url).group(1)
        pyfile.name = "%s (%s).flv" % (title, video_id)
        flv_url = re.search(r"flv_url=([^&]+)&", page).group(1)
        self.download(urllib.unquote(flv_url))
diff --git a/pyload/plugins/hoster/Xdcc.py b/pyload/plugins/hoster/Xdcc.py
new file mode 100644
index 000000000..d0630bd29
--- /dev/null
+++ b/pyload/plugins/hoster/Xdcc.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: jeix
+"""
+
+from os.path import join
+from os.path import exists
+from os import makedirs
+import re
+import sys
+import time
+import socket, struct
+from select import select
+from module.utils import save_join
+
+from module.plugins.Hoster import Hoster
+
+
+class Xdcc(Hoster):
+ __name__ = "Xdcc"
+ __version__ = "0.32"
+ __pattern__ = r'xdcc://.*?(/#?.*?)?/.*?/#?\d+/?' # xdcc://irc.Abjects.net/#channel/[XDCC]|Shit/#0004/
+ __type__ = "hoster"
+ __config__ = [
+ ("nick", "str", "Nickname", "pyload"),
+ ("ident", "str", "Ident", "pyloadident"),
+ ("realname", "str", "Realname", "pyloadreal")
+ ]
+ __description__ = """A Plugin that allows you to download from an IRC XDCC bot"""
+ __author_name__ = ("jeix")
+ __author_mail__ = ("jeix@hasnomail.com")
+
+ def setup(self):
+ self.debug = 0 #0,1,2
+ self.timeout = 30
+ self.multiDL = False
+
+
+
+ def process(self, pyfile):
+ # change request type
+ self.req = pyfile.m.core.requestFactory.getRequest(self.__name__, type="XDCC")
+
+ self.pyfile = pyfile
+ for i in range(0,3):
+ try:
+ nmn = self.doDownload(pyfile.url)
+ self.logDebug("%s: Download of %s finished." % (self.__name__, nmn))
+ return
+ except socket.error, e:
+ if hasattr(e, "errno"):
+ errno = e.errno
+ else:
+ errno = e.args[0]
+
+ if errno in (10054,):
+ self.logDebug("XDCC: Server blocked our ip, retry in 5 min")
+ self.setWait(300)
+ self.wait()
+ continue
+
+ self.fail("Failed due to socket errors. Code: %d" % errno)
+
+ self.fail("Server blocked our ip, retry again later manually")
+
+
+ def doDownload(self, url):
+ self.pyfile.setStatus("waiting") # real link
+
+ download_folder = self.config['general']['download_folder']
+ location = join(download_folder, self.pyfile.package().folder.decode(sys.getfilesystemencoding()))
+ if not exists(location):
+ makedirs(location)
+
+ m = re.search(r'xdcc://(.*?)/#?(.*?)/(.*?)/#?(\d+)/?', url)
+ server = m.group(1)
+ chan = m.group(2)
+ bot = m.group(3)
+ pack = m.group(4)
+ nick = self.getConfig('nick')
+ ident = self.getConfig('ident')
+ real = self.getConfig('realname')
+
+ temp = server.split(':')
+ ln = len(temp)
+ if ln == 2:
+ host, port = temp
+ elif ln == 1:
+ host, port = temp[0], 6667
+ else:
+ self.fail("Invalid hostname for IRC Server (%s)" % server)
+
+
+ #######################
+ # CONNECT TO IRC AND IDLE FOR REAL LINK
+ dl_time = time.time()
+
+ sock = socket.socket()
+ sock.connect((host, int(port)))
+ if nick == "pyload":
+ nick = "pyload-%d" % (time.time() % 1000) # last 3 digits
+ sock.send("NICK %s\r\n" % nick)
+ sock.send("USER %s %s bla :%s\r\n" % (ident, host, real))
+ time.sleep(3)
+ sock.send("JOIN #%s\r\n" % chan)
+ sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
+
+ # IRC recv loop
+ readbuffer = ""
+ done = False
+ retry = None
+ m = None
+ while True:
+
+ # done is set if we got our real link
+ if done:
+ break
+
+ if retry:
+ if time.time() > retry:
+ retry = None
+ dl_time = time.time()
+ sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
+
+ else:
+ if (dl_time + self.timeout) < time.time(): # todo: add in config
+ sock.send("QUIT :byebye\r\n")
+ sock.close()
+ self.fail("XDCC Bot did not answer")
+
+
+ fdset = select([sock], [], [], 0)
+ if sock not in fdset[0]:
+ continue
+
+ readbuffer += sock.recv(1024)
+ temp = readbuffer.split("\n")
+ readbuffer = temp.pop()
+
+ for line in temp:
+ if self.debug is 2: print "*> " + unicode(line, errors='ignore')
+ line = line.rstrip()
+ first = line.split()
+
+ if first[0] == "PING":
+ sock.send("PONG %s\r\n" % first[1])
+
+ if first[0] == "ERROR":
+ self.fail("IRC-Error: %s" % line)
+
+ msg = line.split(None, 3)
+ if len(msg) != 4:
+ continue
+
+ msg = { \
+ "origin":msg[0][1:], \
+ "action":msg[1], \
+ "target":msg[2], \
+ "text" :msg[3][1:] \
+ }
+
+
+ if nick == msg["target"][0:len(nick)] and "PRIVMSG" == msg["action"]:
+ if msg["text"] == "\x01VERSION\x01":
+ self.logDebug("XDCC: Sending CTCP VERSION.")
+ sock.send("NOTICE %s :%s\r\n" % (msg['origin'], "pyLoad! IRC Interface"))
+ elif msg["text"] == "\x01TIME\x01":
+ self.logDebug("Sending CTCP TIME.")
+ sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
+ elif msg["text"] == "\x01LAG\x01":
+ pass # don't know how to answer
+
+ if not (bot == msg["origin"][0:len(bot)]
+ and nick == msg["target"][0:len(nick)]
+ and msg["action"] in ("PRIVMSG", "NOTICE")):
+ continue
+
+ if self.debug is 1:
+ print "%s: %s" % (msg["origin"], msg["text"])
+
+ if "You already requested that pack" in msg["text"]:
+ retry = time.time() + 300
+
+ if "you must be on a known channel to request a pack" in msg["text"]:
+ self.fail("Wrong channel")
+
+ m = re.match('\x01DCC SEND (.*?) (\d+) (\d+)(?: (\d+))?\x01', msg["text"])
+ if m:
+ done = True
+
+ # get connection data
+ ip = socket.inet_ntoa(struct.pack('L', socket.ntohl(int(m.group(2)))))
+ port = int(m.group(3))
+ packname = m.group(1)
+
+ if len(m.groups()) > 3:
+ self.req.filesize = int(m.group(4))
+
+ self.pyfile.name = packname
+ filename = save_join(location, packname)
+ self.logInfo("XDCC: Downloading %s from %s:%d" % (packname, ip, port))
+
+ self.pyfile.setStatus("downloading")
+ newname = self.req.download(ip, port, filename, sock, self.pyfile.setProgress)
+ if newname and newname != filename:
+ self.logInfo("%(name)s saved as %(newname)s" % {"name": self.pyfile.name, "newname": newname})
+ filename = newname
+
+ # kill IRC socket
+ # sock.send("QUIT :byebye\r\n")
+ sock.close()
+
+ self.lastDownload = filename
+ return self.lastDownload
+
diff --git a/pyload/plugins/hoster/YibaishiwuCom.py b/pyload/plugins/hoster/YibaishiwuCom.py
new file mode 100644
index 000000000..901225944
--- /dev/null
+++ b/pyload/plugins/hoster/YibaishiwuCom.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.common.json_layer import json_loads
+
+class YibaishiwuCom(SimpleHoster):
+ __name__ = "YibaishiwuCom"
+ __type__ = "hoster"
+ __pattern__ = r"http://(?:www\.)?(?:u\.)?115.com/file/(?P<ID>\w+)"
+ __version__ = "0.12"
+ __description__ = """115.com"""
+ __author_name__ = ("zoidberg")
+
+ FILE_NAME_PATTERN = r"file_name: '(?P<N>[^']+)'"
+ FILE_SIZE_PATTERN = r"file_size: '(?P<S>[^']+)'"
+ FILE_OFFLINE_PATTERN = ur'<h3><i style="color:red;">哎呀提取码䞍存圚䞍劚搜搜看吧</i></h3>'
+
+ AJAX_URL_PATTERN = r'(/\?ct=(pickcode|download)[^"\']+)'
+
+ def handleFree(self):
+ found = re.search(self.AJAX_URL_PATTERN, self.html)
+ if not found: self.parseError("AJAX URL")
+ url = found.group(1)
+ self.logDebug(('FREEUSER' if found.group(2) == 'download' else 'GUEST') + ' URL', url)
+
+ response = json_loads(self.load("http://115.com" + url, decode = False))
+ for mirror in (response['urls'] if 'urls' in response else response['data'] if 'data' in response else []):
+ try:
+ url = mirror['url'].replace('\\','')
+ self.logDebug("Trying URL: " + url)
+ self.download(url)
+ break
+ except:
+ continue
+ else: self.fail('No working link found')
+
+getInfo = create_getInfo(YibaishiwuCom)
diff --git a/pyload/plugins/hoster/YoupornCom.py b/pyload/plugins/hoster/YoupornCom.py
new file mode 100644
index 000000000..b17a4ef80
--- /dev/null
+++ b/pyload/plugins/hoster/YoupornCom.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster
+
class YoupornCom(Hoster):
    """Hoster plugin for youporn.com video pages."""
    __name__ = "YoupornCom"
    __type__ = "hoster"
    __pattern__ = r"http://(www\.)?youporn\.com/watch/.+"
    __version__ = "0.2"
    __description__ = """Youporn.com Video Download Hoster"""
    __author_name__ = ("willnix")
    __author_mail__ = ("willnix@pyload.org")

    def setup(self):
        # page HTML is fetched lazily via download_html()
        self.html = None

    def process(self, pyfile):
        self.pyfile = pyfile

        if not self.file_exists():
            self.offline()

        self.pyfile.name = self.get_file_name()
        self.download(self.get_file_url())

    def download_html(self):
        # the POST field confirms the age gate
        self.html = self.load(self.pyfile.url, post={"user_choice":"Enter"}, cookies=False)

    def _ensure_html(self):
        # fetch the page once on first use
        if self.html is None:
            self.download_html()

    def get_file_url(self):
        """ returns the absolute downloadable filepath
        """
        self._ensure_html()
        return re.search(r'(http://download\.youporn\.com/download/\d+\?save=1)">', self.html).group(1)

    def get_file_name(self):
        self._ensure_html()
        title = re.search(r"<title>(.*) - Free Porn Videos - YouPorn</title>", self.html).group(1)
        return title.replace("&amp;", "&").replace("/","") + '.flv'

    def file_exists(self):
        """ returns True or False
        """
        self._ensure_html()
        return re.search(r"(.*invalid video_id.*)", self.html) is None
diff --git a/pyload/plugins/hoster/YourfilesTo.py b/pyload/plugins/hoster/YourfilesTo.py
new file mode 100644
index 000000000..4a192b32a
--- /dev/null
+++ b/pyload/plugins/hoster/YourfilesTo.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+import urllib
+from module.plugins.Hoster import Hoster
+
class YourfilesTo(Hoster):
    """Hoster plugin for yourfiles.to / yourfiles.biz download links."""
    __name__ = "YourfilesTo"
    __type__ = "hoster"
    __pattern__ = r"(http://)?(www\.)?yourfiles\.(to|biz)/\?d=[a-zA-Z0-9]+"
    __version__ = "0.21"
    __description__ = """Youfiles.to Download Hoster"""
    __author_name__ = ("jeix", "skydancer")
    __author_mail__ = ("jeix@hasnomail.de", "skydancer@hasnomail.de")

    def setup(self):
        self.html = None
        self.multiDL = True

    def process(self, pyfile):
        self.pyfile = pyfile
        self.prepare()
        self.download(self.get_file_url())

    def prepare(self):
        # verify availability, set the name, then honour the countdown
        if not self.file_exists():
            self.offline()

        self.pyfile.name = self.get_file_name()

        wait_time = self.get_waiting_time()
        self.setWait(wait_time)
        self.logDebug("%s: Waiting %d seconds." % (self.__name__,wait_time))
        self.wait()

    def get_waiting_time(self):
        if self.html is None:
            self.download_html()

        # page embeds the countdown as: var zzipitime = 15;
        found = re.search(r'var zzipitime = (\d+);', self.html)
        return int(found.group(1)) if found else 0

    def download_html(self):
        self.html = self.load(self.pyfile.url)

    def get_file_url(self):
        """ returns the absolute downloadable filepath
        """
        found = re.search(r"var bla = '(.*?)';", self.html)
        if found:
            # obfuscated URL: strip junk prefixes/infixes, then unquote
            raw = found.group(1).replace("http://http:/http://", "http://").replace("dumdidum", "")
            return urllib.unquote(raw)
        else:
            self.fail("absolute filepath could not be found. offline? ")

    def get_file_name(self):
        if self.html is None:
            self.download_html()

        return re.search("<title>(.*)</title>", self.html).group(1)

    def file_exists(self):
        """ returns True or False
        """
        if self.html is None:
            self.download_html()

        return re.search(r"HTTP Status 404", self.html) is None
+
+
+
diff --git a/pyload/plugins/hoster/YoutubeCom.py b/pyload/plugins/hoster/YoutubeCom.py
new file mode 100644
index 000000000..129b948bf
--- /dev/null
+++ b/pyload/plugins/hoster/YoutubeCom.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+import subprocess
+import os
+import os.path
+from urllib import unquote
+
+from module.utils import html_unescape
+from module.plugins.Hoster import Hoster
+
def which(program):
    """Works exactly like the unix command which

    Courtesy of http://stackoverflow.com/a/377028/675646"""
    def _executable(candidate):
        # must be a regular file with the execute bit usable by us
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _basename = os.path.split(program)
    if directory:
        # explicit path given: accept it only if it is executable
        return program if _executable(program) else None

    # bare name: scan every PATH entry
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if _executable(candidate):
            return candidate

    return None
+
class YoutubeCom(Hoster):
    """Download hoster for youtube.com watch pages.

    Selects a stream (itag) that matches the user's quality/container/3D
    preferences, then downloads it; when the URL carries a t=XmYs start
    offset and ffmpeg is available, the downloaded file is re-cut.
    """
    __name__ = "YoutubeCom"
    __type__ = "hoster"
    __pattern__ = r"https?://(?:[^/]*?)youtube\.com/watch.*?[?&]v=.*"
    __version__ = "0.34"
    __config__ = [("quality", "sd;hd;fullhd;240p;360p;480p;720p;1080p;3072p", "Quality Setting", "hd"),
        ("fmt", "int", "FMT/ITAG Number (5-102, 0 for auto)", 0),
        (".mp4", "bool", "Allow .mp4", True),
        (".flv", "bool", "Allow .flv", True),
        (".webm", "bool", "Allow .webm", False),
        (".3gp", "bool", "Allow .3gp", False),
        ("3d", "bool", "Prefer 3D", False)]
    __description__ = """Youtube.com Video Download Hoster"""
    __author_name__ = ("spoob", "zoidberg")
    __author_mail__ = ("spoob@pyload.org", "zoidberg@mujmail.cz")

    # itag -> (name/extension, width, height, quality ranking, 3D)
    formats = {5: (".flv", 400, 240, 1, False),
               6: (".flv", 640, 400, 4, False),
               17: (".3gp", 176, 144, 0, False),
               18: (".mp4", 480, 360, 2, False),
               22: (".mp4", 1280, 720, 8, False),
               43: (".webm", 640, 360, 3, False),
               34: (".flv", 640, 360, 4, False),
               35: (".flv", 854, 480, 6, False),
               36: (".3gp", 400, 240, 1, False),
               37: (".mp4", 1920, 1080, 9, False),
               38: (".mp4", 4096, 3072, 10, False),
               44: (".webm", 854, 480, 5, False),
               45: (".webm", 1280, 720, 7, False),
               46: (".webm", 1920, 1080, 9, False),
               82: (".mp4", 640, 360, 3, True),
               83: (".mp4", 400, 240, 1, True),
               84: (".mp4", 1280, 720, 8, True),
               85: (".mp4", 1920, 1080, 9, True),
               100: (".webm", 640, 360, 3, True),
               101: (".webm", 640, 360, 4, True),
               102: (".webm", 1280, 720, 8, True)
               }

    def setup(self):
        # streams support HTTP range requests, so resume and parallel DL are safe
        self.resumeDownload = self.multiDL = True

    def process(self, pyfile):
        """Resolve the best matching stream for *pyfile* and download it."""
        html = self.load(pyfile.url, decode=True)

        if re.search(r'<div id="player-unavailable" class="\s*player-width player-height\s*">', html):
            self.offline()

        if "We have been receiving a large volume of requests from your network." in html:
            self.tempOffline()

        #get config
        use3d = self.getConfig("3d")
        # quality label -> preferred itag, separate maps for 3D and 2D streams
        if use3d:
            quality = {"sd":82,"hd":84,"fullhd":85,"240p":83,"360p":82,"480p":82,"720p":84,"1080p":85,"3072p":85}
        else:
            quality = {"sd":18,"hd":22,"fullhd":37,"240p":5,"360p":18,"480p":35,"720p":22,"1080p":37,"3072p":38}
        desired_fmt = self.getConfig("fmt")
        if desired_fmt and desired_fmt not in self.formats:
            self.logWarning("FMT %d unknown - using default." % desired_fmt)
            desired_fmt = 0
        if not desired_fmt:
            # fall back to the quality label; itag 18 (mp4 360p) is the default
            desired_fmt = quality.get(self.getConfig("quality"), 18)

        #parse available streams
        streams = re.search(r'"url_encoded_fmt_stream_map": "(.*?)",', html).group(1)
        # streams are comma-separated records of \u0026-separated key=value pairs
        streams = [x.split('\u0026') for x in streams.split(',')]
        streams = [dict((y.split('=',1)) for y in x) for x in streams]
        streams = [(int(x['itag']), "%s&signature=%s" % (unquote(x['url']), x['sig'])) for x in streams]
        #self.logDebug("Found links: %s" % streams)
        self.logDebug("AVAILABLE STREAMS: %s" % [x[0] for x in streams])

        #build dictionary of supported itags (3D/2D)
        allowed = lambda x: self.getConfig(self.formats[x][0])  # container allowed by config?
        streams = [x for x in streams if x[0] in self.formats and allowed(x[0])]
        if not streams:
            self.fail("No available stream meets your preferences")
        # prefer streams matching the 3D setting; fall back to all allowed streams
        fmt_dict = dict([x for x in streams if self.formats[x[0]][4] == use3d] or streams)

        self.logDebug("DESIRED STREAM: ITAG:%d (%s) %sfound, %sallowed" %
                      (desired_fmt,
                       "%s %dx%d Q:%d 3D:%s" % self.formats[desired_fmt],
                       "" if desired_fmt in fmt_dict else "NOT ",
                       "" if allowed(desired_fmt) else "NOT ")
                      )

        #return fmt nearest to quality index
        if desired_fmt in fmt_dict and allowed(desired_fmt):
            fmt = desired_fmt
        else:
            sel = lambda x: self.formats[x][3] #select quality index
            comp = lambda x, y: abs(sel(x) - sel(y))  # distance between quality ranks

            self.logDebug("Choosing nearest fmt: %s" % [(x, allowed(x), comp(x, desired_fmt)) for x in fmt_dict.keys()])
            # pick the itag whose quality rank is closest to the desired one,
            # breaking ties toward the higher-quality candidate
            fmt = reduce(lambda x, y: x if comp(x, desired_fmt) <= comp(y, desired_fmt) and
                                           sel(x) > sel(y) else y, fmt_dict.keys())

        self.logDebug("Chosen fmt: %s" % fmt)
        url = fmt_dict[fmt]
        self.logDebug("URL: %s" % url)

        #set file name
        file_suffix = self.formats[fmt][0] if fmt in self.formats else ".flv"
        file_name_pattern = '<meta name="title" content="(.+?)">'
        name = re.search(file_name_pattern, html).group(1).replace("/", "")
        pyfile.name = html_unescape(name)

        # NOTE(review): 'time' here shadows any module-level time import — confirm intended
        time = re.search(r"t=((\d+)m)?(\d+)s", pyfile.url)
        ffmpeg = which("ffmpeg")
        if ffmpeg and time:
            m, s = time.groups()[1:]
            if not m:
                m = "0"

            pyfile.name += " (starting at %s:%s)" % (m, s)
        pyfile.name += file_suffix

        filename = self.download(url)

        # re-cut the file from the requested offset; m/s are bound above under
        # the same 'ffmpeg and time' guard
        if ffmpeg and time:
            inputfile = filename + "_"
            os.rename(filename, inputfile)

            subprocess.call([
                ffmpeg,
                "-ss", "00:%s:%s" % (m, s),
                "-i", inputfile,
                "-vcodec", "copy",
                "-acodec", "copy",
                filename])
            os.remove(inputfile)
diff --git a/pyload/plugins/hoster/ZDF.py b/pyload/plugins/hoster/ZDF.py
new file mode 100644
index 000000000..ea45f4fd8
--- /dev/null
+++ b/pyload/plugins/hoster/ZDF.py
@@ -0,0 +1,46 @@
+
+import re
+from xml.etree.ElementTree import fromstring
+
+from module.plugins.Hoster import Hoster
+
+XML_API = "http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?id=%i"
+
class ZDF(Hoster):
    # Based on zdfm by Roland Beermann
    # http://github.com/enkore/zdfm/
    __name__ = "ZDF Mediathek"
    __version__ = "0.7"
    __pattern__ = r"http://www\.zdf\.de/ZDFmediathek/[^0-9]*([0-9]+)[^0-9]*"
    __config__ = []

    @staticmethod
    def video_key(video):
        # sort key: bitrate first, progressive delivery as tie-breaker
        bitrate = int(video.findtext("videoBitrate", "0"))
        progressive = any(f.text == "progressive" for f in video.iter("facet"))
        return (bitrate, progressive)

    @staticmethod
    def video_valid(video):
        # only plain HTTP mp4 streams are downloadable
        stream_url = video.findtext("url")
        return stream_url.startswith("http") and stream_url.endswith(".mp4")

    @staticmethod
    def get_id(url):
        # first digit run in the URL is the mediathek item id
        return int(re.search(r"[^0-9]*([0-9]+)[^0-9]*", url).group(1))

    def process(self, pyfile):
        xml = fromstring(self.load(XML_API % self.get_id(pyfile.url)))

        if xml.findtext("./status/statuscode") != "ok":
            self.fail("Error retrieving manifest.")

        video = xml.find("video")
        pyfile.name = video.findtext("information/title")

        # keep sorted()[-1] (not max) so ties resolve to the last candidate,
        # matching stable-sort semantics
        candidates = [v for v in video.iter("formitaet") if self.video_valid(v)]
        target_url = sorted(candidates, key=self.video_key)[-1].findtext("url")

        self.download(target_url)
diff --git a/pyload/plugins/hoster/ZeveraCom.py b/pyload/plugins/hoster/ZeveraCom.py
new file mode 100644
index 000000000..92f9e4dcd
--- /dev/null
+++ b/pyload/plugins/hoster/ZeveraCom.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from module.plugins.Hoster import Hoster
+from module.utils import html_unescape
+from urllib import quote, unquote
+from time import sleep
+
class ZeveraCom(Hoster):
    """Multi-hoster plugin for the zevera.com premium rehosting service.

    Requires a configured zevera.com account; the account object performs the
    actual API calls via getAPIData().
    """
    __name__ = "ZeveraCom"
    __version__ = "0.21"
    __type__ = "hoster"
    __pattern__ = r"http://zevera.com/.*"
    __description__ = """zevera.com hoster plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def setup(self):
        # resumable and parallel-safe, but only one chunk per transfer
        self.resumeDownload = self.multiDL = True
        self.chunkLimit = 1

    def process(self, pyfile):
        """Check the link via the API, then download through the redirect URL.

        Fails (raises via self.fail) when no account is set, the link is dead,
        or the API does not hand back a redirect location.
        """
        if not self.account:
            self.logError(_("Please enter your %s account or deactivate this plugin") % "zevera.com")
            self.fail("No zevera.com account provided")

        self.logDebug("zevera.com: Old URL: %s" % pyfile.url)

        # verify the link is still alive before requesting a download
        if self.account.getAPIData(self.req, cmd = "checklink", olink = pyfile.url) != "Alive":
            self.fail("Offline or not downloadable - contact Zevera support")

        # the API answers with a redirect; its 'location' header is the file URL
        header = self.account.getAPIData(self.req, just_header = True, cmd="generatedownloaddirect", olink = pyfile.url)
        if "location" not in header:
            self.fail("Unable to initialize download - contact Zevera support")

        self.download(header['location'], disposition = True)

        check = self.checkDownload({"error" : 'action="ErrorDownload.aspx'})
        if check == "error":
            self.fail("Error response received - contact Zevera support")

    # Dead code below: disabled BitAPI implementation kept as a bare string
    # literal for reference only - it is never executed.
    """
    # BitAPI not used - defunct, probably abandoned by Zevera

    api_url = "http://zevera.com/API.ashx"

    def process(self, pyfile):
        if not self.account:
            self.logError(_("Please enter your zevera.com account or deactivate this plugin"))
            self.fail("No zevera.com account provided")

        self.logDebug("zevera.com: Old URL: %s" % pyfile.url)

        last_size = retries = 0
        olink = self.pyfile.url #quote(self.pyfile.url.encode('utf_8'))

        for i in range(100):
            self.retData = self.account.loadAPIRequest(self.req, cmd = 'download_request', olink = olink)
            self.checkAPIErrors(self.retData)

            if self.retData['FileInfo']['StatusID'] == 100:
                break
            elif self.retData['FileInfo']['StatusID'] == 99:
                self.fail('Failed to initialize download (99)')
            else:
                if self.retData['FileInfo']['Progress']['BytesReceived'] <= last_size:
                    if retries >= 6:
                        self.fail('Failed to initialize download (%d)' % self.retData['FileInfo']['StatusID'] )
                    retries += 1
                else:
                    retries = 0

                last_size = self.retData['FileInfo']['Progress']['BytesReceived']

                self.setWait(self.retData['Update_Wait'])
                self.wait()

        pyfile.name = self.retData['FileInfo']['RealFileName']
        pyfile.size = self.retData['FileInfo']['FileSizeInBytes']

        self.retData = self.account.loadAPIRequest(self.req, cmd = 'download_start', FileID = self.retData['FileInfo']['FileID'])
        self.checkAPIErrors(self.retData)

        self.download(self.api_url, get = {
            'cmd': "open_stream",
            'login': self.account.loginname,
            'pass': self.account.password,
            'FileID': self.retData['FileInfo']['FileID'],
            'startBytes': 0
            }
        )

    def checkAPIErrors(self, retData):
        if not retData:
            self.fail('Unknown API response')

        if retData['ErrorCode']:
            self.logError(retData['ErrorCode'], retData['ErrorMessage'])
            #self.fail('ERROR: ' + retData['ErrorMessage'])

        if self.pyfile.size / 1024000 > retData['AccountInfo']['AvailableTODAYTrafficForUseInMBytes']:
            self.logWarning("Not enough data left to download the file")

    def crazyDecode(self, ustring):
        # accepts decoded ie. unicode string - API response is double-quoted, double-utf8-encoded
        # no idea what the proper order of calling these functions would be :-/
        return html_unescape(unquote(unquote(ustring.replace('@DELIMITER@','#'))).encode('raw_unicode_escape').decode('utf-8'))
    """
diff --git a/pyload/plugins/hoster/ZippyshareCom.py b/pyload/plugins/hoster/ZippyshareCom.py
new file mode 100644
index 000000000..e130be24c
--- /dev/null
+++ b/pyload/plugins/hoster/ZippyshareCom.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re, subprocess, tempfile, os
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, timestamp
+from module.plugins.internal.CaptchaService import ReCaptcha
+from module.common.json_layer import json_loads
+
class ZippyshareCom(SimpleHoster):
    """Download hoster for zippyshare.com.

    Resolves the download URL by one of three strategies, tried in order:
    1. evaluating the inline JavaScript on the page,
    2. deriving the time token from the SWF player (seed * multiply % modulo),
    3. solving the reCaptcha-protected fallback endpoint.
    """
    __name__ = "ZippyshareCom"
    __type__ = "hoster"
    __pattern__ = r"(?P<HOST>http://www\d{0,2}\.zippyshare.com)/v(?:/|iew.jsp.*key=)(?P<KEY>\d+)"
    __version__ = "0.38"
    __description__ = """Zippyshare.com Download Hoster"""
    __author_name__ = ("spoob", "zoidberg", "stickell")
    __author_mail__ = ("spoob@pyload.org", "zoidberg@mujmail.cz", "l.stickell@yahoo.it")
    __config__ = [("swfdump_path", "string", "Path to swfdump", "")]

    FILE_NAME_PATTERN = r'>Name:</font>\s*<font [^>]*>(?P<N>[^<]+)</font><br />'
    FILE_SIZE_PATTERN = r'>Size:</font>\s*<font [^>]*>(?P<S>[0-9.,]+) (?P<U>[kKMG]+)i?B</font><br />'
    FILE_INFO_PATTERN = r'document\.getElementById\(\'dlbutton\'\)\.href = "[^;]*/(?P<N>[^"]+)";'
    FILE_OFFLINE_PATTERN = r'>File does not exist on this server</div>'

    DOWNLOAD_URL_PATTERN = r"<script type=\"text/javascript\">([^<]*?)document\.getElementById\('dlbutton'\).href = ([^;]+);"
    SEED_PATTERN = r'swfobject.embedSWF\("([^"]+)".*?seed: (\d+)'
    CAPTCHA_KEY_PATTERN = r'Recaptcha.create\("([^"]+)"'
    CAPTCHA_SHORTENCODE_PATTERN = r"shortencode: '([^']+)'"
    CAPTCHA_DOWNLOAD_PATTERN = r"document.location = '([^']+)'"

    LAST_KNOWN_VALUES = (9, 2374755) #time = (seed * multiply) % modulo

    def setup(self):
        self.html = None
        self.wantReconnect = False
        self.multiDL = True

    def handleFree(self):
        url = self.get_file_url()
        if not url: self.fail("Download URL not found.")
        self.logDebug("Download URL %s" % url)
        self.download(url, cookies = True)

        # if the server returned the player SWF instead of the file, the
        # stored multiply/modulo values are stale - mark them and retry once
        check = self.checkDownload({
            "swf_values": re.compile(self.SEED_PATTERN)
        })

        if check == "swf_values":
            swf_sts = self.getStorage("swf_sts")
            if not swf_sts:
                self.setStorage("swf_sts", 2)
                self.setStorage("swf_stamp", 0)
            elif swf_sts == '1':
                self.setStorage("swf_sts", 2)

            self.retry(max_tries = 1)

    def get_file_url(self):
        """ returns the absolute downloadable filepath
        """
        # NOTE: storage values appear to round-trip as strings, hence the
        # '1'/'2' comparisons against values stored as ints - confirm
        url = multiply = modulo = None

        found = re.search(self.DOWNLOAD_URL_PATTERN, self.html, re.S)
        if found:
            #Method #1: JS eval
            js = "\n".join(found.groups())
            regex = r"document.getElementById\(\\*'dlbutton\\*'\).omg"
            omg = re.search(regex + r" = ([^;]+);", js).group(1)
            js = re.sub(regex + r" = ([^;]+);", '', js)
            js = re.sub(regex, omg, js)
            url = self.js.eval(js)
        else:
            #Method #2: SWF eval
            seed_search = re.search(self.SEED_PATTERN, self.html)
            if seed_search:
                swf_url, file_seed = seed_search.groups()

                swf_sts = self.getStorage("swf_sts")
                swf_stamp = int(self.getStorage("swf_stamp") or 0)
                swf_version = self.getStorage("version")
                self.logDebug("SWF", swf_sts, swf_stamp, swf_version)

                if not swf_sts:
                    self.logDebug('Using default values')
                    multiply, modulo = self.LAST_KNOWN_VALUES
                elif swf_sts == "1":
                    self.logDebug('Using stored values')
                    multiply = self.getStorage("multiply")
                    modulo = self.getStorage("modulo")
                elif swf_sts == "2":
                    if swf_version < self.__version__:
                        # plugin was updated since the failure - trust defaults again
                        self.logDebug('Reverting to default values')
                        self.setStorage("swf_sts", "")
                        self.setStorage("version", self.__version__)
                        multiply, modulo = self.LAST_KNOWN_VALUES
                    elif (swf_stamp + 3600000) < timestamp():
                        # last parse attempt is older than an hour - try again
                        swfdump = self.get_swfdump_path()
                        if swfdump:
                            multiply, modulo = self.get_swf_values(self.file_info['HOST'] + swf_url, swfdump)
                        else:
                            self.logWarning("Swfdump not found. Install swftools to bypass captcha.")

                if multiply and modulo:
                    self.logDebug("TIME = (%s * %s) %s" % (file_seed, multiply, modulo))
                    url = "/download?key=%s&time=%d" % (self.file_info['KEY'], (int(file_seed) * int(multiply)) % int(modulo))

        if not url:
            #Method #3: Captcha
            url = self.do_recaptcha()

        return self.file_info['HOST'] + url

    def get_swf_values(self, swf_url, swfdump):
        """Download the player SWF and extract multiply/modulo via swfdump.

        Stores the parsed values (or a failure marker) in plugin storage and
        returns the (multiply, modulo) tuple, either element possibly None.
        """
        self.logDebug('Parsing values from %s' % swf_url)
        multiply = modulo = None

        fd, fpath = tempfile.mkstemp()
        try:
            swf_data = self.load(swf_url)
            os.write(fd, swf_data)

            p = subprocess.Popen([swfdump, '-a', fpath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()

            if err:
                self.logError(err)
            else:
                # constants live in the '::break' block of the disassembly
                m_str = re.search(r'::break.*?{(.*?)}', out, re.S).group(1)
                multiply = re.search(r'pushbyte (\d+)', m_str).group(1)
                modulo = re.search(r'pushint (\d+)', m_str).group(1)
        finally:
            os.close(fd)
            os.remove(fpath)

        if multiply and modulo:
            self.setStorage("multiply", multiply)
            self.setStorage("modulo", modulo)
            self.setStorage("swf_sts", 1)
            self.setStorage("version", self.__version__)
        else:
            self.logError("Parsing SWF failed: swfdump not installed or plugin out of date")
            self.setStorage("swf_sts", 2)

        self.setStorage("swf_stamp", timestamp())

        return multiply, modulo

    def get_swfdump_path(self):
        # used for detecting if swfdump is installed
        def is_exe(ppath):
            return os.path.isfile(ppath) and os.access(ppath, os.X_OK)

        program = self.getConfig("swfdump_path") or "swfdump"
        swfdump = None
        ppath, pname = os.path.split(program)
        if ppath:
            if is_exe(program):
                swfdump = program
        else:
            for ppath in os.environ["PATH"].split(os.pathsep):
                exe_file = os.path.join(ppath, program)
                if is_exe(exe_file):
                    swfdump = exe_file

        # return path to the executable or None if not found
        return swfdump

    def do_recaptcha(self):
        """Solve the reCaptcha fallback; returns the host-relative download URL."""
        self.logDebug('Trying to solve captcha')
        captcha_key = re.search(self.CAPTCHA_KEY_PATTERN, self.html).group(1)
        shortencode = re.search(self.CAPTCHA_SHORTENCODE_PATTERN, self.html).group(1)
        url = re.search(self.CAPTCHA_DOWNLOAD_PATTERN, self.html).group(1)

        recaptcha = ReCaptcha(self)

        for i in range(5):
            challenge, code = recaptcha.challenge(captcha_key)

            response = json_loads(self.load(self.file_info['HOST'] + '/rest/captcha/test',
                                            post={'challenge': challenge,
                                                  'response': code,
                                                  'shortencode': shortencode}))
            self.logDebug("reCaptcha response : %s" % response)
            if response == True:
                # fixed: was a bare attribute access (no-op); the method must be called
                self.correctCaptcha()
                break
            else:
                self.invalidCaptcha()
        else: self.fail("Invalid captcha")

        return url

getInfo = create_getInfo(ZippyshareCom)
diff --git a/pyload/plugins/hoster/__init__.py b/pyload/plugins/hoster/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/hoster/__init__.py
diff --git a/pyload/plugins/internal/AbstractExtractor.py b/pyload/plugins/internal/AbstractExtractor.py
new file mode 100644
index 000000000..3cd635eff
--- /dev/null
+++ b/pyload/plugins/internal/AbstractExtractor.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
class ArchiveError(Exception):
    """Raised when the archive is damaged or cannot be processed."""
    pass

class CRCError(Exception):
    """Raised when extracted data fails its checksum/CRC verification."""
    pass

class WrongPassword(Exception):
    """Raised when the supplied archive password is rejected."""
    pass
+
class AbtractExtractor:
    # NOTE(review): class name is misspelled ("Abtract") but kept as-is for
    # compatibility with existing subclasses/imports.
    """Base interface for archive extractor plugins.

    Subclasses implement the actual extraction; this class only defines the
    contract and stores the common configuration passed by ExtractArchive.
    """

    @staticmethod
    def checkDeps():
        """ Check if system satisfies dependencies
        :return: boolean
        """
        return True

    @staticmethod
    def getTargets(files_ids):
        """ Filter suited targets from list of filename id tuple list
        :param files_ids: List of file paths
        :return: List of targets, id tuple list
        """
        raise NotImplementedError


    def __init__(self, m, file, out, fullpath, overwrite, renice):
        """Initialize extractor for specific file

        :param m: ExtractArchive addon plugin
        :param file: Absolute file path
        :param out: Absolute path to destination directory
        :param fullpath: Extract to fullpath
        :param overwrite: Overwrite existing archives
        :param renice: Renice value
        """
        self.m = m
        self.file = file
        self.out = out
        self.fullpath = fullpath
        self.overwrite = overwrite
        self.renice = renice
        self.files = [] # Store extracted files here


    def init(self):
        """ Initialize additional data structures """
        pass


    def checkArchive(self):
        """Check if password is needed. Raise ArchiveError if integrity is
        questionable.

        :return: boolean
        :raises ArchiveError
        """
        return False

    def checkPassword(self, password):
        """ Check if the given password is/might be correct.
        If it can not be decided at this point return true.

        :param password:
        :return: boolean
        """
        return True

    def extract(self, progress, password=None):
        """Extract the archive. Raise specific errors in case of failure.

        :param progress: Progress function, call this to update status
        :param password: password to use
        :raises WrongPassword
        :raises CRCError
        :raises ArchiveError
        :return:
        """
        raise NotImplementedError

    def getDeleteFiles(self):
        """Return list of files to delete, do *not* delete them here.

        :return: List with paths of files to delete
        """
        raise NotImplementedError

    def getExtractedFiles(self):
        """Populate self.files at some point while extracting"""
        return self.files
diff --git a/pyload/plugins/internal/CaptchaService.py b/pyload/plugins/internal/CaptchaService.py
new file mode 100644
index 000000000..b912436a7
--- /dev/null
+++ b/pyload/plugins/internal/CaptchaService.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+
class CaptchaService():
    """Base class for captcha-service helpers; keeps a reference to the
    owning plugin, which supplies req.load() and decryptCaptcha()."""

    __version__ = "0.02"

    def __init__(self, plugin):
        self.plugin = plugin
+
class ReCaptcha():
    """Helper for the (old) Google reCaptcha challenge API.

    The owning plugin supplies req.load() for HTTP and decryptCaptcha()
    for actually solving the image.
    """

    def __init__(self, plugin):
        self.plugin = plugin

    def challenge(self, id):
        """Fetch a challenge for site key *id*; return (challenge, result)."""
        js = self.plugin.req.load("http://www.google.com/recaptcha/api/challenge", get={"k":id}, cookies=True)

        try:
            challenge = re.search("challenge : '(.*?)',", js).group(1)
            server = re.search("server : '(.*?)',", js).group(1)
        except AttributeError:
            # re.search returned None: response lacks the expected fields.
            # (Was a bare 'except:' which also swallowed KeyboardInterrupt.)
            self.plugin.fail("recaptcha error")
        result = self.result(server,challenge)

        return challenge, result

    def result(self, server, challenge):
        """Download the captcha image from *server* and let the plugin solve it."""
        return self.plugin.decryptCaptcha("%simage"%server, get={"c":challenge}, cookies=True, forceUser=True, imgtype="jpg")
+
class AdsCaptcha(CaptchaService):
    """Helper for AdsCaptcha challenges (plugin supplied by CaptchaService)."""

    def challenge(self, src):
        """Fetch the challenge script at *src*; return (challenge, result)."""
        js = self.plugin.req.load(src, cookies=True)

        try:
            challenge = re.search("challenge: '(.*?)',", js).group(1)
            server = re.search("server: '(.*?)',", js).group(1)
        except AttributeError:
            # re.search returned None: response lacks the expected fields.
            # (Was a bare 'except:' which also swallowed KeyboardInterrupt.)
            self.plugin.fail("adscaptcha error")
        result = self.result(server,challenge)

        return challenge, result

    def result(self, server, challenge):
        """Download the captcha image and let the plugin solve it."""
        # fixed: 'random' was used without any import, raising NameError at
        # runtime; the cache-busting 'dummy' parameter needs random.random()
        from random import random
        return self.plugin.decryptCaptcha("%sChallenge.aspx" % server, get={"cid": challenge, "dummy": random()}, cookies=True, imgtype="jpg")
+
class SolveMedia(CaptchaService):
    """Helper for SolveMedia captchas.

    The redundant __init__ (byte-identical to CaptchaService.__init__) was
    removed for consistency with AdsCaptcha; behavior is unchanged.
    """

    def challenge(self, src):
        """Fetch the noscript challenge for key *src*; return (challenge, result)."""
        html = self.plugin.req.load("http://api.solvemedia.com/papi/challenge.noscript?k=%s" % src, cookies=True)
        try:
            challenge = re.search(r'<input type=hidden name="adcopy_challenge" id="adcopy_challenge" value="([^"]+)">', html).group(1)
        except AttributeError:
            # re.search returned None: page lacks the challenge field.
            # (Was a bare 'except:' which also swallowed KeyboardInterrupt.)
            self.plugin.fail("solvmedia error")
        result = self.result(challenge)

        return challenge, result

    def result(self,challenge):
        """Download the media for *challenge* and let the plugin solve it."""
        return self.plugin.decryptCaptcha("http://api.solvemedia.com/papi/media?c=%s" % challenge,imgtype="gif")
diff --git a/pyload/plugins/internal/DeadHoster.py b/pyload/plugins/internal/DeadHoster.py
new file mode 100644
index 000000000..e180e2384
--- /dev/null
+++ b/pyload/plugins/internal/DeadHoster.py
@@ -0,0 +1,18 @@
+from module.plugins.Hoster import Hoster as _Hoster
+
def create_getInfo(plugin):
    """Build a getInfo() stub that reports every URL as unavailable."""
    def getInfo(urls):
        # yield one batch marking each link '#N/A' with size 0, status 1
        yield [('#N/A: ' + url, 0, 1, url) for url in urls]
    return getInfo
+
class DeadHoster(_Hoster):
    # Placeholder plugin for hosters whose service has shut down: any download
    # attempt is aborted immediately in setup().
    __name__ = "DeadHoster"
    __type__ = "hoster"
    __pattern__ = r""
    __version__ = "0.11"
    __description__ = """Hoster is no longer available"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    def setup(self):
        # fail before any download logic can run
        self.fail("Hoster is no longer available")
diff --git a/pyload/plugins/internal/NetloadInOCR.py b/pyload/plugins/internal/NetloadInOCR.py
new file mode 100644
index 000000000..e50978701
--- /dev/null
+++ b/pyload/plugins/internal/NetloadInOCR.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+from OCR import OCR
+
class NetloadInOCR(OCR):
    """OCR reader for netload.in captcha images (4-character numeric codes)."""

    __version__ = 0.1

    def __init__(self):
        OCR.__init__(self)

    def get_captcha(self, image):
        # preprocess: greyscale, then two denoising passes before tesseract
        self.load_image(image)
        self.to_greyscale()
        for _ in range(2):
            self.clean(3)
        self.run_tesser(True, True, False, False)

        # drop spaces and keep only the first four characters
        text = self.result_captcha.replace(" ", "")[:4]
        self.result_captcha = text
        return text
+
if __name__ == '__main__':
    # Manual smoke test: fetch a live captcha image from netload.in and print
    # the OCR result. Requires network access; not part of the plugin flow.
    import urllib
    ocr = NetloadInOCR()
    urllib.urlretrieve("http://netload.in/share/includes/captcha.php", "captcha.png")

    print ocr.get_captcha('captcha.png')
diff --git a/pyload/plugins/internal/OCR.py b/pyload/plugins/internal/OCR.py
new file mode 100644
index 000000000..9f8b7ef8c
--- /dev/null
+++ b/pyload/plugins/internal/OCR.py
@@ -0,0 +1,314 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+#Copyright (C) 2009 kingzero, RaNaN
+#
+#This program is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3 of the License,
+#or (at your option) any later version.
+#
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#See the GNU General Public License for more details.
+#
+#You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+###
+from __future__ import with_statement
+import os
+from os.path import join
+from os.path import abspath
+import logging
+import subprocess
+#import tempfile
+
+import Image
+import TiffImagePlugin
+import PngImagePlugin
+import GifImagePlugin
+import JpegImagePlugin
+
+
class OCR(object):
    """Base class for captcha OCR plugins.

    Subclasses implement get_captcha() and use the helpers here to load an
    image, preprocess it (greyscale, threshold, denoise, derotate) and feed
    it to the external ``tesseract`` binary.
    """

    __version__ = 0.1

    def __init__(self):
        self.logger = logging.getLogger("log")

    def load_image(self, image):
        """Open *image* (path or file object) and reset the previous result."""
        self.image = Image.open(image)
        self.pixels = self.image.load()
        self.result_captcha = ''

    def unload(self):
        """delete all tmp images"""
        pass

    def threshold(self, value):
        # linear point transform: scale every grey level by *value* plus a
        # small offset, pushing mid greys towards white
        self.image = self.image.point(lambda a: a * value + 10)

    def run(self, command):
        """Run external *command* and log its combined stdout/stderr output."""
        popen = subprocess.Popen(command, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        popen.wait()
        output = popen.stdout.read() + " | " + popen.stderr.read()
        popen.stdout.close()
        popen.stderr.close()
        self.logger.debug("Tesseract ReturnCode %s Output: %s" % (popen.returncode, output))

    def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True):
        """Feed the current image to tesseract; store the text in self.result_captcha.

        When *subset* is true, a temporary config file restricts recognition
        to the selected character classes (digits / lowercase / uppercase).
        """
        # The files are opened only to reserve predictable names inside ./tmp;
        # tesseract writes the real content later.
        # BUGFIX: was ``self.__name__``, which raises AttributeError on
        # instances -- the class name is only reachable via the class object.
        tmp = open(join("tmp", "tmpTif_%s.tif" % self.__class__.__name__), "wb")
        tmp.close()
        tmpTxt = open(join("tmp", "tmpTxt_%s.txt" % self.__class__.__name__), "wb")
        tmpTxt.close()

        self.logger.debug("save tiff")
        self.image.save(tmp.name, 'TIFF')

        if os.name == "nt":
            # NOTE(review): ``pypath`` is expected to be injected as a global
            # by the pyLoad core -- confirm before running standalone
            tessparams = [join(pypath, "tesseract", "tesseract.exe")]
        else:
            tessparams = ['tesseract']

        tessparams.extend([abspath(tmp.name), abspath(tmpTxt.name).replace(".txt", "")])

        if subset and (digits or lowercase or uppercase):
            # write a one-shot whitelist config, passed in "nobatch" mode
            tmpSub = open(join("tmp", "tmpSub_%s.subset" % self.__class__.__name__), "wb")
            tmpSub.write("tessedit_char_whitelist ")
            if digits:
                tmpSub.write("0123456789")
            if lowercase:
                tmpSub.write("abcdefghijklmnopqrstuvwxyz")
            if uppercase:
                tmpSub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
            tmpSub.write("\n")
            tessparams.append("nobatch")
            tessparams.append(abspath(tmpSub.name))
            tmpSub.close()

        self.logger.debug("run tesseract")
        self.run(tessparams)
        self.logger.debug("read txt")

        try:
            with open(tmpTxt.name, 'r') as f:
                self.result_captcha = f.read().replace("\n", "")
        except:
            self.result_captcha = ""

        self.logger.debug(self.result_captcha)
        # best-effort cleanup of the temporary files
        try:
            os.remove(tmp.name)
            os.remove(tmpTxt.name)
            if subset and (digits or lowercase or uppercase):
                os.remove(tmpSub.name)
        except:
            pass

    def get_captcha(self, name):
        # to be implemented by subclasses
        raise NotImplementedError

    def to_greyscale(self):
        """Convert the image to 8-bit greyscale (mode 'L') in place."""
        if self.image.mode != 'L':
            self.image = self.image.convert('L')

        self.pixels = self.image.load()

    def eval_black_white(self, limit):
        """Binarize: pixels above *limit* become white (255), the rest black (0)."""
        self.pixels = self.image.load()
        w, h = self.image.size
        for x in xrange(w):
            for y in xrange(h):
                if self.pixels[x, y] > limit:
                    self.pixels[x, y] = 255
                else:
                    self.pixels[x, y] = 0

    def clean(self, allowed):
        """Noise removal: a dark pixel with fewer than *allowed* dark
        8-neighbours is considered noise and turned white."""
        pixels = self.pixels

        w, h = self.image.size

        for x in xrange(w):
            for y in xrange(h):
                if pixels[x, y] == 255: continue
                # no point in processing white pixels since we only want to remove black pixel
                count = 0

                try:
                    if pixels[x-1, y-1] != 255: count += 1
                    if pixels[x-1, y] != 255: count += 1
                    if pixels[x-1, y + 1] != 255: count += 1
                    if pixels[x, y + 1] != 255: count += 1
                    if pixels[x + 1, y + 1] != 255: count += 1
                    if pixels[x + 1, y] != 255: count += 1
                    if pixels[x + 1, y-1] != 255: count += 1
                    if pixels[x, y-1] != 255: count += 1
                except:
                    # neighbour lookup past the border -- ignore
                    pass

                # not enough neighbors are dark pixels so mark this pixel
                # to be changed to white
                if count < allowed:
                    pixels[x, y] = 1

        # second pass: this time set all 1's to 255 (white)
        for x in xrange(w):
            for y in xrange(h):
                if pixels[x, y] == 1: pixels[x, y] = 255

        self.pixels = pixels

    def derotate_by_average(self):
        """rotate by checking each angle and guess most suitable"""

        w, h = self.image.size
        pixels = self.pixels

        # mark current black pixels with the sentinel 155 so they can be told
        # apart from the 0-filled borders produced by rotation below
        for x in xrange(w):
            for y in xrange(h):
                if pixels[x, y] == 0:
                    pixels[x, y] = 155

        highest = {}

        for angle in range(-45, 45):

            tmpimage = self.image.rotate(angle)

            pixels = tmpimage.load()

            w, h = self.image.size

            # rotation border fill (0) is not content -- blank it to white
            for x in xrange(w):
                for y in xrange(h):
                    if pixels[x, y] == 0:
                        pixels[x, y] = 255

            # per-column count of marked (155) pixels
            count = {}

            for x in xrange(w):
                count[x] = 0
                for y in xrange(h):
                    if pixels[x, y] == 155:
                        count[x] += 1

            total = 0  # was named ``sum``; renamed to stop shadowing the builtin
            cnt = 0

            for x in count.values():
                if x != 0:
                    total += x
                    cnt += 1

            # BUGFIX: guard against an all-white rotation (cnt == 0), which
            # previously raised ZeroDivisionError
            avg = total / cnt if cnt else 0
            highest[angle] = 0
            for x in count.values():
                if x > highest[angle]:
                    highest[angle] = x

            # score for this angle: tallest column above the average height
            highest[angle] = highest[angle] - avg

        hkey = 0
        hvalue = 0

        # pick the best scoring angle and apply it
        for key, value in highest.iteritems():
            if value > hvalue:
                hkey = key
                hvalue = value

        self.image = self.image.rotate(hkey)
        pixels = self.image.load()

        # restore: border fill back to white, sentinel back to black
        for x in xrange(w):
            for y in xrange(h):
                if pixels[x, y] == 0:
                    pixels[x, y] = 255

                if pixels[x, y] == 155:
                    pixels[x, y] = 0

        self.pixels = pixels

    def split_captcha_letters(self):
        """Split the binarized captcha into per-letter sub-images.

        Scans column by column; a column without dark pixels terminates the
        current letter. Fragments smaller than 5x5 pixels are discarded.
        """
        captcha = self.image
        started = False
        letters = []
        width, height = captcha.size
        bottomY, topY = 0, height
        pixels = captcha.load()

        for x in xrange(width):
            black_pixel_in_col = False
            for y in xrange(height):
                if pixels[x, y] != 255:
                    if not started:
                        started = True
                        firstX = x
                        lastX = x

                    if y > bottomY: bottomY = y
                    if y < topY: topY = y
                    if x > lastX: lastX = x

                    black_pixel_in_col = True

            if black_pixel_in_col == False and started == True:
                # letter ended -- crop its bounding box
                rect = (firstX, topY, lastX, bottomY)
                new_captcha = captcha.crop(rect)

                w, h = new_captcha.size
                if w > 5 and h > 5:
                    letters.append(new_captcha)

                started = False
                bottomY, topY = 0, height

        return letters

    def correct(self, values, var=None):
        """Apply substring replacements from *values* (key or iterable of keys
        -> replacement) to *var*, or in place to self.result_captcha."""
        if var:
            result = var
        else:
            result = self.result_captcha

        for key, item in values.iteritems():

            if key.__class__ == str:
                result = result.replace(key, item)
            else:
                # key is an iterable of alternative spellings
                for expr in key:
                    result = result.replace(expr, item)

        if var:
            return result
        else:
            self.result_captcha = result
+
+
# Manual smoke test: run the full preprocessing pipeline on a local image
# and print what tesseract recognises (Python 2 only).
if __name__ == '__main__':
    ocr = OCR()
    ocr.load_image("B.jpg")
    ocr.to_greyscale()
    ocr.eval_black_white(140)
    ocr.derotate_by_average()
    ocr.run_tesser()
    print "Tesseract", ocr.result_captcha
    # keep the derotated image around for visual inspection
    ocr.image.save("derotated.jpg")
+
diff --git a/pyload/plugins/internal/ShareonlineBizOCR.py b/pyload/plugins/internal/ShareonlineBizOCR.py
new file mode 100644
index 000000000..c5c2e92e8
--- /dev/null
+++ b/pyload/plugins/internal/ShareonlineBizOCR.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+#Copyright (C) 2009 kingzero, RaNaN
+#
+#This program is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3 of the License,
+#or (at your option) any later version.
+#
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#See the GNU General Public License for more details.
+#
+#You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+###
+from OCR import OCR
+
class ShareonlineBizOCR(OCR):
    """OCR reader for share-online.biz captchas, recognised letter by letter."""

    __version__ = 0.1

    def __init__(self):
        OCR.__init__(self)

    def get_captcha(self, image):
        """Recognise the captcha in *image* one letter at a time; return the text."""
        self.load_image(image)
        self.to_greyscale()
        # upscale before thresholding so the letter splitting works reliably
        self.image = self.image.resize((160, 50))
        self.pixels = self.image.load()
        self.threshold(1.85)

        # run tesseract on each isolated letter (digits only) and join
        final = ""
        for letter in self.split_captcha_letters():
            self.image = letter
            self.run_tesser(True, True, False, False)
            final += self.result_captcha

        return final

    # tesseract at 60%
+
# Manual smoke test: fetch a live captcha from share-online.biz and print the
# OCR result (requires network access; Python 2 only).
if __name__ == '__main__':
    import urllib
    ocr = ShareonlineBizOCR()
    # download a fresh captcha image next to the script
    urllib.urlretrieve("http://www.share-online.biz/captcha.php", "captcha.jpeg")
    print ocr.get_captcha('captcha.jpeg')
diff --git a/pyload/plugins/internal/SimpleCrypter.py b/pyload/plugins/internal/SimpleCrypter.py
new file mode 100644
index 000000000..546b920e0
--- /dev/null
+++ b/pyload/plugins/internal/SimpleCrypter.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+
+from module.plugins.Crypter import Crypter
+from module.utils import html_unescape
+
+
class SimpleCrypter(Crypter):
    __name__ = "SimpleCrypter"
    __version__ = "0.05"
    __pattern__ = None
    __type__ = "crypter"
    __description__ = """Base crypter plugin"""
    __author_name__ = ("stickell", "zoidberg")
    __author_mail__ = ("l.stickell@yahoo.it", "zoidberg@mujmail.cz")

    # These patterns should be defined by each crypter:
    #
    # LINK_PATTERN: group(1) must be a download link
    #   example: <div class="link"><a href="(http://speedload.org/\w+)
    #
    # TITLE_PATTERN: (optional) the group named 'title' should be the title
    #   example: <title>Files of: (?P<title>[^<]+) folder</title>
    #
    # If the links are spread over multiple pages, additionally define:
    #   PAGES_PATTERN: the group named 'pages' must be the total page count
    # and a method:
    #   loadPage(self, page_n): returns the html of page number 'page_n'

    def decrypt(self, pyfile):
        """Load pyfile.url, collect all links via LINK_PATTERN and build the package."""
        self.html = self.load(pyfile.url, decode=True)

        name, folder = self.getPackageNameAndFolder()

        self.package_links = re.findall(self.LINK_PATTERN, self.html)

        # optional multi-page support, enabled when the subclass defines both hooks
        if hasattr(self, 'PAGES_PATTERN') and hasattr(self, 'loadPage'):
            self.handleMultiPages()

        self.logDebug('Package has %d links' % len(self.package_links))

        if self.package_links:
            self.packages = [(name, self.package_links, folder)]
        else:
            self.fail('Could not extract any links')

    def getPackageNameAndFolder(self):
        """Return (name, folder): from TITLE_PATTERN when available, else from the pyfile package."""
        m = re.search(self.TITLE_PATTERN, self.html) if hasattr(self, 'TITLE_PATTERN') else None
        if m:
            name = folder = html_unescape(m.group('title').strip())
            self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
        else:
            pkg = self.pyfile.package()
            name, folder = pkg.name, pkg.folder
            self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
        return name, folder

    def handleMultiPages(self):
        """Fetch pages 2..N via loadPage() and append their links to package_links."""
        m = re.search(self.PAGES_PATTERN, self.html)
        pages = int(m.group('pages')) if m else 1

        for page in range(2, pages + 1):
            self.html = self.loadPage(page)
            self.package_links += re.findall(self.LINK_PATTERN, self.html)
diff --git a/pyload/plugins/internal/SimpleHoster.py b/pyload/plugins/internal/SimpleHoster.py
new file mode 100644
index 000000000..7b1d7323a
--- /dev/null
+++ b/pyload/plugins/internal/SimpleHoster.py
@@ -0,0 +1,251 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+from urlparse import urlparse
+import re
+from time import time
+
+from module.plugins.Hoster import Hoster
+from module.utils import html_unescape, fixup, parseFileSize
+from module.network.RequestFactory import getURL
+from module.network.CookieJar import CookieJar
+
def replace_patterns(string, ruleslist):
    """Apply each (regex, replacement) rule from *ruleslist* to *string*, in order."""
    for pattern, repl in ruleslist:
        string = re.sub(pattern, repl, string)
    return string
+
def set_cookies(cj, cookies):
    """Store every well-formed (domain, name, value) triple from *cookies* in jar *cj*.

    Entries that are not 3-element tuples are silently skipped.
    """
    for entry in cookies:
        if isinstance(entry, tuple) and len(entry) == 3:
            cj.setCookie(*entry)
+
def parseHtmlTagAttrValue(attr_name, tag):
    """Return the value of attribute *attr_name* inside the HTML *tag* string, or None.

    Handles double-quoted, single-quoted and unquoted attribute values.
    """
    match = re.search(r"%s\s*=\s*([\"']?)((?<=\")[^\"]+|(?<=')[^']+|[^>\s\"'][^>\s]*)\1" % attr_name, tag, re.I)
    if match is None:
        return None
    return match.group(2)

def parseHtmlForm(attr_str, html, input_names=None):
    """Find the first <form> whose opening tag contains *attr_str* and collect its inputs.

    Returns (action, inputs_dict) for the first matching form. When
    *input_names* is a dict, the form must additionally contain every given
    input with an accepted value (exact string, tuple of alternatives, or
    compiled regex). Returns ({}, None) when nothing matches (note the
    historical, reversed placeholder types).
    """
    form_re = r"(?P<tag><form[^>]*%s[^>]*>)(?P<content>.*?)</?(form|body|html)[^>]*>" % attr_str
    for form in re.finditer(form_re, html, re.S | re.I):
        inputs = {}
        action = parseHtmlTagAttrValue("action", form.group('tag'))

        for inputtag in re.finditer(r'(<(input|textarea)[^>]*>)([^<]*(?=</\2)|)', form.group('content'), re.S | re.I):
            name = parseHtmlTagAttrValue("name", inputtag.group(1))
            if not name:
                continue
            value = parseHtmlTagAttrValue("value", inputtag.group(1))
            if value is None:
                # no value attribute -- use the tag body (textarea content)
                inputs[name] = inputtag.group(3) or ''
            else:
                inputs[name] = value

        if not isinstance(input_names, dict) or _formInputsMatch(inputs, input_names):
            return action, inputs

    return {}, None  # no matching form found

def _formInputsMatch(inputs, required):
    """True when every *required* input is present in *inputs* with an accepted value."""
    for key, val in required.items():
        if key not in inputs:
            return False
        if isinstance(val, basestring):
            ok = inputs[key] == val
        elif isinstance(val, tuple):
            ok = inputs[key] in val
        elif hasattr(val, "search"):
            ok = re.match(val, inputs[key]) is not None
        else:
            ok = False
        if not ok:
            return False
    return True
+
def parseFileInfo(self, url = '', html = ''):
    """Extract (name, size, status, url) for a hoster page.

    *self* is a SimpleHoster subclass or instance carrying the
    FILE_*_PATTERN attributes. Status codes: 1 = offline, 2 = online,
    3 = unknown.
    """
    info = {"name" : url, "size" : 0, "status" : 3}

    if hasattr(self, "pyfile"):
        url = self.pyfile.url

    if hasattr(self, "req") and self.req.http.code == '404':
        info['status'] = 1
    else:
        if not html and hasattr(self, "html"): html = self.html
        if isinstance(self.SH_BROKEN_ENCODING, (str, unicode)):
            # server sent a wrong charset header -- re-decode manually
            html = unicode(html, self.SH_BROKEN_ENCODING)
            if hasattr(self, "html"): self.html = html

        if hasattr(self, "FILE_OFFLINE_PATTERN") and re.search(self.FILE_OFFLINE_PATTERN, html):
            # File offline
            info['status'] = 1
        else:
            online = False
            try:
                # the plugin's url pattern itself may carry N/S/U groups
                info.update(re.match(self.__pattern__, url).groupdict())
            except:
                pass

            for pattern in ("FILE_INFO_PATTERN", "FILE_NAME_PATTERN", "FILE_SIZE_PATTERN"):
                try:
                    info.update(re.search(getattr(self, pattern), html).groupdict())
                    online = True
                except AttributeError:
                    # pattern not defined, or not found on the page (re.search -> None)
                    continue

            if online:
                # File online, return name and size
                info['status'] = 2
                if 'N' in info:
                    info['name'] = replace_patterns(info['N'], self.FILE_NAME_REPLACEMENTS)
                if 'S' in info:
                    # NOTE(review): parses as (S + U) if 'U' in info else S --
                    # units are appended before size parsing when present
                    size = replace_patterns(info['S'] + info['U'] if 'U' in info else info['S'], self.FILE_SIZE_REPLACEMENTS)
                    info['size'] = parseFileSize(size)
                elif isinstance(info['size'], (str, unicode)):
                    if 'units' in info: info['size'] += info['units']
                    info['size'] = parseFileSize(info['size'])

    if hasattr(self, "file_info"):
        self.file_info = info

    return info['name'], info['size'], info['status'], url
+
def create_getInfo(plugin):
    """Build a getInfo(urls) generator that probes each url with *plugin*'s patterns."""
    def getInfo(urls):
        for url in urls:
            # fresh cookie jar per url so hoster cookies don't leak between checks
            cj = CookieJar(plugin.__name__)
            if isinstance(plugin.SH_COOKIES, list):
                set_cookies(cj, plugin.SH_COOKIES)
            html = getURL(replace_patterns(url, plugin.FILE_URL_REPLACEMENTS),
                          decode=not plugin.SH_BROKEN_ENCODING, cookies=cj)
            yield parseFileInfo(plugin, url, html)
    return getInfo
+
def timestamp():
    """Current time as whole milliseconds since the epoch."""
    return int(time() * 1000)
+
class PluginParseError(Exception):
    """Raised when a hoster page no longer matches the plugin's patterns."""

    def __init__(self, msg):
        Exception.__init__(self)
        self.value = 'Parse error (%s) - plugin may be out of date' % msg

    def __str__(self):
        return repr(self.value)
+
class SimpleHoster(Hoster):
    __name__ = "SimpleHoster"
    __version__ = "0.28"
    __pattern__ = None
    __type__ = "hoster"
    __description__ = """Base hoster plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")
    """
    These patterns should be defined by each hoster:
    FILE_INFO_PATTERN = r'(?P<N>file_name) (?P<S>file_size) (?P<U>units)'
    or FILE_NAME_PATTERN = r'(?P<N>file_name)'
    and FILE_SIZE_PATTERN = r'(?P<S>file_size) (?P<U>units)'
    FILE_OFFLINE_PATTERN = r'File (deleted|not found)'
    TEMP_OFFLINE_PATTERN = r'Server maintenance'
    """

    # post-processing rules applied to parsed size / name / url (see replace_patterns)
    FILE_SIZE_REPLACEMENTS = []
    FILE_NAME_REPLACEMENTS = [("&#?\w+;", fixup)]
    FILE_URL_REPLACEMENTS = []

    SH_BROKEN_ENCODING = False # Set to True or encoding name if encoding in http header is not correct
    SH_COOKIES = True # or False or list of tuples [(domain, name, value)]
    SH_CHECK_TRAFFIC = False # True = force check traffic left for a premium account

    def init(self):
        # parsed file metadata, filled by parseFileInfo() via getFileInfo()
        self.file_info = {}

    def setup(self):
        # premium accounts may resume and run multiple downloads in parallel
        self.resumeDownload = self.multiDL = True if self.premium else False
        if isinstance(self.SH_COOKIES, list): set_cookies(self.req.cj, self.SH_COOKIES)

    def process(self, pyfile):
        """Main entry point: normalize the url, fetch the page and dispatch
        to the premium or free download handler."""
        pyfile.url = replace_patterns(pyfile.url, self.FILE_URL_REPLACEMENTS)
        self.req.setOption("timeout", 120)
        self.html = self.load(pyfile.url, decode = not self.SH_BROKEN_ENCODING, cookies = self.SH_COOKIES)
        self.getFileInfo()
        if self.premium and (not self.SH_CHECK_TRAFFIC or self.checkTrafficLeft()):
            self.handlePremium()
        else:
            self.handleFree()

    def load(self, url, get={}, post={}, ref=True, cookies=True, just_header=False, decode=False):
        """Wrapper around Hoster.load() that utf8-encodes unicode urls first."""
        if type(url) == unicode: url = url.encode('utf8')
        return Hoster.load(self, url=url, get=get, post=post, ref=ref, cookies=cookies, just_header=just_header, decode=decode)

    def getFileInfo(self):
        """Parse name/size/status from self.html; aborts via offline(),
        tempOffline() or parseError() depending on what was found."""
        self.logDebug("URL: %s" % self.pyfile.url)
        if hasattr(self, "TEMP_OFFLINE_PATTERN") and re.search(self.TEMP_OFFLINE_PATTERN, self.html):
            self.tempOffline()

        name, size, status = parseFileInfo(self)[:3]

        if status == 1:
            self.offline()
        elif status != 2:
            self.logDebug(self.file_info)
            self.parseError('File info')

        if name:
            self.pyfile.name = name
        else:
            # fall back to the last path component of the url
            self.pyfile.name = html_unescape(urlparse(self.pyfile.url).path.split("/")[-1])

        if size:
            self.pyfile.size = size
        else:
            self.logError("File size not parsed")

        self.logDebug("FILE NAME: %s FILE SIZE: %s" % (self.pyfile.name, self.pyfile.size))
        return self.file_info

    def handleFree(self):
        # to be overridden by subclasses that support free downloads
        self.fail("Free download not implemented")

    def handlePremium(self):
        # to be overridden by subclasses that support premium downloads
        self.fail("Premium download not implemented")

    def parseError(self, msg):
        """Signal that the page layout changed and the plugin needs updating."""
        raise PluginParseError(msg)

    def longWait(self, wait_time = None, max_tries = 3):
        """Wait out a (possibly unknown) download limit, then retry."""
        if wait_time and isinstance(wait_time, (int, long, float)):
            time_str = "%dh %dm" % divmod(wait_time / 60, 60)
        else:
            # unknown limit: wait 15 minutes per attempt, allow many retries
            wait_time = 900
            time_str = "(unknown time)"
            max_tries = 100

        self.logInfo("Download limit reached, reconnect or wait %s" % time_str)

        self.setWait(wait_time, True)
        self.wait()
        self.retry(max_tries = max_tries, reason="Download limit reached")

    def parseHtmlForm(self, attr_str='', input_names=None):
        """Convenience wrapper: parse a form out of the current page html."""
        return parseHtmlForm(attr_str, self.html, input_names)

    def checkTrafficLeft(self):
        """True when the account has enough traffic left for self.pyfile (or unlimited)."""
        traffic = self.account.getAccountInfo(self.user, True)["trafficleft"]
        if traffic == -1:
            return True
        size = self.pyfile.size / 1024
        self.logInfo("Filesize: %i KiB, Traffic left for user %s: %i KiB" % (size, self.user, traffic))
        return size <= traffic
diff --git a/pyload/plugins/internal/UnRar.py b/pyload/plugins/internal/UnRar.py
new file mode 100644
index 000000000..7becd663c
--- /dev/null
+++ b/pyload/plugins/internal/UnRar.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+import os
+import re
+from glob import glob
+from subprocess import Popen, PIPE
+from string import digits
+
+from module.utils.fs import save_join, decode, fs_encode
+from module.plugins.internal.AbstractExtractor import AbtractExtractor, WrongPassword, ArchiveError, CRCError
+
class UnRar(AbtractExtractor):
    """Extractor plugin driving the external unrar/rar binary.

    Handles single and multi-part (.partNN.rar / .rNN) archives, password
    detection, and percentage progress parsed from unrar's stdout.
    """

    __name__ = "UnRar"
    __version__ = "0.13"

    # there are some more uncovered rar formats
    re_splitfile = re.compile(r"(.*)\.part(\d+)\.rar$", re.I)
    re_partfiles = re.compile(r".*\.(rar|r[0-9]+)", re.I)
    re_filelist = re.compile(r"(.+)\s+(\d+)\s+(\d+)\s+")
    re_wrongpwd = re.compile("(Corrupt file or wrong password|password incorrect)", re.I)
    CMD = "unrar"

    @staticmethod
    def checkDeps():
        """Probe for a usable unrar (or rar) binary; an uncaught OSError
        propagates when neither can be started."""
        if os.name == "nt":
            UnRar.CMD = save_join(pypath, "UnRAR.exe")
            p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE)
            p.communicate()
        else:
            try:
                p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE)
                p.communicate()
            except OSError:
                # fallback to rar
                UnRar.CMD = "rar"
                p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE)
                p.communicate()

        return True

    @staticmethod
    def getTargets(files_ids):
        """Pick extractable archives: plain .rar files, plus only the FIRST
        part of split .partNN.rar sets."""
        result = []

        for file, id in files_ids:
            if not file.endswith(".rar"): continue

            match = UnRar.re_splitfile.findall(file)
            if match:
                # only add first parts
                if int(match[0][1]) == 1:
                    result.append((file, id))
            else:
                result.append((file, id))

        return result

    def init(self):
        self.passwordProtected = False
        self.headerProtected = False    # list files will not work without password
        self.smallestFile = None        # small file to test passwords
        self.password = ""              # save the correct password

    def checkArchive(self):
        """Return True when the archive needs a password; also populates self.files."""
        p = self.call_unrar("l", "-v", fs_encode(self.file))
        out, err = p.communicate()
        if self.re_wrongpwd.search(err):
            self.passwordProtected = True
            self.headerProtected = True
            return True

        # output only used to check if passworded files are present
        for name, size, packed in self.re_filelist.findall(out):
            if name.startswith("*"):
                self.passwordProtected = True
                return True

        self.listContent()
        if not self.files:
            raise ArchiveError("Empty Archive")

        return False

    def checkPassword(self, password):
        """Best-effort password check (only reliable for header-protected archives)."""
        # at this point we can only verify header protected files
        if self.headerProtected:
            p = self.call_unrar("l", "-v", fs_encode(self.file), password=password)
            out, err = p.communicate()
            if self.re_wrongpwd.search(err):
                return False

        return True

    def extract(self, progress, password=None):
        """Extract to self.out, reporting percent via *progress*; raises
        CRCError / WrongPassword / ArchiveError on failure."""
        command = "x" if self.fullpath else "e"

        p = self.call_unrar(command, fs_encode(self.file), self.out, password=password)
        # NOTE(review): self.renice is expected to be provided by the core
        # extractor config -- confirm
        renice(p.pid, self.renice)

        progress(0)
        progressstring = ""
        while True:
            c = p.stdout.read(1)
            # quit loop on eof
            if not c:
                break
            # reading a percentage sign -> set progress and restart
            if c == '%':
                progress(int(progressstring))
                progressstring = ""
            # not reading a digit -> therefore restart
            elif c not in digits:
                progressstring = ""
            # add digit to progressstring
            else:
                progressstring = progressstring + c
        progress(100)

        # retrieve stderr
        err = p.stderr.read()

        if "CRC failed" in err and not password and not self.passwordProtected:
            raise CRCError
        elif "CRC failed" in err:
            raise WrongPassword
        if err.strip():  # raise error if anything is on stderr
            raise ArchiveError(err.strip())
        if p.returncode:
            raise ArchiveError("Process terminated")

        if not self.files:
            self.password = password
            self.listContent()

    def getDeleteFiles(self):
        """Return all archive volume paths belonging to self.file."""
        if ".part" in self.file:
            # BUGFIX: re.IGNORECASE was passed as re.sub()'s 4th positional
            # argument, which is *count* -- the flag was never applied; use
            # an inline (?i) flag instead
            return glob(re.sub(r"(?i)(?<=\.part)([01]+)", "*", self.file))
        # get files which matches .r* and filter unsuited files out
        # BUGFIX: same count/flags mix-up as above
        parts = glob(re.sub(r"(?i)(?<=\.r)ar$", "*", self.file))
        return filter(lambda x: self.re_partfiles.match(x), parts)

    def listContent(self):
        """Fill self.files with the absolute target paths of the archive members."""
        command = "vb" if self.fullpath else "lb"
        p = self.call_unrar(command, "-v", fs_encode(self.file), password=self.password)
        out, err = p.communicate()

        if "Cannot open" in err:
            raise ArchiveError("Cannot open file")

        if err.strip():  # only log error at this point
            self.m.logError(err.strip())

        result = set()

        for f in decode(out).splitlines():
            f = f.strip()
            result.add(save_join(self.out, f))

        self.files = result

    def call_unrar(self, command, *xargs, **kwargs):
        """Spawn unrar with *command*, standard flags and *xargs*; returns the Popen."""
        args = []
        # overwrite flag
        if self.overwrite:
            args.append("-o+")
        else:
            args.append("-o-")

        # assume yes on all queries
        args.append("-y")

        # set a password
        if "password" in kwargs and kwargs["password"]:
            args.append("-p%s" % kwargs["password"])
        else:
            args.append("-p-")

        # NOTE: return codes are not reliable, some kind of threading, cleanup whatever issue
        call = [self.CMD, command] + args + list(xargs)
        self.m.logDebug(" ".join([decode(arg) for arg in call]))

        p = Popen(call, stdout=PIPE, stderr=PIPE)

        return p
+
+
+def renice(pid, value):
+ if os.name != "nt" and value:
+ try:
+ Popen(["renice", str(value), str(pid)], stdout=PIPE, stderr=PIPE, bufsize=-1)
+ except:
+ print "Renice failed"
diff --git a/pyload/plugins/internal/UnZip.py b/pyload/plugins/internal/UnZip.py
new file mode 100644
index 000000000..9aa9ac75c
--- /dev/null
+++ b/pyload/plugins/internal/UnZip.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+import zipfile
+import sys
+
+from module.plugins.internal.AbstractExtractor import AbtractExtractor
+
class UnZip(AbtractExtractor):
    """Extractor plugin for plain .zip archives, based on the stdlib zipfile module."""

    __name__ = "UnZip"
    __version__ = "0.1"

    @staticmethod
    def checkDeps():
        """zipfile.extractall() requires Python 2.6+."""
        return sys.version_info[:2] >= (2, 6)

    @staticmethod
    def getTargets(files_ids):
        """Return the (file, id) pairs whose file name ends with .zip."""
        return [(file, id) for file, id in files_ids if file.endswith(".zip")]

    def extract(self, progress, password=None):
        """Extract the whole archive into self.out.

        *progress* is accepted for interface compatibility but not reported
        (zipfile offers no per-byte callback).
        """
        z = zipfile.ZipFile(self.file)
        try:
            self.files = z.namelist()
            # BUGFIX: the password argument was silently ignored before
            z.extractall(self.out, pwd=password or None)
        finally:
            # BUGFIX: close the archive handle (was leaked)
            z.close()

    def getDeleteFiles(self):
        # a zip archive is a single file -- delete just it
        return [self.file]
diff --git a/pyload/plugins/internal/XFSPAccount.py b/pyload/plugins/internal/XFSPAccount.py
new file mode 100644
index 000000000..8333c7265
--- /dev/null
+++ b/pyload/plugins/internal/XFSPAccount.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from time import mktime, strptime
+from module.plugins.Account import Account
+from module.plugins.internal.SimpleHoster import parseHtmlForm
+from module.utils import parseFileSize
+
class XFSPAccount(Account):
    # Shared account implementation for hosters built on the XFileSharingPro
    # script; subclasses only need to set MAIN_PAGE.
    __name__ = "XFSPAccount"
    __version__ = "0.05"
    __type__ = "account"
    __description__ = """XFileSharingPro account base"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")

    # base url of the hoster, overridden by subclasses
    # NOTE(review): login() concatenates 'login.html' directly, so this
    # presumably must end with '/' -- confirm against subclasses
    MAIN_PAGE = None

    VALID_UNTIL_PATTERN = r'>Premium.[Aa]ccount expire:</TD><TD><b>([^<]+)</b>'
    TRAFFIC_LEFT_PATTERN = r'>Traffic available today:</TD><TD><b>([^<]+)</b>'

    def loadAccountInfo(self, user, req):
        """Scrape expiry date, traffic left and premium flag from the my_account page."""
        html = req.load(self.MAIN_PAGE + "?op=my_account", decode = True)

        validuntil = trafficleft = None
        premium = True if '>Renew premium<' in html else False

        found = re.search(self.VALID_UNTIL_PATTERN, html)
        if found:
            # an expiry date implies a premium account with unlimited traffic
            premium = True
            trafficleft = -1
            try:
                self.logDebug(found.group(1))
                validuntil = mktime(strptime(found.group(1), "%d %B %Y"))
            except Exception, e:
                self.logError(e)
        else:
            found = re.search(self.TRAFFIC_LEFT_PATTERN, html)
            if found:
                trafficleft = found.group(1)
                if "Unlimited" in trafficleft:
                    premium = True
                else:
                    # convert bytes to KiB for pyLoad's traffic accounting
                    trafficleft = parseFileSize(trafficleft) / 1024

        return ({"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium})

    def login(self, user, data, req):
        """Log in via the site's 'FL' login form, or a default op=login post."""
        html = req.load('%slogin.html' % self.MAIN_PAGE, decode = True)

        action, inputs = parseHtmlForm('name="FL"', html)
        if not inputs:
            # form not found -- fall back to the standard XFSP login post
            inputs = {"op": "login",
                "redirect": self.MAIN_PAGE}

        inputs.update({"login": user,
            "password": data['password']})

        html = req.load(self.MAIN_PAGE, post = inputs, decode = True)

        if 'Incorrect Login or Password' in html or '>Error<' in html:
            self.wrongPassword()
diff --git a/pyload/plugins/internal/__init__.py b/pyload/plugins/internal/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/plugins/internal/__init__.py
diff --git a/pyload/plugins/network/CurlChunk.py b/pyload/plugins/network/CurlChunk.py
new file mode 100644
index 000000000..4250db2ce
--- /dev/null
+++ b/pyload/plugins/network/CurlChunk.py
@@ -0,0 +1,299 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from os import remove, stat, fsync
+from os.path import exists
+from time import sleep
+from re import search
+
+import codecs
+import pycurl
+
+from pyload.utils import remove_chars
+from pyload.utils.fs import fs_encode
+
+from CurlRequest import CurlRequest
+
+class WrongFormat(Exception):
+    """Raised when a .chunks info file cannot be parsed (see ChunkInfo.load)."""
+    pass
+
+
+class ChunkInfo():
+    """Layout metadata for a chunked download.
+
+    Tracks the target file name, total size and the (name, (start, end))
+    byte range of every chunk; can be persisted to and restored from a
+    "<name>.chunks" sidecar file so a download can be resumed.
+    """
+    def __init__(self, name):
+        self.name = unicode(name)
+        self.size = 0
+        self.resume = False   # set True by callers when loaded from a valid file
+        self.chunks = []      # list of (chunk file name, (start, end)) tuples
+
+    def __repr__(self):
+        ret = "ChunkInfo: %s, %s\n" % (self.name, self.size)
+        for i, c in enumerate(self.chunks):
+            ret += "%s# %s\n" % (i, c[1])
+
+        return ret
+
+    def setSize(self, size):
+        self.size = int(size)
+
+    def addChunk(self, name, range):
+        self.chunks.append((name, range))
+
+    def clear(self):
+        self.chunks = []
+
+    def createChunks(self, chunks):
+        """Partition self.size into `chunks` contiguous byte ranges."""
+        self.clear()
+        chunk_size = self.size / chunks
+
+        current = 0
+        for i in range(chunks):
+            # last chunk absorbs the division remainder up to the final byte
+            end = self.size - 1 if (i == chunks - 1) else current + chunk_size
+            self.addChunk("%s.chunk%s" % (self.name, i), (current, end))
+            current += chunk_size + 1
+
+
+    def save(self):
+        """Write the chunk layout to "<name>.chunks" as utf-8 text."""
+        fs_name = fs_encode("%s.chunks" % self.name)
+        fh = codecs.open(fs_name, "w", "utf_8")
+        fh.write("name:%s\n" % self.name)
+        fh.write("size:%s\n" % self.size)
+        for i, c in enumerate(self.chunks):
+            fh.write("#%d:\n" % i)
+            fh.write("\tname:%s\n" % c[0])
+            fh.write("\trange:%i-%i\n" % c[1])
+        fh.close()
+
+    @staticmethod
+    def load(name):
+        """Parse "<name>.chunks" and return a ChunkInfo.
+
+        Raises IOError when the file is missing and WrongFormat when its
+        content does not match the format written by save().
+        """
+        fs_name = fs_encode("%s.chunks" % name)
+        if not exists(fs_name):
+            raise IOError()
+        fh = codecs.open(fs_name, "r", "utf_8")
+        name = fh.readline()[:-1]   # strip trailing newline
+        size = fh.readline()[:-1]
+        if name.startswith("name:") and size.startswith("size:"):
+            name = name[5:]
+            size = size[5:]
+        else:
+            fh.close()
+            raise WrongFormat()
+        ci = ChunkInfo(name)
+        ci.loaded = True
+        ci.setSize(size)
+        while True:
+            if not fh.readline(): #skip line ("#i:" header; empty string means EOF)
+                break
+            name = fh.readline()[1:-1]    # drop leading tab and trailing newline
+            range = fh.readline()[1:-1]
+            if name.startswith("name:") and range.startswith("range:"):
+                name = name[5:]
+                range = range[6:].split("-")
+            else:
+                raise WrongFormat()
+
+            ci.addChunk(name, (long(range[0]), long(range[1])))
+        fh.close()
+        return ci
+
+    def remove(self):
+        """Delete the sidecar info file if it exists."""
+        fs_name = fs_encode("%s.chunks" % self.name)
+        if exists(fs_name): remove(fs_name)
+
+    def getCount(self):
+        """Number of chunks."""
+        return len(self.chunks)
+
+    def getChunkName(self, index):
+        return self.chunks[index][0]
+
+    def getChunkRange(self, index):
+        return self.chunks[index][1]
+
+
+class CurlChunk(CurlRequest):
+    """One byte-range of a CurlDownload, driven by its own pycurl handle."""
+    def __init__(self, id, parent, range=None, resume=False):
+        self.setContext(*parent.getContext())
+
+        self.id = id
+        self.p = parent # CurlDownload instance
+        self.range = range # tuple (start, end)
+        self.resume = resume
+        self.log = parent.log
+
+        self.size = range[1] - range[0] if range else -1  # -1: size unknown
+        self.arrived = 0
+        self.lastURL = self.p.referer
+
+        self.c = pycurl.Curl()
+
+        self.header = ""
+        self.headerParsed = False #indicates if the header has been processed
+
+        self.fp = None #file handle
+
+        self.initContext()
+
+        self.BOMChecked = False # check and remove byte order mark
+
+        self.rep = None
+
+        # adaptive sleep used in writeBody to trade cpu for bandwidth
+        self.sleep = 0.000
+        self.lastSize = 0
+
+    def __repr__(self):
+        return "<CurlChunk id=%d, size=%d, arrived=%d>" % (self.id, self.size, self.arrived)
+
+    @property
+    def cj(self):
+        # cookie jar is shared through the parent download's context
+        return self.p.context
+
+    def getHandle(self):
+        """ returns a Curl handle ready to use for perform/multiperform,
+        or None if this chunk is already complete """
+
+        self.setRequestContext(self.p.url, self.p.get, self.p.post, self.p.referer, self.cj)
+        self.c.setopt(pycurl.WRITEFUNCTION, self.writeBody)
+        self.c.setopt(pycurl.HEADERFUNCTION, self.writeHeader)
+
+        # request all bytes, since some servers in russia seems to have a defect arihmetic unit
+
+        fs_name = fs_encode(self.p.info.getChunkName(self.id))
+        if self.resume:
+            self.fp = open(fs_name, "ab")
+            self.arrived = self.fp.tell()
+            if not self.arrived:
+                # tell() may be 0 on a fresh append handle; fall back to stat
+                self.arrived = stat(fs_name).st_size
+
+            if self.range:
+                #do nothing if chunk already finished
+                if self.arrived + self.range[0] >= self.range[1]: return None
+
+                if self.id == len(self.p.info.chunks) - 1: #as last chunk dont set end range, so we get everything
+                    range = "%i-" % (self.arrived + self.range[0])
+                else:
+                    range = "%i-%i" % (self.arrived + self.range[0], min(self.range[1] + 1, self.p.size - 1))
+
+                self.log.debug("Chunked resume with range %s" % range)
+                self.c.setopt(pycurl.RANGE, range)
+            else:
+                self.log.debug("Resume File from %i" % self.arrived)
+                self.c.setopt(pycurl.RESUME_FROM, self.arrived)
+
+        else:
+            if self.range:
+                if self.id == len(self.p.info.chunks) - 1: # see above
+                    range = "%i-" % self.range[0]
+                else:
+                    range = "%i-%i" % (self.range[0], min(self.range[1] + 1, self.p.size - 1))
+
+                self.log.debug("Chunked with range %s" % range)
+                self.c.setopt(pycurl.RANGE, range)
+
+            self.fp = open(fs_name, "wb")
+
+        return self.c
+
+    def writeHeader(self, buf):
+        """pycurl HEADERFUNCTION callback: accumulate and parse response headers."""
+        self.header += buf
+        #@TODO forward headers?, this is possibly unneeded, when we just parse valid 200 headers
+        # as first chunk, we will parse the headers
+        if not self.range and self.header.endswith("\r\n\r\n"):
+            self.parseHeader()
+        elif not self.range and buf.startswith("150") and "data connection" in buf: #ftp file size parsing
+            size = search(r"(\d+) bytes", buf)
+            if size:
+                self.p._size = int(size.group(1))
+                self.p.chunkSupport = True
+
+            self.headerParsed = True
+
+    def writeBody(self, buf):
+        """pycurl WRITEFUNCTION callback: write data to disk, throttle, count bytes."""
+        #ignore BOM, it confuses unrar
+        if not self.BOMChecked:
+            if [ord(b) for b in buf[:3]] == [239, 187, 191]:
+                buf = buf[3:]
+            self.BOMChecked = True
+
+        size = len(buf)
+
+        self.arrived += size
+
+        self.fp.write(buf)
+
+        if self.p.bucket:
+            # global rate limit: bucket returns the time to pause
+            sleep(self.p.bucket.consumed(size))
+        else:
+            # Avoid small buffers, increasing sleep time slowly if buffer size gets smaller
+            # otherwise reduce sleep time percentile (values are based on tests)
+            # So in general cpu time is saved without reducing bandwidth too much
+
+            if size < self.lastSize:
+                self.sleep += 0.002
+            else:
+                self.sleep *= 0.7
+
+            self.lastSize = size
+
+            sleep(self.sleep)
+
+        if self.range and self.arrived > self.size:
+            return 0 #close if we have enough data
+
+
+    def parseHeader(self):
+        """parse data from received header"""
+        for orgline in self.decodeResponse(self.header).splitlines():
+            line = orgline.strip().lower()
+            if line.startswith("accept-ranges") and "bytes" in line:
+                self.p.chunkSupport = True
+
+            if "content-disposition" in line:
+
+                m = search("filename(?P<type>=|\*=(?P<enc>.+)'')(?P<name>.*)", line)
+                if m:
+                    # strip quotes/semicolons/path chars from the server-supplied name
+                    name = remove_chars(m.groupdict()['name'], "\"';/").strip()
+                    self.p._name = name
+                    self.log.debug("Content-Disposition: %s" % name)
+
+            if not self.resume and line.startswith("content-length"):
+                self.p._size = int(line.split(":")[1])
+
+        self.headerParsed = True
+
+    def stop(self):
+        """The download will not proceed after next call of writeBody"""
+        self.range = [0,0]
+        self.size = 0
+
+    def resetRange(self):
+        """ Reset the range, so the download will load all data available """
+        self.range = None
+
+    def setRange(self, range):
+        self.range = range
+        self.size = range[1] - range[0]
+
+    def flushFile(self):
+        """ flush and close file """
+        self.fp.flush()
+        fsync(self.fp.fileno()) #make sure everything was written to disk
+        self.fp.close() #needs to be closed, or merging chunks will fail
+
+    def close(self):
+        """ closes everything, unusable after this """
+        if self.fp: self.fp.close()
+        self.c.close()
+        if hasattr(self, "p"): del self.p  # break reference cycle to parent
diff --git a/pyload/plugins/network/CurlDownload.py b/pyload/plugins/network/CurlDownload.py
new file mode 100644
index 000000000..5de83ec7b
--- /dev/null
+++ b/pyload/plugins/network/CurlDownload.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from os import remove
+from os.path import dirname
+from time import time
+from shutil import move
+
+import pycurl
+
+from pyload.plugins.Base import Abort
+from pyload.utils.fs import save_join, fs_encode
+
+from ..Download import Download
+from CurlChunk import ChunkInfo, CurlChunk
+from CurlRequest import ResponseException
+
+# TODO: save content-disposition for resuming
+
+class CurlDownload(Download):
+    """ loads an url, http + ftp supported """
+
+    # def __init__(self, url, filename, get={}, post={}, referer=None, cj=None, bucket=None,
+    #              options={}, disposition=False):
+
+    def __init__(self, *args, **kwargs):
+        Download.__init__(self, *args, **kwargs)
+
+        self.path = None
+        self.disposition = False
+
+        self.chunks = []          # active CurlChunk instances
+        self.chunkSupport = None  # set once headers reveal range support
+
+        self.m = pycurl.CurlMulti()
+
+        #needed for speed calculation
+        self.lastArrived = []
+        self.speeds = []
+        self.lastSpeeds = [0, 0]
+
+    @property
+    def speed(self):
+        # average of current and the two previous per-chunk speed samples
+        last = [sum(x) for x in self.lastSpeeds if x]
+        return (sum(self.speeds) + sum(last)) / (1 + len(last))
+
+    @property
+    def arrived(self):
+        return sum(c.arrived for c in self.chunks) if self.chunks else self._size
+
+    @property
+    def name(self):
+        # server-supplied name (Content-Disposition), only honoured on request
+        return self._name if self.disposition else None
+
+    def _copyChunks(self):
+        """Merge all chunk files into one, rename to the final path, drop info file."""
+        init = fs_encode(self.info.getChunkName(0)) #initial chunk name
+
+        if self.info.getCount() > 1:
+            fo = open(init, "rb+") #first chunkfile
+            for i in range(1, self.info.getCount()):
+                #input file
+                fo.seek(
+                    self.info.getChunkRange(i - 1)[1] + 1) #seek to beginning of chunk, to get rid of overlapping chunks
+                fname = fs_encode("%s.chunk%d" % (self.path, i))
+                fi = open(fname, "rb")
+                buf = 32 * 1024
+                while True: #copy in chunks, consumes less memory
+                    data = fi.read(buf)
+                    if not data:
+                        break
+                    fo.write(data)
+                fi.close()
+                if fo.tell() < self.info.getChunkRange(i)[1]:
+                    fo.close()
+                    remove(init)
+                    self.info.remove() #there are probably invalid chunks
+                    raise Exception("Downloaded content was smaller than expected. Try to reduce download connections.")
+                remove(fname) #remove chunk
+            fo.close()
+
+        if self.name:
+            self.filename = save_join(dirname(self.path), self.name)
+
+        move(init, fs_encode(self.path))
+        self.info.remove() #remove info file
+
+    def checkResume(self):
+        """Load an existing .chunks info file for self.path, or start fresh."""
+        try:
+            self.info = ChunkInfo.load(self.path)
+            self.info.resume = True #resume is only possible with valid info file
+            self._size = self.info.size
+            self.infoSaved = True
+        except IOError:
+            self.info = ChunkInfo(self.path)
+
+    def download(self, uri, path, get={}, post={}, referer=True, disposition=False, chunks=1, resume=False):
+        """ returns new filename or None """
+        self.url = uri
+        self.path = path
+        self.disposition = disposition
+        self.get = get
+        self.post = post
+        self.referer = referer
+
+        self.checkResume()
+        chunks = max(1, chunks)
+        resume = self.info.resume and resume
+
+        try:
+            self._download(chunks, resume)
+        except pycurl.error, e:
+            #code 33 - no resume
+            code = e.args[0]
+            if code == 33:
+                # try again without resume
+                self.log.debug("Errno 33 -> Restart without resume")
+
+                #remove old handles
+                for chunk in self.chunks:
+                    self.closeChunk(chunk)
+
+                return self._download(chunks, False)
+            else:
+                raise
+        finally:
+            self.close()
+
+        return self.name
+
+    def _download(self, chunks, resume):
+        """Run the curl multi loop until all chunks are done, then merge them."""
+        if not resume:
+            self.info.clear()
+            self.info.addChunk("%s.chunk0" % self.path, (0, 0)) #create an initial entry
+
+        self.chunks = []
+
+        init = CurlChunk(0, self, None, resume) #initial chunk that will load complete file (if needed)
+
+        self.chunks.append(init)
+        self.m.add_handle(init.getHandle())
+
+        lastFinishCheck = 0
+        lastTimeCheck = 0
+        chunksDone = set() # list of curl handles that are finished
+        chunksCreated = False
+        done = False
+        if self.info.getCount() > 1: # This is a resume, if we were chunked originally assume still can
+            self.chunkSupport = True
+
+        while 1:
+            #need to create chunks
+            if not chunksCreated and self.chunkSupport and self.size: #will be set later by first chunk
+
+                if not resume:
+                    self.info.setSize(self.size)
+                    self.info.createChunks(chunks)
+                    self.info.save()
+
+                chunks = self.info.getCount()
+
+                init.setRange(self.info.getChunkRange(0))
+
+                for i in range(1, chunks):
+                    c = CurlChunk(i, self, self.info.getChunkRange(i), resume)
+
+                    handle = c.getHandle()
+                    if handle:
+                        self.chunks.append(c)
+                        self.m.add_handle(handle)
+                    else:
+                        #close immediately
+                        self.log.debug("Invalid curl handle -> closed")
+                        c.close()
+
+                chunksCreated = True
+
+            while 1:
+                ret, num_handles = self.m.perform()
+                if ret != pycurl.E_CALL_MULTI_PERFORM:
+                    break
+
+            t = time()
+
+            # reduce these calls
+            # when num_q is 0, the loop is exited
+            while lastFinishCheck + 0.5 < t:
+                # list of failed curl handles
+                failed = []
+                ex = None # save only last exception, we can only raise one anyway
+
+                num_q, ok_list, err_list = self.m.info_read()
+                for c in ok_list:
+                    chunk = self.findChunk(c)
+                    try: # check if the header implies success, else add it to failed list
+                        chunk.verifyHeader()
+                    except ResponseException, e:
+                        self.log.debug("Chunk %d failed: %s" % (chunk.id + 1, str(e)))
+                        failed.append(chunk)
+                        ex = e
+                    else:
+                        chunksDone.add(c)
+
+                for c in err_list:
+                    curl, errno, msg = c
+                    chunk = self.findChunk(curl)
+                    #test if chunk was finished
+                    if errno != 23 or "0 !=" not in msg:
+                        failed.append(chunk)
+                        ex = pycurl.error(errno, msg)
+                        self.log.debug("Chunk %d failed: %s" % (chunk.id + 1, str(ex)))
+                        continue
+
+                    try: # check if the header implies success, else add it to failed list
+                        chunk.verifyHeader()
+                    except ResponseException, e:
+                        self.log.debug("Chunk %d failed: %s" % (chunk.id + 1, str(e)))
+                        failed.append(chunk)
+                        ex = e
+                    else:
+                        chunksDone.add(curl)
+                if not num_q: # no more info to get
+
+                    # check if init is not finished so we reset download connections
+                    # note that other chunks are closed and everything downloaded with initial connection
+                    if failed and init not in failed and init.c not in chunksDone:
+                        self.log.error(_("Download chunks failed, fallback to single connection | %s" % (str(ex))))
+
+                        #list of chunks to clean and remove
+                        to_clean = filter(lambda x: x is not init, self.chunks)
+                        for chunk in to_clean:
+                            self.closeChunk(chunk)
+                            self.chunks.remove(chunk)
+                            remove(fs_encode(self.info.getChunkName(chunk.id)))
+
+                        #let first chunk load the rest and update the info file
+                        init.resetRange()
+                        self.info.clear()
+                        self.info.addChunk("%s.chunk0" % self.filename, (0, self.size))
+                        self.info.save()
+                    elif failed:
+                        raise ex
+
+                    lastFinishCheck = t
+
+                    if len(chunksDone) >= len(self.chunks):
+                        if len(chunksDone) > len(self.chunks):
+                            self.log.warning("Finished download chunks size incorrect, please report bug.")
+                        done = True #all chunks loaded
+
+                    break
+
+            if done:
+                break #all chunks loaded
+
+            # calc speed once per second, averaging over 3 seconds
+            if lastTimeCheck + 1 < t:
+                diff = [c.arrived - (self.lastArrived[i] if len(self.lastArrived) > i else 0) for i, c in
+                        enumerate(self.chunks)]
+
+                self.lastSpeeds[1] = self.lastSpeeds[0]
+                self.lastSpeeds[0] = self.speeds
+                self.speeds = [float(a) / (t - lastTimeCheck) for a in diff]
+                self.lastArrived = [c.arrived for c in self.chunks]
+                lastTimeCheck = t
+
+            if self.doAbort:
+                raise Abort()
+
+            self.m.select(1)
+
+        for chunk in self.chunks:
+            chunk.flushFile() #make sure downloads are written to disk
+
+        self._copyChunks()
+
+    def findChunk(self, handle):
+        """ linear search to find a chunk (should be ok since chunk size is usually low) """
+        for chunk in self.chunks:
+            if chunk.c == handle: return chunk
+
+    def closeChunk(self, chunk):
+        """Remove the chunk's handle from the multi object and close it."""
+        try:
+            self.m.remove_handle(chunk.c)
+        except pycurl.error, e:
+            self.log.debug("Error removing chunk: %s" % str(e))
+        finally:
+            chunk.close()
+
+    def close(self):
+        """ cleanup """
+        for chunk in self.chunks:
+            self.closeChunk(chunk)
+        else:
+            #Workaround: pycurl segfaults when closing multi, that never had any curl handles
+            if hasattr(self, "m"):
+                c = pycurl.Curl()
+                self.m.add_handle(c)
+                self.m.remove_handle(c)
+                c.close()
+
+        self.chunks = []
+        if hasattr(self, "m"):
+            self.m.close()
+            del self.m
+        if hasattr(self, "cj"):
+            del self.cj
+        if hasattr(self, "info"):
+            del self.info
\ No newline at end of file
diff --git a/pyload/plugins/network/CurlRequest.py b/pyload/plugins/network/CurlRequest.py
new file mode 100644
index 000000000..4630403df
--- /dev/null
+++ b/pyload/plugins/network/CurlRequest.py
@@ -0,0 +1,314 @@
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+import pycurl
+
+from codecs import getincrementaldecoder, lookup, BOM_UTF8
+from urllib import quote, urlencode
+from httplib import responses
+from cStringIO import StringIO
+
+from pyload.plugins.Base import Abort
+from pyload.network.CookieJar import CookieJar
+
+from ..Request import Request, ResponseException
+
+
+def myquote(url):
+    """Percent-encode a URL (utf-8 encoding unicode first), keeping URL syntax chars safe."""
+    return quote(url.encode('utf8') if isinstance(url, unicode) else url, safe="%/:=&?~#+!$,;'@()*[]")
+
+
+def myurlencode(data):
+    """urlencode a mapping after utf-8 encoding any unicode keys/values."""
+    data = dict(data)
+    return urlencode(dict((x.encode('utf8') if isinstance(x, unicode) else x, \
+                           y.encode('utf8') if isinstance(y, unicode) else y ) for x, y in data.iteritems()))
+
+
+bad_headers = range(400, 418) + range(500, 506)
+
+
+class CurlRequest(Request):
+    """ Request class based on libcurl """
+
+    __version__ = "0.1"
+
+    CONTEXT_CLASS = CookieJar
+
+    def __init__(self, *args, **kwargs):
+        self.c = pycurl.Curl()
+        Request.__init__(self, *args, **kwargs)
+
+        self.rep = StringIO()   # response body buffer
+        self.lastURL = None
+        self.lastEffectiveURL = None
+
+        # cookiejar defines the context
+        self.cj = self.context
+
+        self.c.setopt(pycurl.WRITEFUNCTION, self.write)
+        self.c.setopt(pycurl.HEADERFUNCTION, self.writeHeader)
+
+        # TODO: addAuth, addHeader
+
+    def initContext(self):
+        """Apply handle defaults plus interface/proxy/custom options from config."""
+        self.initHandle()
+
+        if self.config:
+            self.setInterface(self.config)
+            self.initOptions(self.config)
+
+    def initHandle(self):
+        """ sets common options to curl handle """
+
+        self.c.setopt(pycurl.FOLLOWLOCATION, 1)
+        self.c.setopt(pycurl.MAXREDIRS, 5)
+        self.c.setopt(pycurl.CONNECTTIMEOUT, 30)
+        self.c.setopt(pycurl.NOSIGNAL, 1)
+        self.c.setopt(pycurl.NOPROGRESS, 1)
+        if hasattr(pycurl, "AUTOREFERER"):
+            self.c.setopt(pycurl.AUTOREFERER, 1)
+        self.c.setopt(pycurl.SSL_VERIFYPEER, 0)
+        # Interval for low speed, detects connection loss, but can abort dl if hoster stalls the download
+        self.c.setopt(pycurl.LOW_SPEED_TIME, 45)
+        self.c.setopt(pycurl.LOW_SPEED_LIMIT, 5)
+
+        # don't save the cookies
+        self.c.setopt(pycurl.COOKIEFILE, "")
+        self.c.setopt(pycurl.COOKIEJAR, "")
+
+        #self.c.setopt(pycurl.VERBOSE, 1)
+
+        self.c.setopt(pycurl.USERAGENT,
+                      "Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0")
+        if pycurl.version_info()[7]:
+            # libcurl built with zlib: accept compressed responses
+            self.c.setopt(pycurl.ENCODING, "gzip, deflate")
+        self.c.setopt(pycurl.HTTPHEADER, ["Accept: */*",
+                                          "Accept-Language: en-US,en",
+                                          "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7",
+                                          "Connection: keep-alive",
+                                          "Keep-Alive: 300",
+                                          "Expect:"])
+
+    def setInterface(self, options):
+        """Configure network interface, proxy and IP-version from the options dict."""
+
+        interface, proxy, ipv6 = options["interface"], options["proxies"], options["ipv6"]
+
+        if interface and interface.lower() != "none":
+            self.c.setopt(pycurl.INTERFACE, str(interface))
+
+        if proxy:
+            if proxy["type"] == "socks4":
+                self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS4)
+            elif proxy["type"] == "socks5":
+                self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
+            else:
+                self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_HTTP)
+
+            self.c.setopt(pycurl.PROXY, str(proxy["address"]))
+            self.c.setopt(pycurl.PROXYPORT, proxy["port"])
+
+            if proxy["username"]:
+                self.c.setopt(pycurl.PROXYUSERPWD, str("%s:%s" % (proxy["username"], proxy["password"])))
+
+        if ipv6:
+            self.c.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
+        else:
+            self.c.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
+
+        if "timeout" in options:
+            self.c.setopt(pycurl.LOW_SPEED_TIME, options["timeout"])
+
+    def initOptions(self, options):
+        """ Sets same options as available in pycurl """
+        for k, v in options.iteritems():
+            if hasattr(pycurl, k):
+                self.c.setopt(getattr(pycurl, k), v)
+
+    def setRequestContext(self, url, get, post, referer, cookies, multipart=False):
+        """ sets everything needed for the request """
+        url = myquote(url)
+
+        if get:
+            get = urlencode(get)
+            url = "%s?%s" % (url, get)
+
+        self.c.setopt(pycurl.URL, url)
+        self.lastURL = url
+
+        if post:
+            self.c.setopt(pycurl.POST, 1)
+            if not multipart:
+                if type(post) == unicode:
+                    post = str(post) #unicode not allowed
+                elif type(post) == str:
+                    pass
+                else:
+                    post = myurlencode(post)
+
+                self.c.setopt(pycurl.POSTFIELDS, post)
+            else:
+                # multipart form data: encode values, let curl build the body
+                post = [(x, y.encode('utf8') if type(y) == unicode else y ) for x, y in post.iteritems()]
+                self.c.setopt(pycurl.HTTPPOST, post)
+        else:
+            self.c.setopt(pycurl.POST, 0)
+
+        if referer and self.lastURL:
+            self.c.setopt(pycurl.REFERER, str(self.lastURL))
+        else:
+            self.c.setopt(pycurl.REFERER, "")
+
+        if cookies:
+            self.c.setopt(pycurl.COOKIELIST, self.cj.output())
+        else:
+            # Magic string that erases all cookies
+            self.c.setopt(pycurl.COOKIELIST, "ALL")
+
+        # TODO: remove auth again
+        if "auth" in self.options:
+            self.c.setopt(pycurl.USERPWD, str(self.options["auth"]))
+
+
+    def load(self, url, get={}, post={}, referer=True, cookies=True, just_header=False, multipart=False, decode=False):
+        """ load and returns a given page """
+
+        self.setRequestContext(url, get, post, referer, cookies, multipart)
+
+        # TODO: use http/rfc message instead
+        self.header = ""
+
+        if "header" in self.options:
+            self.c.setopt(pycurl.HTTPHEADER, self.options["header"])
+
+        if just_header:
+            self.c.setopt(pycurl.FOLLOWLOCATION, 0)
+            self.c.setopt(pycurl.NOBODY, 1) #TODO: nobody= no post?
+
+            # overwrite HEAD request, we want a common request type
+            if post:
+                self.c.setopt(pycurl.CUSTOMREQUEST, "POST")
+            else:
+                self.c.setopt(pycurl.CUSTOMREQUEST, "GET")
+
+            try:
+                self.c.perform()
+                rep = self.header
+            finally:
+                # restore defaults so later load() calls behave normally
+                self.c.setopt(pycurl.FOLLOWLOCATION, 1)
+                self.c.setopt(pycurl.NOBODY, 0)
+                self.c.unsetopt(pycurl.CUSTOMREQUEST)
+
+        else:
+            self.c.perform()
+            rep = self.getResponse()
+
+        self.c.setopt(pycurl.POSTFIELDS, "")
+        self.lastEffectiveURL = self.c.getinfo(pycurl.EFFECTIVE_URL)
+        self.code = self.verifyHeader()
+
+        if cookies:
+            self.parseCookies()
+
+        if decode:
+            rep = self.decodeResponse(rep)
+
+        return rep
+
+    def parseCookies(self):
+        """Transfer cookies from the curl handle into the shared cookie jar."""
+        for c in self.c.getinfo(pycurl.INFO_COOKIELIST):
+            #http://xiix.wordpress.com/2006/03/23/mozillafirefox-cookie-format
+            domain, flag, path, secure, expires, name, value = c.split("\t")
+            # http only was added in py 2.6
+            domain = domain.replace("#HttpOnly_", "")
+            self.cj.setCookie(domain, name, value, path, expires, secure)
+
+    def verifyHeader(self):
+        """ raise an exceptions on bad headers """
+        code = int(self.c.getinfo(pycurl.RESPONSE_CODE))
+        if code in bad_headers:
+            raise ResponseException(code, responses.get(code, "Unknown statuscode"))
+        return code
+
+    def getResponse(self):
+        """ retrieve response from string io """
+        if self.rep is None: return ""
+        value = self.rep.getvalue()
+        self.rep.close()
+        self.rep = StringIO()
+        return value
+
+    def decodeResponse(self, rep):
+        """ decode with correct encoding, relies on header """
+        header = self.header.splitlines()
+        encoding = "utf8" # default encoding
+
+        for line in header:
+            line = line.lower().replace(" ", "")
+            if not line.startswith("content-type:") or \
+                    ("text" not in line and "application" not in line):
+                continue
+
+            none, delemiter, charset = line.rpartition("charset=")
+            if delemiter:
+                charset = charset.split(";")
+                if charset:
+                    encoding = charset[0]
+
+        try:
+            #self.log.debug("Decoded %s" % encoding )
+            if lookup(encoding).name == 'utf-8' and rep.startswith(BOM_UTF8):
+                encoding = 'utf-8-sig'
+
+            decoder = getincrementaldecoder(encoding)("replace")
+            rep = decoder.decode(rep, True)
+
+            #TODO: html_unescape as default
+
+        except LookupError:
+            self.log.debug("No Decoder found for %s" % encoding)
+        except Exception:
+            self.log.debug("Error when decoding string from %s." % encoding)
+
+        return rep
+
+    def write(self, buf):
+        """ writes response """
+        # hard 1 MB cap: dump oversized responses to disk and abort the transfer
+        if self.rep.tell() > 1000000 or self.doAbort:
+            rep = self.getResponse()
+            if self.doAbort: raise Abort()
+            f = open("response.dump", "wb")
+            f.write(rep)
+            f.close()
+            raise Exception("Loaded Url exceeded limit")
+
+        self.rep.write(buf)
+
+    def writeHeader(self, buf):
+        """ writes header """
+        self.header += buf
+
+    def reset(self):
+        """Clear cookies and per-request options."""
+        self.cj.clear()
+        self.options.clear()
+
+    def close(self):
+        """ cleanup, unusable after this """
+        self.rep.close()
+        if hasattr(self, "cj"):
+            del self.cj
+        if hasattr(self, "c"):
+            self.c.close()
+            del self.c
\ No newline at end of file
diff --git a/pyload/plugins/network/DefaultRequest.py b/pyload/plugins/network/DefaultRequest.py
new file mode 100644
index 000000000..dce486ea5
--- /dev/null
+++ b/pyload/plugins/network/DefaultRequest.py
@@ -0,0 +1,9 @@
+# -*- coding: utf-8 -*-
+
+from CurlRequest import CurlRequest
+from CurlDownload import CurlDownload
+
+__version__ = "0.1"
+
+# Aliases selecting the curl backend as the default request/download implementation.
+DefaultRequest = CurlRequest
+DefaultDownload = CurlDownload
\ No newline at end of file
diff --git a/pyload/plugins/network/__init__.py b/pyload/plugins/network/__init__.py
new file mode 100644
index 000000000..4b31e848b
--- /dev/null
+++ b/pyload/plugins/network/__init__.py
@@ -0,0 +1 @@
+__author__ = 'christian'
diff --git a/pyload/remote/ClickAndLoadBackend.py b/pyload/remote/ClickAndLoadBackend.py
new file mode 100644
index 000000000..ad8031587
--- /dev/null
+++ b/pyload/remote/ClickAndLoadBackend.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+import re
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+from cgi import FieldStorage
+from urllib import unquote
+from base64 import standard_b64decode
+from binascii import unhexlify
+
+try:
+ from Crypto.Cipher import AES
+except:
+ pass
+
+from RemoteManager import BackendBase
+
+# Module-level references to the pyLoad core and its JS engine; populated by
+# ClickAndLoadBackend.setup() so CNLHandler instances can reach them.
+core = None
+js = None
+
+class ClickAndLoadBackend(BackendBase):
+    """Remote backend emulating the JDownloader Click'n'Load HTTP interface."""
+    def setup(self, host, port):
+        """Bind the HTTP server and publish core/js as module globals."""
+        self.httpd = HTTPServer((host, port), CNLHandler)
+        global core, js
+        core = self.m.core
+        js = core.js
+
+    def serve(self):
+        # handle one request at a time until the backend is disabled
+        while self.enabled:
+            self.httpd.handle_request()
+
+class CNLHandler(BaseHTTPRequestHandler):
+    """HTTP handler implementing the Click'n'Load / FlashGot endpoints."""
+
+    def add_package(self, name, urls, queue=0):
+        # NOTE(review): currently only prints; presumably meant to forward
+        # the package to the core API — confirm before relying on it
+        print "name", name
+        print "urls", urls
+        print "queue", queue
+
+    def get_post(self, name, default=""):
+        """Return a POST field value or `default` when absent."""
+        if name in self.post:
+            return self.post[name]
+        else:
+            return default
+
+    def start_response(self, string):
+        """Send a 200 response header sized for `string`."""
+
+        self.send_response(200)
+
+        self.send_header("Content-Length", len(string))
+        self.send_header("Content-Language", "de")
+        self.send_header("Vary", "Accept-Language, Cookie")
+        self.send_header("Cache-Control", "no-cache, must-revalidate")
+        self.send_header("Content-type", "text/html")
+        self.end_headers()
+
+    def do_GET(self):
+        """Dispatch the request path to the matching endpoint method."""
+        path = self.path.strip("/").lower()
+        #self.wfile.write(path+"\n")
+
+        # (regex, handler) pairs, checked in order; "" matches everything
+        self.map = [ (r"add$", self.add),
+                     (r"addcrypted$", self.addcrypted),
+                     (r"addcrypted2$", self.addcrypted2),
+                     (r"flashgot", self.flashgot),
+                     (r"crossdomain\.xml", self.crossdomain),
+                     (r"checkSupportForUrl", self.checksupport),
+                     (r"jdcheck.js", self.jdcheck),
+                     (r"", self.flash) ]
+
+        func = None
+        for r, f in self.map:
+            # optional "flash/" or "flashgot/" prefix before each endpoint
+            if re.match(r"(flash(got)?/?)?"+r, path):
+                func = f
+                break
+
+        if func:
+            try:
+                resp = func()
+                if not resp: resp = "success"
+                resp += "\r\n"
+                self.start_response(resp)
+                self.wfile.write(resp)
+            except Exception,e :
+                self.send_error(500, str(e))
+        else:
+            self.send_error(404, "Not Found")
+
+    def do_POST(self):
+        """Parse form fields into self.post, then reuse the GET dispatch."""
+        form = FieldStorage(
+            fp=self.rfile,
+            headers=self.headers,
+            environ={'REQUEST_METHOD':'POST',
+                     'CONTENT_TYPE':self.headers['Content-Type'],
+                     })
+
+        self.post = {}
+        for name in form.keys():
+            self.post[name] = form[name].value
+
+        return self.do_GET()
+
+    def flash(self):
+        # identification string expected by Click'n'Load clients
+        return "JDownloader"
+
+    def add(self):
+        """Add a plain package of newline-separated urls."""
+        package = self.get_post('referer', 'ClickAndLoad Package')
+        urls = filter(lambda x: x != "", self.get_post('urls').split("\n"))
+
+        self.add_package(package, urls, 0)
+
+    def addcrypted(self):
+        """Upload a DLC container; '+' was mangled to space by form encoding."""
+        package = self.get_post('referer', 'ClickAndLoad Package')
+        dlc = self.get_post('crypted').replace(" ", "+")
+
+        core.upload_container(package, dlc)
+
+    def addcrypted2(self):
+        """Decrypt a CNL2 payload: the 'jk' javascript yields the hex AES key,
+        which is also used as IV; the decrypted blob is newline-separated urls."""
+        package = self.get_post("source", "ClickAndLoad Package")
+        crypted = self.get_post("crypted")
+        jk = self.get_post("jk")
+
+        crypted = standard_b64decode(unquote(crypted.replace(" ", "+")))
+        jk = "%s f()" % jk
+        jk = js.eval(jk)
+        Key = unhexlify(jk)
+        IV = Key
+
+        obj = AES.new(Key, AES.MODE_CBC, IV)
+        result = obj.decrypt(crypted).replace("\x00", "").replace("\r","").split("\n")
+
+        result = filter(lambda x: x != "", result)
+
+        self.add_package(package, result, 0)
+
+
+    def flashgot(self):
+        """FlashGot endpoint: like add(), with optional autostart flag."""
+        autostart = int(self.get_post('autostart', 0))
+        package = self.get_post('package', "FlashGot")
+        urls = filter(lambda x: x != "", self.get_post('urls').split("\n"))
+
+        self.add_package(package, urls, autostart)
+
+    def crossdomain(self):
+        """Flash cross-domain policy allowing access from any domain."""
+        rep = "<?xml version=\"1.0\"?>\n"
+        rep += "<!DOCTYPE cross-domain-policy SYSTEM \"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd\">\n"
+        rep += "<cross-domain-policy>\n"
+        rep += "<allow-access-from domain=\"*\" />\n"
+        rep += "</cross-domain-policy>"
+        return rep
+
+    def checksupport(self):
+        # returning None makes do_GET answer the generic "success"
+        pass
+
+    def jdcheck(self):
+        """Javascript snippet telling clients a JDownloader-compatible service runs."""
+        rep = "jdownloader=true;\n"
+        rep += "var version='10629';\n"
+        return rep
diff --git a/pyload/remote/JSONClient.py b/pyload/remote/JSONClient.py
new file mode 100644
index 000000000..a2c07a132
--- /dev/null
+++ b/pyload/remote/JSONClient.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from urllib import urlopen, urlencode
+from httplib import UNAUTHORIZED, FORBIDDEN
+
+from json_converter import loads, dumps
+from apitypes import Unauthorized, Forbidden
+
class JSONClient:
    """Simple HTTP/JSON client for the pyload api.

    Any attribute access that is not defined locally is proxied to the
    remote api via __getattr__, so ``client.someMethod(...)`` performs a
    remote call.
    """

    URL = "http://localhost:8001/api"

    def __init__(self, url=None):
        self.url = url if url else self.URL
        self.session = None

    def request(self, path, data):
        """POST `data` to url+path and translate HTTP error codes to exceptions."""
        rep = urlopen(self.url + path, urlencode(data))
        code = rep.code
        if code == 400:
            # body carries a serialized api exception instance
            raise loads(rep.read())
        if code == 404:
            raise AttributeError("Unknown Method")
        if code == 500:
            raise Exception("Remote Exception")
        if code == UNAUTHORIZED:
            raise Unauthorized()
        if code == FORBIDDEN:
            raise Forbidden()
        return rep.read()

    def login(self, username, password):
        """Authenticate and remember the session token for subsequent calls."""
        self.session = loads(self.request("/login", {'username': username, 'password': password}))
        return self.session

    def logout(self):
        """End the remote session and forget the token."""
        self.call("logout")
        self.session = None

    def call(self, func, *args, **kwargs):
        """Invoke a remote api method; positional args are encoded into the path."""
        kwargs["session"] = self.session
        path = "/%s/%s" % (func, "/".join(dumps(arg) for arg in args))
        payload = dict((key, dumps(value)) for key, value in kwargs.items())
        return loads(self.request(path, payload))

    def __getattr__(self, item):
        def proxy(*args, **kwargs):
            return self.call(item, *args, **kwargs)

        return proxy
+
if __name__ == "__main__":
    # Manual smoke test against a locally running pyload core (py2 print)
    api = JSONClient()
    api.login("User", "test")
    print api.getServerVersion()
diff --git a/pyload/remote/RemoteManager.py b/pyload/remote/RemoteManager.py
new file mode 100644
index 000000000..7aeeb8a7a
--- /dev/null
+++ b/pyload/remote/RemoteManager.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: mkaay
+"""
+
+from threading import Thread
+from traceback import print_exc
+
class BackendBase(Thread):
    """Base class for remote-interface backends running in their own thread.

    Subclasses override setup/serve/shutdown (and optionally checkDeps);
    RemoteManager drives the lifecycle: setup() then start(); stop() asks
    the serve loop to terminate.
    """

    def __init__(self, manager):
        Thread.__init__(self)
        self.m = manager           # owning RemoteManager
        self.core = manager.core
        self.enabled = True        # cleared by stop()
        self.running = False       # True only while serve() is executing

    def run(self):
        """Thread body: run serve(), logging a crash instead of dying silently."""
        self.running = True
        try:
            self.serve()
        # `except Exception, e` was py2-only syntax; `as` is identical on
        # py2.6+ and also valid on py3
        except Exception as e:
            self.core.log.error(_("Remote backend error: %s") % e)
            if self.core.debug:
                print_exc()
        finally:
            self.running = False

    def setup(self, host, port):
        """Prepare the server for the given address; overridden by subclasses."""
        pass

    def checkDeps(self):
        """Return True when required third-party dependencies are available."""
        return True

    def serve(self):
        """Blocking serve loop; overridden by subclasses."""
        pass

    def shutdown(self):
        """Ask the serve loop to end; overridden by subclasses."""
        pass

    def stop(self):
        # set flag and call shutdown, so the serving thread can react
        self.enabled = False
        self.shutdown()
+
+
class RemoteManager:
    """Instantiates and starts the configured remote-interface backends."""

    # Default backend class names; treated as read-only at class level.
    available = []

    def __init__(self, core):
        self.core = core
        self.backends = []
        # Work on an instance-level copy: the original code appended straight
        # to the class attribute, so repeated instantiation mutated state
        # shared by all instances and duplicated "WebSocketBackend".
        self.available = list(self.available)

        if self.core.remote:
            self.available.append("WebSocketBackend")

    def startBackends(self):
        """Set up and start every available backend on consecutive ports.

        Backends whose dependencies are missing are skipped; setup failures
        are logged (with traceback in debug mode) instead of aborting.
        """
        host = self.core.config["remote"]["listenaddr"]
        port = self.core.config["remote"]["port"]

        for b in self.available:
            # each backend lives in a module of the same name
            klass = getattr(__import__("pyload.remote.%s" % b, globals(), locals(), [b], -1), b)
            backend = klass(self)
            if not backend.checkDeps():
                continue
            try:
                backend.setup(host, port)
                self.core.log.info(_("Starting %(name)s: %(addr)s:%(port)s") % {"name": b, "addr": host, "port": port})
            # py2/py3-compatible form of `except Exception, e`
            except Exception as e:
                self.core.log.error(_("Failed loading backend %(name)s | %(error)s") % {"name": b, "error": str(e)})
                if self.core.debug:
                    print_exc()
            else:
                backend.start()
                self.backends.append(backend)

            port += 1
diff --git a/pyload/remote/WSClient.py b/pyload/remote/WSClient.py
new file mode 100644
index 000000000..0e58c6afa
--- /dev/null
+++ b/pyload/remote/WSClient.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from websocket import create_connection
+from httplib import UNAUTHORIZED, FORBIDDEN
+
+from json_converter import loads, dumps
+from apitypes import Unauthorized, Forbidden
+
class WSClient:
    """Websocket JSON client for the pyload api.

    Unknown attributes are proxied as remote calls via __getattr__.
    """

    URL = "ws://localhost:7227/api"

    def __init__(self, url=None):
        self.url = url if url else self.URL
        self.ws = None

    def connect(self):
        """Open the websocket connection."""
        self.ws = create_connection(self.url)

    def close(self):
        """Close the websocket connection."""
        self.ws.close()

    def login(self, username, password):
        """Connect on demand, then authenticate against the remote api."""
        if not self.ws:
            self.connect()
        return self.call("login", username, password)

    def call(self, func, *args, **kwargs):
        """Send one api call and translate error codes into exceptions."""
        if not self.ws:
            raise Exception("Not Connected")

        # kwargs are omitted from the payload when empty
        payload = [func, args, kwargs] if kwargs else [func, args]
        self.ws.send(dumps(payload))

        code, result = loads(self.ws.recv())
        if code == 400:
            # result is a deserialized api exception instance
            raise result
        if code == 404:
            raise AttributeError("Unknown Method")
        if code == 500:
            raise Exception("Remote Exception: %s" % result)
        if code == UNAUTHORIZED:
            raise Unauthorized()
        if code == FORBIDDEN:
            raise Forbidden()

        return result

    def __getattr__(self, item):
        def proxy(*args, **kwargs):
            return self.call(item, *args, **kwargs)

        return proxy
+
if __name__ == "__main__":
    # Manual smoke test against a locally running pyload core (py2 print)
    api = WSClient()
    api.login("User", "test")
    print api.getServerVersion()
diff --git a/pyload/remote/WebSocketBackend.py b/pyload/remote/WebSocketBackend.py
new file mode 100644
index 000000000..d29470067
--- /dev/null
+++ b/pyload/remote/WebSocketBackend.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+import logging
+
+from RemoteManager import BackendBase
+
+from mod_pywebsocket import util
def get_class_logger(o=None):
    """Return the application's shared 'log' logger; the argument is ignored.

    Replacement for mod_pywebsocket's per-class logger factory, so all
    websocket log output is routed to the single 'log' logger.
    """
    return logging.getLogger("log")
+
+# Monkey patch for our logger
+util.get_class_logger = get_class_logger
+
class WebSocketBackend(BackendBase):
    # Backend serving the pyload api over websockets via mod_pywebsocket.

    def setup(self, host, port):
        # Imports are local, presumably so a missing wsbackend/mod_pywebsocket
        # dependency only fails this backend's setup rather than the whole
        # module import -- TODO confirm.
        from wsbackend.Server import WebSocketServer, DefaultOptions
        from wsbackend.Dispatcher import Dispatcher
        from wsbackend.ApiHandler import ApiHandler
        from wsbackend.AsyncHandler import AsyncHandler

        options = DefaultOptions()
        options.server_host = host
        options.port = port
        options.dispatcher = Dispatcher()
        # route both endpoints to handlers bound to the core api
        options.dispatcher.addHandler(ApiHandler.PATH, ApiHandler(self.core.api))
        options.dispatcher.addHandler(AsyncHandler.PATH, AsyncHandler(self.core.api))

        self.server = WebSocketServer(options)


    def serve(self):
        # blocking; executed inside BackendBase.run's thread
        self.server.serve_forever()
diff --git a/pyload/remote/__init__.py b/pyload/remote/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/remote/__init__.py
diff --git a/pyload/remote/apitypes.py b/pyload/remote/apitypes.py
new file mode 100644
index 000000000..6cf7529fd
--- /dev/null
+++ b/pyload/remote/apitypes.py
@@ -0,0 +1,537 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Autogenerated by pyload
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
class BaseObject(object):
    # Autogenerated base for all api value types; regenerate via
    # create_apitypes.py instead of hand-editing this file.
    __slots__ = []

    def __str__(self):
        # "<ClassName attr=value, ...>" built from the subclass __slots__
        return "<%s %s>" % (self.__class__.__name__, ", ".join("%s=%s" % (k,getattr(self,k)) for k in self.__slots__))

class ExceptionObject(Exception):
    # Autogenerated base for api exception types; attributes come from
    # subclass __slots__.
    __slots__ = []
+
+class DownloadState:
+ All = 0
+ Finished = 1
+ Unfinished = 2
+ Failed = 3
+ Unmanaged = 4
+
+class DownloadStatus:
+ NA = 0
+ Offline = 1
+ Online = 2
+ Queued = 3
+ Paused = 4
+ Finished = 5
+ Skipped = 6
+ Failed = 7
+ Starting = 8
+ Waiting = 9
+ Downloading = 10
+ TempOffline = 11
+ Aborted = 12
+ Decrypting = 13
+ Processing = 14
+ Custom = 15
+ Unknown = 16
+
+class FileStatus:
+ Ok = 0
+ Missing = 1
+ Remote = 2
+
+class InputType:
+ NA = 0
+ Text = 1
+ Int = 2
+ File = 3
+ Folder = 4
+ Textbox = 5
+ Password = 6
+ Time = 7
+ Bool = 8
+ Click = 9
+ Select = 10
+ Multiple = 11
+ List = 12
+ PluginList = 13
+ Table = 14
+
+class Interaction:
+ All = 0
+ Notification = 1
+ Captcha = 2
+ Query = 4
+
+class MediaType:
+ All = 0
+ Other = 1
+ Audio = 2
+ Image = 4
+ Video = 8
+ Document = 16
+ Archive = 32
+
+class PackageStatus:
+ Ok = 0
+ Paused = 1
+ Folder = 2
+ Remote = 3
+
+class Permission:
+ All = 0
+ Add = 1
+ Delete = 2
+ Modify = 4
+ Download = 8
+ Accounts = 16
+ Interaction = 32
+ Plugins = 64
+
+class Role:
+ Admin = 0
+ User = 1
+
+class AccountInfo(BaseObject):
+ __slots__ = ['plugin', 'loginname', 'owner', 'valid', 'validuntil', 'trafficleft', 'maxtraffic', 'premium', 'activated', 'shared', 'options']
+
+ def __init__(self, plugin=None, loginname=None, owner=None, valid=None, validuntil=None, trafficleft=None, maxtraffic=None, premium=None, activated=None, shared=None, options=None):
+ self.plugin = plugin
+ self.loginname = loginname
+ self.owner = owner
+ self.valid = valid
+ self.validuntil = validuntil
+ self.trafficleft = trafficleft
+ self.maxtraffic = maxtraffic
+ self.premium = premium
+ self.activated = activated
+ self.shared = shared
+ self.options = options
+
+class AddonInfo(BaseObject):
+ __slots__ = ['func_name', 'description', 'value']
+
+ def __init__(self, func_name=None, description=None, value=None):
+ self.func_name = func_name
+ self.description = description
+ self.value = value
+
+class AddonService(BaseObject):
+ __slots__ = ['func_name', 'description', 'arguments', 'media']
+
+ def __init__(self, func_name=None, description=None, arguments=None, media=None):
+ self.func_name = func_name
+ self.description = description
+ self.arguments = arguments
+ self.media = media
+
+class ConfigHolder(BaseObject):
+ __slots__ = ['name', 'label', 'description', 'explanation', 'items', 'info']
+
+ def __init__(self, name=None, label=None, description=None, explanation=None, items=None, info=None):
+ self.name = name
+ self.label = label
+ self.description = description
+ self.explanation = explanation
+ self.items = items
+ self.info = info
+
+class ConfigInfo(BaseObject):
+ __slots__ = ['name', 'label', 'description', 'category', 'user_context', 'activated']
+
+ def __init__(self, name=None, label=None, description=None, category=None, user_context=None, activated=None):
+ self.name = name
+ self.label = label
+ self.description = description
+ self.category = category
+ self.user_context = user_context
+ self.activated = activated
+
+class ConfigItem(BaseObject):
+ __slots__ = ['name', 'label', 'description', 'input', 'value']
+
+ def __init__(self, name=None, label=None, description=None, input=None, value=None):
+ self.name = name
+ self.label = label
+ self.description = description
+ self.input = input
+ self.value = value
+
+class DownloadInfo(BaseObject):
+ __slots__ = ['url', 'plugin', 'hash', 'status', 'statusmsg', 'error']
+
+ def __init__(self, url=None, plugin=None, hash=None, status=None, statusmsg=None, error=None):
+ self.url = url
+ self.plugin = plugin
+ self.hash = hash
+ self.status = status
+ self.statusmsg = statusmsg
+ self.error = error
+
+class DownloadProgress(BaseObject):
+ __slots__ = ['fid', 'pid', 'speed', 'status']
+
+ def __init__(self, fid=None, pid=None, speed=None, status=None):
+ self.fid = fid
+ self.pid = pid
+ self.speed = speed
+ self.status = status
+
+class EventInfo(BaseObject):
+ __slots__ = ['eventname', 'event_args']
+
+ def __init__(self, eventname=None, event_args=None):
+ self.eventname = eventname
+ self.event_args = event_args
+
+class FileDoesNotExists(ExceptionObject):
+ __slots__ = ['fid']
+
+ def __init__(self, fid=None):
+ self.fid = fid
+
+class FileInfo(BaseObject):
+ __slots__ = ['fid', 'name', 'package', 'owner', 'size', 'status', 'media', 'added', 'fileorder', 'download']
+
+ def __init__(self, fid=None, name=None, package=None, owner=None, size=None, status=None, media=None, added=None, fileorder=None, download=None):
+ self.fid = fid
+ self.name = name
+ self.package = package
+ self.owner = owner
+ self.size = size
+ self.status = status
+ self.media = media
+ self.added = added
+ self.fileorder = fileorder
+ self.download = download
+
+class Forbidden(ExceptionObject):
+ pass
+
+class Input(BaseObject):
+ __slots__ = ['type', 'default_value', 'data']
+
+ def __init__(self, type=None, default_value=None, data=None):
+ self.type = type
+ self.default_value = default_value
+ self.data = data
+
+class InteractionTask(BaseObject):
+ __slots__ = ['iid', 'type', 'input', 'title', 'description', 'plugin']
+
+ def __init__(self, iid=None, type=None, input=None, title=None, description=None, plugin=None):
+ self.iid = iid
+ self.type = type
+ self.input = input
+ self.title = title
+ self.description = description
+ self.plugin = plugin
+
+class InvalidConfigSection(ExceptionObject):
+ __slots__ = ['section']
+
+ def __init__(self, section=None):
+ self.section = section
+
+class LinkStatus(BaseObject):
+ __slots__ = ['url', 'name', 'plugin', 'size', 'status', 'packagename']
+
+ def __init__(self, url=None, name=None, plugin=None, size=None, status=None, packagename=None):
+ self.url = url
+ self.name = name
+ self.plugin = plugin
+ self.size = size
+ self.status = status
+ self.packagename = packagename
+
+class OnlineCheck(BaseObject):
+ __slots__ = ['rid', 'data']
+
+ def __init__(self, rid=None, data=None):
+ self.rid = rid
+ self.data = data
+
+class PackageDoesNotExists(ExceptionObject):
+ __slots__ = ['pid']
+
+ def __init__(self, pid=None):
+ self.pid = pid
+
+class PackageInfo(BaseObject):
+ __slots__ = ['pid', 'name', 'folder', 'root', 'owner', 'site', 'comment', 'password', 'added', 'tags', 'status', 'shared', 'packageorder', 'stats', 'fids', 'pids']
+
+ def __init__(self, pid=None, name=None, folder=None, root=None, owner=None, site=None, comment=None, password=None, added=None, tags=None, status=None, shared=None, packageorder=None, stats=None, fids=None, pids=None):
+ self.pid = pid
+ self.name = name
+ self.folder = folder
+ self.root = root
+ self.owner = owner
+ self.site = site
+ self.comment = comment
+ self.password = password
+ self.added = added
+ self.tags = tags
+ self.status = status
+ self.shared = shared
+ self.packageorder = packageorder
+ self.stats = stats
+ self.fids = fids
+ self.pids = pids
+
+class PackageStats(BaseObject):
+ __slots__ = ['linkstotal', 'linksdone', 'sizetotal', 'sizedone']
+
+ def __init__(self, linkstotal=None, linksdone=None, sizetotal=None, sizedone=None):
+ self.linkstotal = linkstotal
+ self.linksdone = linksdone
+ self.sizetotal = sizetotal
+ self.sizedone = sizedone
+
+class ProgressInfo(BaseObject):
+ __slots__ = ['plugin', 'name', 'statusmsg', 'eta', 'done', 'total', 'download']
+
+ def __init__(self, plugin=None, name=None, statusmsg=None, eta=None, done=None, total=None, download=None):
+ self.plugin = plugin
+ self.name = name
+ self.statusmsg = statusmsg
+ self.eta = eta
+ self.done = done
+ self.total = total
+ self.download = download
+
+class ServerStatus(BaseObject):
+ __slots__ = ['speed', 'linkstotal', 'linksqueue', 'sizetotal', 'sizequeue', 'notifications', 'paused', 'download', 'reconnect']
+
+ def __init__(self, speed=None, linkstotal=None, linksqueue=None, sizetotal=None, sizequeue=None, notifications=None, paused=None, download=None, reconnect=None):
+ self.speed = speed
+ self.linkstotal = linkstotal
+ self.linksqueue = linksqueue
+ self.sizetotal = sizetotal
+ self.sizequeue = sizequeue
+ self.notifications = notifications
+ self.paused = paused
+ self.download = download
+ self.reconnect = reconnect
+
+class ServiceDoesNotExists(ExceptionObject):
+ __slots__ = ['plugin', 'func']
+
+ def __init__(self, plugin=None, func=None):
+ self.plugin = plugin
+ self.func = func
+
+class ServiceException(ExceptionObject):
+ __slots__ = ['msg']
+
+ def __init__(self, msg=None):
+ self.msg = msg
+
+class TreeCollection(BaseObject):
+ __slots__ = ['root', 'files', 'packages']
+
+ def __init__(self, root=None, files=None, packages=None):
+ self.root = root
+ self.files = files
+ self.packages = packages
+
+class Unauthorized(ExceptionObject):
+ pass
+
+class UserData(BaseObject):
+ __slots__ = ['uid', 'name', 'email', 'role', 'permission', 'folder', 'traffic', 'dllimit', 'dlquota', 'hddquota', 'user', 'templateName']
+
+ def __init__(self, uid=None, name=None, email=None, role=None, permission=None, folder=None, traffic=None, dllimit=None, dlquota=None, hddquota=None, user=None, templateName=None):
+ self.uid = uid
+ self.name = name
+ self.email = email
+ self.role = role
+ self.permission = permission
+ self.folder = folder
+ self.traffic = traffic
+ self.dllimit = dllimit
+ self.dlquota = dlquota
+ self.hddquota = hddquota
+ self.user = user
+ self.templateName = templateName
+
+class UserDoesNotExists(ExceptionObject):
+ __slots__ = ['user']
+
+ def __init__(self, user=None):
+ self.user = user
+
+class Iface(object):
+ def addFromCollector(self, name, paused):
+ pass
+ def addLinks(self, pid, links):
+ pass
+ def addLocalFile(self, pid, name, path):
+ pass
+ def addPackage(self, name, links, password):
+ pass
+ def addPackageChild(self, name, links, password, root, paused):
+ pass
+ def addPackageP(self, name, links, password, paused):
+ pass
+ def addToCollector(self, links):
+ pass
+ def addUser(self, username, password):
+ pass
+ def callAddon(self, plugin, func, arguments):
+ pass
+ def callAddonHandler(self, plugin, func, pid_or_fid):
+ pass
+ def checkOnlineStatus(self, urls):
+ pass
+ def checkOnlineStatusContainer(self, urls, filename, data):
+ pass
+ def checkURLs(self, urls):
+ pass
+ def createPackage(self, name, folder, root, password, site, comment, paused):
+ pass
+ def deleteCollLink(self, url):
+ pass
+ def deleteCollPack(self, name):
+ pass
+ def deleteConfig(self, plugin):
+ pass
+ def deleteFiles(self, fids):
+ pass
+ def deletePackages(self, pids):
+ pass
+ def findFiles(self, pattern):
+ pass
+ def findPackages(self, tags):
+ pass
+ def freeSpace(self):
+ pass
+ def generateAndAddPackages(self, links, paused):
+ pass
+ def generateDownloadLink(self, fid, timeout):
+ pass
+ def generatePackages(self, links):
+ pass
+ def getAccountTypes(self):
+ pass
+ def getAccounts(self, refresh):
+ pass
+ def getAddonHandler(self):
+ pass
+ def getAllFiles(self):
+ pass
+ def getAllUserData(self):
+ pass
+ def getAvailablePlugins(self):
+ pass
+ def getCollector(self):
+ pass
+ def getConfig(self):
+ pass
+ def getConfigValue(self, section, option):
+ pass
+ def getCoreConfig(self):
+ pass
+ def getFileInfo(self, fid):
+ pass
+ def getFileTree(self, pid, full):
+ pass
+ def getFilteredFileTree(self, pid, full, state):
+ pass
+ def getFilteredFiles(self, state):
+ pass
+ def getInteractionTasks(self, mode):
+ pass
+ def getLog(self, offset):
+ pass
+ def getPackageContent(self, pid):
+ pass
+ def getPackageInfo(self, pid):
+ pass
+ def getPluginConfig(self):
+ pass
+ def getProgressInfo(self):
+ pass
+ def getServerStatus(self):
+ pass
+ def getServerVersion(self):
+ pass
+ def getUserData(self):
+ pass
+ def getWSAddress(self):
+ pass
+ def hasAddonHandler(self, plugin, func):
+ pass
+ def isInteractionWaiting(self, mode):
+ pass
+ def loadConfig(self, name):
+ pass
+ def login(self, username, password):
+ pass
+ def moveFiles(self, fids, pid):
+ pass
+ def movePackage(self, pid, root):
+ pass
+ def orderFiles(self, fids, pid, position):
+ pass
+ def orderPackage(self, pids, position):
+ pass
+ def parseURLs(self, html, url):
+ pass
+ def pauseServer(self):
+ pass
+ def pollResults(self, rid):
+ pass
+ def quit(self):
+ pass
+ def recheckPackage(self, pid):
+ pass
+ def removeAccount(self, account):
+ pass
+ def removeUser(self, uid):
+ pass
+ def renameCollPack(self, name, new_name):
+ pass
+ def restart(self):
+ pass
+ def restartFailed(self):
+ pass
+ def restartFile(self, fid):
+ pass
+ def restartPackage(self, pid):
+ pass
+ def saveConfig(self, config):
+ pass
+ def searchSuggestions(self, pattern):
+ pass
+ def setConfigValue(self, section, option, value):
+ pass
+ def setInteractionResult(self, iid, result):
+ pass
+ def setPackageFolder(self, pid, path):
+ pass
+ def setPassword(self, username, old_password, new_password):
+ pass
+ def stopAllDownloads(self):
+ pass
+ def stopDownloads(self, fids):
+ pass
+ def togglePause(self):
+ pass
+ def toggleReconnect(self):
+ pass
+ def unpauseServer(self):
+ pass
+ def updateAccount(self, plugin, login, password):
+ pass
+ def updateAccountInfo(self, account):
+ pass
+ def updatePackage(self, pack):
+ pass
+ def updateUserData(self, data):
+ pass
+ def uploadContainer(self, filename, data):
+ pass
+
diff --git a/pyload/remote/apitypes_debug.py b/pyload/remote/apitypes_debug.py
new file mode 100644
index 000000000..7c62a6277
--- /dev/null
+++ b/pyload/remote/apitypes_debug.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Autogenerated by pyload
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+from ttypes import *
+
+enums = [
+ "DownloadState",
+ "DownloadStatus",
+ "FileStatus",
+ "InputType",
+ "Interaction",
+ "MediaType",
+ "PackageStatus",
+ "Permission",
+ "Role",
+]
+
+classes = {
+ 'AccountInfo' : [basestring, basestring, int, bool, int, int, int, bool, bool, bool, (dict, basestring, basestring)],
+ 'AddonInfo' : [basestring, basestring, basestring],
+ 'AddonService' : [basestring, basestring, (list, basestring), (None, int)],
+ 'ConfigHolder' : [basestring, basestring, basestring, basestring, (list, ConfigItem), (None, (list, AddonInfo))],
+ 'ConfigInfo' : [basestring, basestring, basestring, basestring, bool, (None, bool)],
+ 'ConfigItem' : [basestring, basestring, basestring, Input, basestring],
+ 'DownloadInfo' : [basestring, basestring, basestring, int, basestring, basestring],
+ 'DownloadProgress' : [int, int, int, int],
+ 'EventInfo' : [basestring, (list, basestring)],
+ 'FileDoesNotExists' : [int],
+ 'FileInfo' : [int, basestring, int, int, int, int, int, int, int, (None, DownloadInfo)],
+ 'Input' : [int, (None, basestring), (None, basestring)],
+ 'InteractionTask' : [int, int, Input, basestring, basestring, basestring],
+ 'InvalidConfigSection' : [basestring],
+ 'LinkStatus' : [basestring, basestring, basestring, int, int, basestring],
+ 'OnlineCheck' : [int, (dict, basestring, LinkStatus)],
+ 'PackageDoesNotExists' : [int],
+ 'PackageInfo' : [int, basestring, basestring, int, int, basestring, basestring, basestring, int, (list, basestring), int, bool, int, PackageStats, (list, int), (list, int)],
+ 'PackageStats' : [int, int, int, int],
+ 'ProgressInfo' : [basestring, basestring, basestring, int, int, int, (None, DownloadProgress)],
+ 'ServerStatus' : [int, int, int, int, int, bool, bool, bool, bool],
+ 'ServiceDoesNotExists' : [basestring, basestring],
+ 'ServiceException' : [basestring],
+ 'TreeCollection' : [PackageInfo, (dict, int, FileInfo), (dict, int, PackageInfo)],
+ 'UserData' : [int, basestring, basestring, int, int, basestring, int, int, basestring, int, int, basestring],
+ 'UserDoesNotExists' : [basestring],
+}
+
+methods = {
+ 'addFromCollector': int,
+ 'addLinks': None,
+ 'addLocalFile': None,
+ 'addPackage': int,
+ 'addPackageChild': int,
+ 'addPackageP': int,
+ 'addToCollector': None,
+ 'addUser': UserData,
+ 'callAddon': None,
+ 'callAddonHandler': None,
+ 'checkOnlineStatus': OnlineCheck,
+ 'checkOnlineStatusContainer': OnlineCheck,
+ 'checkURLs': (dict, basestring, list),
+ 'createPackage': int,
+ 'deleteCollLink': None,
+ 'deleteCollPack': None,
+ 'deleteConfig': None,
+ 'deleteFiles': None,
+ 'deletePackages': None,
+ 'findFiles': TreeCollection,
+ 'findPackages': TreeCollection,
+ 'freeSpace': int,
+ 'generateAndAddPackages': (list, int),
+ 'generateDownloadLink': basestring,
+ 'generatePackages': (dict, basestring, list),
+ 'getAccountTypes': (list, basestring),
+ 'getAccounts': (list, AccountInfo),
+ 'getAddonHandler': (dict, basestring, list),
+ 'getAllFiles': TreeCollection,
+ 'getAllUserData': (dict, int, UserData),
+ 'getAvailablePlugins': (list, ConfigInfo),
+ 'getCollector': (list, LinkStatus),
+ 'getConfig': (dict, basestring, ConfigHolder),
+ 'getConfigValue': basestring,
+ 'getCoreConfig': (list, ConfigInfo),
+ 'getFileInfo': FileInfo,
+ 'getFileTree': TreeCollection,
+ 'getFilteredFileTree': TreeCollection,
+ 'getFilteredFiles': TreeCollection,
+ 'getInteractionTasks': (list, InteractionTask),
+ 'getLog': (list, basestring),
+ 'getPackageContent': TreeCollection,
+ 'getPackageInfo': PackageInfo,
+ 'getPluginConfig': (list, ConfigInfo),
+ 'getProgressInfo': (list, ProgressInfo),
+ 'getServerStatus': ServerStatus,
+ 'getServerVersion': basestring,
+ 'getUserData': UserData,
+ 'getWSAddress': basestring,
+ 'hasAddonHandler': bool,
+ 'isInteractionWaiting': bool,
+ 'loadConfig': ConfigHolder,
+ 'login': bool,
+ 'moveFiles': bool,
+ 'movePackage': bool,
+ 'orderFiles': None,
+ 'orderPackage': None,
+ 'parseURLs': (dict, basestring, list),
+ 'pauseServer': None,
+ 'pollResults': OnlineCheck,
+ 'quit': None,
+ 'recheckPackage': None,
+ 'removeAccount': None,
+ 'removeUser': None,
+ 'renameCollPack': None,
+ 'restart': None,
+ 'restartFailed': None,
+ 'restartFile': None,
+ 'restartPackage': None,
+ 'saveConfig': None,
+ 'searchSuggestions': (list, basestring),
+ 'setConfigValue': None,
+ 'setInteractionResult': None,
+ 'setPackageFolder': bool,
+ 'setPassword': bool,
+ 'stopAllDownloads': None,
+ 'stopDownloads': None,
+ 'togglePause': bool,
+ 'toggleReconnect': bool,
+ 'unpauseServer': None,
+ 'updateAccount': None,
+ 'updateAccountInfo': None,
+ 'updatePackage': None,
+ 'updateUserData': None,
+ 'uploadContainer': int,
+}
diff --git a/pyload/remote/create_apitypes.py b/pyload/remote/create_apitypes.py
new file mode 100644
index 000000000..d596f07ac
--- /dev/null
+++ b/pyload/remote/create_apitypes.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+import inspect
+from os.path import abspath, dirname, join
+
+path = dirname(abspath(__file__))
+root = abspath(join(path, "..", ".."))
+
+from thrift.Thrift import TType
+from thriftgen.pyload import ttypes
+from thriftgen.pyload import Pyload
+
+# TODO: import and add version
+# from pyload import CURRENT_VERSION
+
+type_map = {
+ TType.BOOL: 'bool',
+ TType.DOUBLE: 'float',
+ TType.I16: 'int',
+ TType.I32: 'int',
+ TType.I64: 'int',
+ TType.STRING: 'basestring',
+ TType.MAP: 'dict',
+ TType.LIST: 'list',
+ TType.SET: 'set',
+ TType.VOID: 'None',
+ TType.STRUCT: 'BaseObject',
+ TType.UTF8: 'unicode',
+}
+
def get_spec(spec, optional=False):
    """Render one thrift_spec field entry as a python type-expression string.

    `spec` is a single element of a thrift-generated thrift_spec tuple:
    spec[1] is the TType and spec[3] carries container/struct details
    (assumed layout, matching the indexing below: struct -> (cls, spec);
    list -> (elem_ttype, elem_spec); map -> (key_ttype, key_spec,
    val_ttype, val_spec)).

    Returns e.g. "int", "(list, FileInfo)" or "(dict, basestring, int)".
    The `optional` parameter is accepted but unused here; callers wrap
    optional fields themselves.
    """
    if spec[1] == TType.STRUCT:
        # plain struct: just the class name
        return spec[3][0].__name__
    elif spec[1] == TType.LIST:
        if spec[3][0] == TType.STRUCT:
            ttype = spec[3][1][0].__name__
        else:
            ttype = type_map[spec[3][0]]
        return "(list, %s)" % ttype
    elif spec[1] == TType.MAP:
        if spec[3][2] == TType.STRUCT:
            ttype = spec[3][3][0].__name__
        else:
            ttype = type_map[spec[3][2]]

        # map keys are mapped through type_map, i.e. assumed primitive
        return "(dict, %s, %s)" % (type_map[spec[3][0]], ttype)
    else:
        return type_map[spec[1]]
+
+optional_re = "%d: +optional +[a-z0-9<>_-]+ +%s"
+
def main():
    """Regenerate apitypes.py and apitypes_debug.py from the thrift modules.

    apitypes.py is a thrift-free mirror of the generated types (enums,
    value/exception classes, Iface method stubs); apitypes_debug.py records
    the expected python types per class attribute and per method result.
    """

    enums = []
    classes = []
    # raw thrift IDL text; used below to detect 'optional' fields by regex
    tf = open(join(path, "pyload.thrift"), "rb").read()

    print "generating apitypes.py"

    # partition generated ttypes into enums (plain constant holders) and
    # struct/exception classes (those carrying a thrift_spec)
    for name in dir(ttypes):
        klass = getattr(ttypes, name)

        if name in ("TBase", "TExceptionBase") or name.startswith("_") or not (issubclass(klass, ttypes.TBase) or issubclass(klass, ttypes.TExceptionBase)):
            continue

        if hasattr(klass, "thrift_spec"):
            classes.append(klass)
        else:
            enums.append(klass)


    # header + base classes for the thrift-free apitypes module
    f = open(join(path, "apitypes.py"), "wb")
    f.write(
        """#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Autogenerated by pyload
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

class BaseObject(object):
\t__slots__ = []

\tdef __str__(self):
\t\treturn "<%s %s>" % (self.__class__.__name__, ", ".join("%s=%s" % (k,getattr(self,k)) for k in self.__slots__))

class ExceptionObject(Exception):
\t__slots__ = []

""")

    # debug companion module with the type tables
    dev = open(join(path, "apitypes_debug.py"), "wb")
    dev.write("""#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Autogenerated by pyload
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING\n
from ttypes import *\n
""")

    dev.write("enums = [\n")

    ## generate enums
    for enum in enums:
        name = enum.__name__
        f.write("class %s:\n" % name)

        # constants ordered by value, skipping dunders and thrift helpers
        for attr in sorted(dir(enum), key=lambda x: getattr(enum, x)):
            if attr.startswith("_") or attr in ("read", "write"): continue
            f.write("\t%s = %s\n" % (attr, getattr(enum, attr)))

        dev.write('\t"%s",\n' % name)
        f.write("\n")

    dev.write("]\n\n")

    dev.write("classes = {\n")

    for klass in classes:
        name = klass.__name__
        base = "ExceptionObject" if issubclass(klass, ttypes.TExceptionBase) else "BaseObject"
        f.write("class %s(%s):\n" % (name, base))

        # No attributes, don't write further info
        if not klass.__slots__:
            f.write("\tpass\n\n")
            continue

        f.write("\t__slots__ = %s\n\n" % klass.__slots__)
        dev.write("\t'%s' : [" % name)

        #create init
        args = ["self"] + ["%s=None" % x for x in klass.__slots__]
        specs = []

        f.write("\tdef __init__(%s):\n" % ", ".join(args))
        for i, attr in enumerate(klass.__slots__):
            f.write("\t\tself.%s = %s\n" % (attr, attr))

            spec = klass.thrift_spec[i+1]
            # assert correct order, so the list of types is enough for check
            assert spec[2] == attr
            # dirty way to check optional attribute, since it is not in the generated code
            # can produce false positives, but these are not critical
            optional = re.search(optional_re % (i+1, attr), tf, re.I)
            if optional:
                specs.append("(None, %s)" % get_spec(spec))
            else:
                specs.append(get_spec(spec))

        f.write("\n")
        dev.write(", ".join(specs) + "],\n")

    dev.write("}\n\n")

    # Iface: one pass-stub per public api method, plus the result-type table
    f.write("class Iface(object):\n")
    dev.write("methods = {\n")

    for name in dir(Pyload.Iface):
        if name.startswith("_"): continue

        func = inspect.getargspec(getattr(Pyload.Iface, name))

        f.write("\tdef %s(%s):\n\t\tpass\n" % (name, ", ".join(func.args)))

        # result type comes from the generated <name>_result thrift_spec
        spec = getattr(Pyload, "%s_result" % name).thrift_spec
        if not spec or not spec[0]:
            dev.write("\t'%s': None,\n" % name)
        else:
            spec = spec[0]
            dev.write("\t'%s': %s,\n" % (name, get_spec(spec)))

    f.write("\n")
    dev.write("}\n")

    f.close()
    dev.close()
+
+if __name__ == "__main__":
+ main() \ No newline at end of file
diff --git a/pyload/remote/create_jstypes.py b/pyload/remote/create_jstypes.py
new file mode 100644
index 000000000..90afa4c96
--- /dev/null
+++ b/pyload/remote/create_jstypes.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from os.path import abspath, dirname, join
+
+path = dirname(abspath(__file__))
+module = join(path, "..")
+
+import apitypes
+from apitypes_debug import enums
+
+# generate js enums
+def main():
+
+ print "generating apitypes.js"
+
+ f = open(join(module, 'web', 'app', 'scripts', 'utils', 'apitypes.js'), 'wb')
+ f.write("""// Autogenerated, do not edit!
+/*jslint -W070: false*/
+define([], function() {
+\t'use strict';
+\treturn {
+""")
+
+ for name in enums:
+ enum = getattr(apitypes, name)
+ values = dict([(attr, getattr(enum, attr)) for attr in dir(enum) if not attr.startswith("_")])
+
+ f.write("\t\t%s: %s,\n" % (name, str(values)))
+
+ f.write("\t};\n});")
+ f.close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/pyload/remote/json_converter.py b/pyload/remote/json_converter.py
new file mode 100644
index 000000000..3e6c7f797
--- /dev/null
+++ b/pyload/remote/json_converter.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+try:
+ from pyload.utils import json
+except ImportError:
+ import json
+
+
+import apitypes
+from apitypes import BaseObject
+from apitypes import ExceptionObject
+
+# compact json separator
+separators = (',', ':')
+
+# json encoder that accepts api objects
class BaseEncoder(json.JSONEncoder):
    """JSON encoder that understands the api's BaseObject/ExceptionObject.

    Instances are serialized as a plain dict carrying the class name under
    the reserved "@class" key plus one entry per slot attribute.
    """

    def default(self, o):
        if isinstance(o, (BaseObject, ExceptionObject)):
            # map every declared slot onto the wire representation
            encoded = {"@class": o.__class__.__name__}
            for name in o.__slots__:
                encoded[name] = getattr(o, name)
            return encoded

        # anything else is left to the stock encoder (which raises TypeError)
        return json.JSONEncoder.default(self, o)
+
+# more compact representation, only clients with information of the classes can handle it
class BaseEncoderCompact(json.JSONEncoder):
    """Compact variant of BaseEncoder.

    Serializes api objects as a single "@compact" list: the class name
    followed by the slot values in declaration order.  Only clients that
    know the class layouts can decode this representation.
    """

    def default(self, o):
        if isinstance(o, (BaseObject, ExceptionObject)):
            payload = [o.__class__.__name__]
            for name in o.__slots__:
                payload.append(getattr(o, name))
            return {"@compact": payload}

        return json.JSONEncoder.default(self, o)
+
def convert_obj(dct):
    """json object_hook that revives api instances.

    Dicts tagged with "@class" are turned into the matching apitypes
    class via keyword arguments; "@compact" tagged dicts use positional
    arguments.  Untagged dicts pass through unchanged.
    """
    if '@class' in dct:
        cls = getattr(apitypes, dct.pop('@class'))
        return cls(**dct)

    if '@compact' in dct:
        compact = dct['@compact']
        return getattr(apitypes, compact[0])(*compact[1:])

    return dct
+
def dumps(*args, **kwargs):
    """json.dumps drop-in that serializes api objects.

    Pass compact=True to use the "@compact" list representation, otherwise
    the verbose "@class" dict form is used.  The 'compact' flag is consumed
    here and never forwarded to json.dumps.
    """
    # bug fix: honour the flag's VALUE - previously only key presence was
    # checked, so dumps(x, compact=False) still selected the compact encoder
    if kwargs.pop('compact', False):
        kwargs['cls'] = BaseEncoderCompact
    else:
        kwargs['cls'] = BaseEncoder

    kwargs['separators'] = separators
    return json.dumps(*args, **kwargs)
+
+
def loads(*args, **kwargs):
    """json.loads drop-in that revives api objects (see convert_obj)."""
    kwargs['object_hook'] = convert_obj
    result = json.loads(*args, **kwargs)
    return result
diff --git a/pyload/remote/pyload.thrift b/pyload/remote/pyload.thrift
new file mode 100644
index 000000000..309c972cd
--- /dev/null
+++ b/pyload/remote/pyload.thrift
@@ -0,0 +1,539 @@
+namespace java org.pyload.thrift
+
+typedef i32 FileID
+typedef i32 PackageID
+typedef i32 ResultID
+typedef i32 InteractionID
+typedef i32 UserID
+typedef i64 UTCDate
+typedef i64 ByteCount
+typedef list<string> LinkList
+typedef string PluginName
+typedef string JSONString
+
+// NA - Not Available
+enum DownloadStatus {
+ NA,
+ Offline,
+ Online,
+ Queued,
+ Paused,
+ Finished,
+ Skipped,
+ Failed,
+ Starting,
+ Waiting,
+ Downloading,
+ TempOffline,
+ Aborted,
+ Decrypting,
+ Processing,
+ Custom,
+ Unknown
+}
+
+// Download states, combination of several downloadstatuses
+// defined in Api
+enum DownloadState {
+ All,
+ Finished,
+ Unfinished,
+ Failed,
+ Unmanaged // internal state
+}
+
+enum MediaType {
+ All = 0
+ Other = 1,
+ Audio = 2,
+ Image = 4,
+ Video = 8,
+ Document = 16,
+ Archive = 32,
+}
+
+enum FileStatus {
+ Ok,
+ Missing,
+ Remote, // file is available at remote location
+}
+
+enum PackageStatus {
+ Ok,
+ Paused,
+ Folder,
+ Remote,
+}
+
+// types for user interaction
+// some may only be placeholders, currently not supported
+// also all input - output combination are not reasonable, see InteractionManager for further info
+// Todo: how about: time, ip, s.o.
+enum InputType {
+ NA,
+ Text,
+ Int,
+ File,
+ Folder,
+ Textbox,
+ Password,
+ Time,
+ Bool, // confirm like, yes or no dialog
+ Click, // for positional captchas
+ Select, // select from list
+ Multiple, // multiple choice from list of elements
+    List, // arbitrary list of elements
+    PluginList, // a list of plugins from pyload
+ Table // table like data structure
+}
+// more can be implemented by need
+
+// this describes the type of the outgoing interaction
+// ensure they can be logically OR'ed
+enum Interaction {
+ All = 0,
+ Notification = 1,
+ Captcha = 2,
+ Query = 4,
+}
+
+enum Permission {
+ All = 0, // requires no permission, but login
+ Add = 1, // can add packages
+ Delete = 2, // can delete packages
+ Modify = 4, // modify some attribute of downloads
+ Download = 8, // can download from webinterface
+ Accounts = 16, // can access accounts
+ Interaction = 32, // can interact with plugins
+ Plugins = 64 // user can configure plugins and activate addons
+}
+
+enum Role {
+ Admin = 0, //admin has all permissions implicit
+ User = 1
+}
+
+struct Input {
+ 1: InputType type,
+ 2: optional JSONString default_value,
+ 3: optional JSONString data,
+}
+
+struct DownloadProgress {
+ 1: FileID fid,
+ 2: PackageID pid,
+ 3: ByteCount speed, // per second
+ 4: DownloadStatus status,
+}
+
+struct ProgressInfo {
+ 1: PluginName plugin,
+ 2: string name,
+ 3: string statusmsg,
+ 4: i32 eta, // in seconds
+ 5: ByteCount done,
+    6: ByteCount total, // arbitrary number, size in case of files
+ 7: optional DownloadProgress download
+}
+
+// download info for specific file
+struct DownloadInfo {
+ 1: string url,
+ 2: PluginName plugin,
+ 3: string hash,
+ 4: DownloadStatus status,
+ 5: string statusmsg,
+ 6: string error,
+}
+
+struct FileInfo {
+ 1: FileID fid,
+ 2: string name,
+ 3: PackageID package,
+ 4: UserID owner,
+ 5: ByteCount size,
+ 6: FileStatus status,
+ 7: MediaType media,
+ 8: UTCDate added,
+ 9: i16 fileorder,
+ 10: optional DownloadInfo download,
+}
+
+struct PackageStats {
+ 1: i16 linkstotal,
+ 2: i16 linksdone,
+ 3: ByteCount sizetotal,
+ 4: ByteCount sizedone,
+}
+
+struct PackageInfo {
+ 1: PackageID pid,
+ 2: string name,
+ 3: string folder,
+ 4: PackageID root,
+ 5: UserID owner,
+ 6: string site,
+ 7: string comment,
+ 8: string password,
+ 9: UTCDate added,
+ 10: list<string> tags,
+ 11: PackageStatus status,
+ 12: bool shared,
+ 13: i16 packageorder,
+ 14: PackageStats stats,
+ 15: list<FileID> fids,
+ 16: list<PackageID> pids,
+}
+
+// thrift does not allow recursive datatypes, so all data is accumulated and mapped with id
+struct TreeCollection {
+ 1: PackageInfo root,
+ 2: map<FileID, FileInfo> files,
+ 3: map<PackageID, PackageInfo> packages
+}
+
+// general info about link, used for collector and online results
+struct LinkStatus {
+ 1: string url,
+ 2: string name,
+ 3: PluginName plugin,
+ 4: ByteCount size, // size <= 0 : unknown
+ 5: DownloadStatus status,
+ 6: string packagename,
+}
+
+struct ServerStatus {
+ 1: ByteCount speed,
+ 2: i16 linkstotal,
+ 3: i16 linksqueue,
+ 4: ByteCount sizetotal,
+ 5: ByteCount sizequeue,
+ 6: bool notifications,
+ 7: bool paused,
+ 8: bool download,
+ 9: bool reconnect,
+}
+
+struct InteractionTask {
+ 1: InteractionID iid,
+ 2: Interaction type,
+ 3: Input input,
+ 4: string title,
+ 5: string description,
+ 6: PluginName plugin,
+}
+
+struct AddonService {
+ 1: string func_name,
+ 2: string description,
+ 3: list<string> arguments,
+ 4: optional i16 media,
+}
+
+struct AddonInfo {
+ 1: string func_name,
+ 2: string description,
+ 3: JSONString value,
+}
+
+struct ConfigItem {
+ 1: string name,
+ 2: string label,
+ 3: string description,
+ 4: Input input,
+ 5: JSONString value,
+}
+
+struct ConfigHolder {
+ 1: string name, // for plugin this is the PluginName
+ 2: string label,
+ 3: string description,
+ 4: string explanation,
+ 5: list<ConfigItem> items,
+ 6: optional list<AddonInfo> info,
+}
+
+struct ConfigInfo {
+ 1: string name
+ 2: string label,
+ 3: string description,
+ 4: string category,
+ 5: bool user_context,
+ 6: optional bool activated,
+}
+
+struct EventInfo {
+ 1: string eventname,
+ 2: list<JSONString> event_args, //will contain json objects
+}
+
+struct UserData {
+ 1: UserID uid,
+ 2: string name,
+ 3: string email,
+ 4: i16 role,
+ 5: i16 permission,
+ 6: string folder,
+ 7: ByteCount traffic
+ 8: i16 dllimit
+ 9: string dlquota,
+ 10: ByteCount hddquota,
+ 11: UserID user,
+ 12: string templateName
+}
+
+struct AccountInfo {
+ 1: PluginName plugin,
+ 2: string loginname,
+ 3: UserID owner,
+ 4: bool valid,
+ 5: UTCDate validuntil,
+ 6: ByteCount trafficleft,
+ 7: ByteCount maxtraffic,
+ 8: bool premium,
+ 9: bool activated,
+ 10: bool shared,
+ 11: map<string, string> options,
+}
+
+struct OnlineCheck {
+ 1: ResultID rid, // -1 -> nothing more to get
+ 2: map<string, LinkStatus> data, // url to result
+}
+
+// exceptions
+
+exception PackageDoesNotExists {
+ 1: PackageID pid
+}
+
+exception FileDoesNotExists {
+ 1: FileID fid
+}
+
+exception UserDoesNotExists {
+ 1: string user
+}
+
+exception ServiceDoesNotExists {
+ 1: string plugin
+ 2: string func
+}
+
+exception ServiceException {
+ 1: string msg
+}
+
+exception InvalidConfigSection {
+ 1: string section
+}
+
+exception Unauthorized {
+}
+
+exception Forbidden {
+}
+
+
+service Pyload {
+
+ ///////////////////////
+ // Core Status
+ ///////////////////////
+
+ string getServerVersion(),
+ string getWSAddress(),
+ ServerStatus getServerStatus(),
+ list<ProgressInfo> getProgressInfo(),
+
+ list<string> getLog(1: i32 offset),
+ ByteCount freeSpace(),
+
+ void pauseServer(),
+ void unpauseServer(),
+ bool togglePause(),
+ bool toggleReconnect(),
+
+ void quit(),
+ void restart(),
+
+ ///////////////////////
+ // Configuration
+ ///////////////////////
+
+ map<string, ConfigHolder> getConfig(),
+ string getConfigValue(1: string section, 2: string option),
+
+    // two methods with ambiguous classification, could be configuration or addon/plugin related
+ list<ConfigInfo> getCoreConfig(),
+ list<ConfigInfo> getPluginConfig(),
+ list<ConfigInfo> getAvailablePlugins(),
+
+ ConfigHolder loadConfig(1: string name),
+
+ void setConfigValue(1: string section, 2: string option, 3: string value),
+ void saveConfig(1: ConfigHolder config),
+ void deleteConfig(1: PluginName plugin),
+
+ ///////////////////////
+ // Download Preparing
+ ///////////////////////
+
+ map<PluginName, LinkList> checkURLs(1: LinkList urls),
+ map<PluginName, LinkList> parseURLs(1: string html, 2: string url),
+
+ // parses results and generates packages
+ OnlineCheck checkOnlineStatus(1: LinkList urls),
+ OnlineCheck checkOnlineStatusContainer(1: LinkList urls, 2: string filename, 3: binary data)
+
+ // poll results from previously started online check
+ OnlineCheck pollResults(1: ResultID rid),
+
+ // packagename -> urls
+ map<string, LinkList> generatePackages(1: LinkList links),
+
+ ///////////////////////
+ // Download
+ ///////////////////////
+
+ list<PackageID> generateAndAddPackages(1: LinkList links, 2: bool paused),
+
+ PackageID createPackage(1: string name, 2: string folder, 3: PackageID root, 4: string password,
+ 5: string site, 6: string comment, 7: bool paused),
+
+ PackageID addPackage(1: string name, 2: LinkList links, 3: string password),
+ // same as above with paused attribute
+ PackageID addPackageP(1: string name, 2: LinkList links, 3: string password, 4: bool paused),
+
+ // pid -1 is toplevel
+ PackageID addPackageChild(1: string name, 2: LinkList links, 3: string password, 4: PackageID root, 5: bool paused),
+
+ PackageID uploadContainer(1: string filename, 2: binary data),
+
+ void addLinks(1: PackageID pid, 2: LinkList links) throws (1: PackageDoesNotExists e),
+ void addLocalFile(1: PackageID pid, 2: string name, 3: string path) throws (1: PackageDoesNotExists e)
+
+ // these are real file operations and WILL delete files on disk
+ void deleteFiles(1: list<FileID> fids),
+ void deletePackages(1: list<PackageID> pids), // delete the whole folder recursive
+
+ // Modify Downloads
+
+ void restartPackage(1: PackageID pid),
+ void restartFile(1: FileID fid),
+ void recheckPackage(1: PackageID pid),
+ void restartFailed(),
+ void stopDownloads(1: list<FileID> fids),
+ void stopAllDownloads(),
+
+ ///////////////////////
+ // Collector
+ ///////////////////////
+
+ list<LinkStatus> getCollector(),
+
+ void addToCollector(1: LinkList links),
+ PackageID addFromCollector(1: string name, 2: bool paused),
+ void renameCollPack(1: string name, 2: string new_name),
+ void deleteCollPack(1: string name),
+ void deleteCollLink(1: string url),
+
+ ////////////////////////////
+ // File Information retrieval
+ ////////////////////////////
+
+ TreeCollection getAllFiles(),
+ TreeCollection getFilteredFiles(1: DownloadState state),
+
+ // pid -1 for root, full=False only delivers first level in tree
+ TreeCollection getFileTree(1: PackageID pid, 2: bool full),
+ TreeCollection getFilteredFileTree(1: PackageID pid, 2: bool full, 3: DownloadState state),
+
+ // same as above with full=False
+ TreeCollection getPackageContent(1: PackageID pid),
+
+ PackageInfo getPackageInfo(1: PackageID pid) throws (1: PackageDoesNotExists e),
+ FileInfo getFileInfo(1: FileID fid) throws (1: FileDoesNotExists e),
+
+ TreeCollection findFiles(1: string pattern),
+ TreeCollection findPackages(1: list<string> tags),
+ list<string> searchSuggestions(1: string pattern),
+
+ // Modify Files/Packages
+
+ // moving package while downloading is not possible, so they will return bool to indicate success
+ void updatePackage(1: PackageInfo pack) throws (1: PackageDoesNotExists e),
+ bool setPackageFolder(1: PackageID pid, 2: string path) throws (1: PackageDoesNotExists e),
+
+ // as above, this will move files on disk
+ bool movePackage(1: PackageID pid, 2: PackageID root) throws (1: PackageDoesNotExists e),
+ bool moveFiles(1: list<FileID> fids, 2: PackageID pid) throws (1: PackageDoesNotExists e),
+
+ void orderPackage(1: list<PackageID> pids, 2: i16 position),
+ void orderFiles(1: list<FileID> fids, 2: PackageID pid, 3: i16 position),
+
+ ///////////////////////
+ // User Interaction
+ ///////////////////////
+
+ // mode = interaction types binary ORed
+ bool isInteractionWaiting(1: i16 mode),
+ list<InteractionTask> getInteractionTasks(1: i16 mode),
+ void setInteractionResult(1: InteractionID iid, 2: JSONString result),
+
+ // generate a download link, everybody can download the file until timeout reached
+ string generateDownloadLink(1: FileID fid, 2: i16 timeout),
+
+ ///////////////////////
+ // Account Methods
+ ///////////////////////
+
+ list<AccountInfo> getAccounts(1: bool refresh),
+ list<string> getAccountTypes(),
+ void updateAccount(1: PluginName plugin, 2: string login, 3: string password),
+ void updateAccountInfo(1: AccountInfo account),
+ void removeAccount(1: AccountInfo account),
+
+ /////////////////////////
+ // Auth+User Information
+ /////////////////////////
+
+ bool login(1: string username, 2: string password),
+ // returns own user data
+ UserData getUserData(),
+
+ // all user, for admins only
+ map<UserID, UserData> getAllUserData(),
+
+ UserData addUser(1: string username, 2:string password),
+
+ // normal user can only update their own userdata and not all attributes
+ void updateUserData(1: UserData data),
+ void removeUser(1: UserID uid),
+
+ // works contextual, admin can change every password
+ bool setPassword(1: string username, 2: string old_password, 3: string new_password),
+
+ ///////////////////////
+ // Addon Methods
+ ///////////////////////
+
+ //map<PluginName, list<AddonInfo>> getAllInfo(),
+ //list<AddonInfo> getInfoByPlugin(1: PluginName plugin),
+
+ map<PluginName, list<AddonService>> getAddonHandler(),
+ bool hasAddonHandler(1: PluginName plugin, 2: string func),
+
+ void callAddon(1: PluginName plugin, 2: string func, 3: list<JSONString> arguments)
+ throws (1: ServiceDoesNotExists e, 2: ServiceException ex),
+
+    // special variant of callAddon that works on the media types, accepting an integer
+ void callAddonHandler(1: PluginName plugin, 2: string func, 3: PackageID pid_or_fid)
+ throws (1: ServiceDoesNotExists e, 2: ServiceException ex),
+
+
+ //scheduler
+
+ // TODO
+
+}
diff --git a/pyload/remote/ttypes.py b/pyload/remote/ttypes.py
new file mode 100644
index 000000000..1f91403d5
--- /dev/null
+++ b/pyload/remote/ttypes.py
@@ -0,0 +1,534 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Autogenerated by pyload
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
class BaseObject(object):
    """Common base for all generated api value types.

    Subclasses declare their fields via __slots__; __str__ renders them
    as "<ClassName field=value, ...>" for debugging output.
    """
    __slots__ = []

    def __str__(self):
        pairs = ["%s=%s" % (name, getattr(self, name)) for name in self.__slots__]
        return "<%s %s>" % (self.__class__.__name__, ", ".join(pairs))
+
class ExceptionObject(Exception):
    """Common base for all generated api exception types."""
    __slots__ = []
+
+class DownloadState:
+    # Aggregated filter states over DownloadStatus values; mirrors enum
+    # DownloadState in pyload.thrift.  The concrete status combinations
+    # are defined in the Api.
+    All = 0
+    Finished = 1
+    Unfinished = 2
+    Failed = 3
+    Unmanaged = 4  # internal state
+
+class DownloadStatus:
+ NA = 0
+ Offline = 1
+ Online = 2
+ Queued = 3
+ Paused = 4
+ Finished = 5
+ Skipped = 6
+ Failed = 7
+ Starting = 8
+ Waiting = 9
+ Downloading = 10
+ TempOffline = 11
+ Aborted = 12
+ Decrypting = 13
+ Processing = 14
+ Custom = 15
+ Unknown = 16
+
+class FileStatus:
+ Ok = 0
+ Missing = 1
+ Remote = 2
+
+class Input:
+ NA = 0
+ Text = 1
+ Int = 2
+ File = 3
+ Folder = 4
+ Textbox = 5
+ Password = 6
+ Bool = 7
+ Click = 8
+ Select = 9
+ Multiple = 10
+ List = 11
+ Table = 12
+
+class MediaType:
+ All = 0
+ Other = 1
+ Audio = 2
+ Image = 4
+ Video = 8
+ Document = 16
+ Archive = 32
+
+class Output:
+    # Bit flags describing the type of an outgoing interaction task.
+    # NOTE(review): the values match thrift enum "Interaction" in
+    # pyload.thrift, but the class name differs - looks like a stale or
+    # renamed generation; confirm against the generator.
+    All = 0
+    Notification = 1
+    Captcha = 2
+    Query = 4
+
+class PackageStatus:
+ Ok = 0
+ Paused = 1
+ Folder = 2
+ Remote = 3
+
+class Permission:
+ All = 0
+ Add = 1
+ Delete = 2
+ Modify = 4
+ Download = 8
+ Accounts = 16
+ Interaction = 32
+ Plugins = 64
+
+class Role:
+ Admin = 0
+ User = 1
+
+class AccountInfo(BaseObject):
+ __slots__ = ['plugin', 'loginname', 'owner', 'valid', 'validuntil', 'trafficleft', 'maxtraffic', 'premium', 'activated', 'shared', 'options']
+
+ def __init__(self, plugin=None, loginname=None, owner=None, valid=None, validuntil=None, trafficleft=None, maxtraffic=None, premium=None, activated=None, shared=None, options=None):
+ self.plugin = plugin
+ self.loginname = loginname
+ self.owner = owner
+ self.valid = valid
+ self.validuntil = validuntil
+ self.trafficleft = trafficleft
+ self.maxtraffic = maxtraffic
+ self.premium = premium
+ self.activated = activated
+ self.shared = shared
+ self.options = options
+
+class AddonInfo(BaseObject):
+ __slots__ = ['func_name', 'description', 'value']
+
+ def __init__(self, func_name=None, description=None, value=None):
+ self.func_name = func_name
+ self.description = description
+ self.value = value
+
+class AddonService(BaseObject):
+ __slots__ = ['func_name', 'description', 'arguments', 'media']
+
+ def __init__(self, func_name=None, description=None, arguments=None, media=None):
+ self.func_name = func_name
+ self.description = description
+ self.arguments = arguments
+ self.media = media
+
+class ConfigHolder(BaseObject):
+ __slots__ = ['name', 'label', 'description', 'long_description', 'items', 'info', 'handler']
+
+ def __init__(self, name=None, label=None, description=None, long_description=None, items=None, info=None, handler=None):
+ self.name = name
+ self.label = label
+ self.description = description
+ self.long_description = long_description
+ self.items = items
+ self.info = info
+ self.handler = handler
+
+class ConfigInfo(BaseObject):
+ __slots__ = ['name', 'label', 'description', 'category', 'user_context', 'activated']
+
+ def __init__(self, name=None, label=None, description=None, category=None, user_context=None, activated=None):
+ self.name = name
+ self.label = label
+ self.description = description
+ self.category = category
+ self.user_context = user_context
+ self.activated = activated
+
+class ConfigItem(BaseObject):
+ __slots__ = ['name', 'label', 'description', 'type', 'default_value', 'value']
+
+ def __init__(self, name=None, label=None, description=None, type=None, default_value=None, value=None):
+ self.name = name
+ self.label = label
+ self.description = description
+ self.type = type
+ self.default_value = default_value
+ self.value = value
+
+class DownloadInfo(BaseObject):
+ __slots__ = ['url', 'plugin', 'hash', 'status', 'statusmsg', 'error']
+
+ def __init__(self, url=None, plugin=None, hash=None, status=None, statusmsg=None, error=None):
+ self.url = url
+ self.plugin = plugin
+ self.hash = hash
+ self.status = status
+ self.statusmsg = statusmsg
+ self.error = error
+
+class DownloadProgress(BaseObject):
+ __slots__ = ['fid', 'pid', 'speed', 'status']
+
+ def __init__(self, fid=None, pid=None, speed=None, status=None):
+ self.fid = fid
+ self.pid = pid
+ self.speed = speed
+ self.status = status
+
+class EventInfo(BaseObject):
+ __slots__ = ['eventname', 'event_args']
+
+ def __init__(self, eventname=None, event_args=None):
+ self.eventname = eventname
+ self.event_args = event_args
+
+class FileDoesNotExists(ExceptionObject):
+ __slots__ = ['fid']
+
+ def __init__(self, fid=None):
+ self.fid = fid
+
+class FileInfo(BaseObject):
+ __slots__ = ['fid', 'name', 'package', 'owner', 'size', 'status', 'media', 'added', 'fileorder', 'download']
+
+ def __init__(self, fid=None, name=None, package=None, owner=None, size=None, status=None, media=None, added=None, fileorder=None, download=None):
+ self.fid = fid
+ self.name = name
+ self.package = package
+ self.owner = owner
+ self.size = size
+ self.status = status
+ self.media = media
+ self.added = added
+ self.fileorder = fileorder
+ self.download = download
+
+class Forbidden(ExceptionObject):
+ pass
+
+class InteractionTask(BaseObject):
+    # NOTE(review): this field list diverges from struct InteractionTask
+    # in pyload.thrift (iid, type, input, title, description, plugin) -
+    # the generated class appears stale; confirm and regenerate.
+    __slots__ = ['iid', 'input', 'data', 'output', 'default_value', 'title', 'description', 'plugin']
+
+    def __init__(self, iid=None, input=None, data=None, output=None, default_value=None, title=None, description=None, plugin=None):
+        self.iid = iid
+        self.input = input
+        self.data = data
+        self.output = output
+        self.default_value = default_value
+        self.title = title
+        self.description = description
+        self.plugin = plugin
+
+class InvalidConfigSection(ExceptionObject):
+ __slots__ = ['section']
+
+ def __init__(self, section=None):
+ self.section = section
+
+class LinkStatus(BaseObject):
+ __slots__ = ['url', 'name', 'plugin', 'size', 'status', 'packagename']
+
+ def __init__(self, url=None, name=None, plugin=None, size=None, status=None, packagename=None):
+ self.url = url
+ self.name = name
+ self.plugin = plugin
+ self.size = size
+ self.status = status
+ self.packagename = packagename
+
+class OnlineCheck(BaseObject):
+ __slots__ = ['rid', 'data']
+
+ def __init__(self, rid=None, data=None):
+ self.rid = rid
+ self.data = data
+
+class PackageDoesNotExists(ExceptionObject):
+ __slots__ = ['pid']
+
+ def __init__(self, pid=None):
+ self.pid = pid
+
+class PackageInfo(BaseObject):
+ __slots__ = ['pid', 'name', 'folder', 'root', 'owner', 'site', 'comment', 'password', 'added', 'tags', 'status', 'shared', 'packageorder', 'stats', 'fids', 'pids']
+
+ def __init__(self, pid=None, name=None, folder=None, root=None, owner=None, site=None, comment=None, password=None, added=None, tags=None, status=None, shared=None, packageorder=None, stats=None, fids=None, pids=None):
+ self.pid = pid
+ self.name = name
+ self.folder = folder
+ self.root = root
+ self.owner = owner
+ self.site = site
+ self.comment = comment
+ self.password = password
+ self.added = added
+ self.tags = tags
+ self.status = status
+ self.shared = shared
+ self.packageorder = packageorder
+ self.stats = stats
+ self.fids = fids
+ self.pids = pids
+
+class PackageStats(BaseObject):
+ __slots__ = ['linkstotal', 'linksdone', 'sizetotal', 'sizedone']
+
+ def __init__(self, linkstotal=None, linksdone=None, sizetotal=None, sizedone=None):
+ self.linkstotal = linkstotal
+ self.linksdone = linksdone
+ self.sizetotal = sizetotal
+ self.sizedone = sizedone
+
+class ProgressInfo(BaseObject):
+ __slots__ = ['plugin', 'name', 'statusmsg', 'eta', 'done', 'total', 'download']
+
+ def __init__(self, plugin=None, name=None, statusmsg=None, eta=None, done=None, total=None, download=None):
+ self.plugin = plugin
+ self.name = name
+ self.statusmsg = statusmsg
+ self.eta = eta
+ self.done = done
+ self.total = total
+ self.download = download
+
+class ServerStatus(BaseObject):
+    # NOTE(review): does not match struct ServerStatus in pyload.thrift
+    # (speed, linkstotal, linksqueue, sizetotal, sizequeue, notifications,
+    # paused, download, reconnect) - appears stale; regenerating from the
+    # IDL would rename attributes, so only flagging it here.
+    __slots__ = ['queuedDownloads', 'totalDownloads', 'speed', 'pause', 'download', 'reconnect']
+
+    def __init__(self, queuedDownloads=None, totalDownloads=None, speed=None, pause=None, download=None, reconnect=None):
+        self.queuedDownloads = queuedDownloads
+        self.totalDownloads = totalDownloads
+        self.speed = speed
+        self.pause = pause
+        self.download = download
+        self.reconnect = reconnect
+
+class ServiceDoesNotExists(ExceptionObject):
+ __slots__ = ['plugin', 'func']
+
+ def __init__(self, plugin=None, func=None):
+ self.plugin = plugin
+ self.func = func
+
+class ServiceException(ExceptionObject):
+ __slots__ = ['msg']
+
+ def __init__(self, msg=None):
+ self.msg = msg
+
+class TreeCollection(BaseObject):
+ __slots__ = ['root', 'files', 'packages']
+
+ def __init__(self, root=None, files=None, packages=None):
+ self.root = root
+ self.files = files
+ self.packages = packages
+
+class Unauthorized(ExceptionObject):
+ pass
+
+class UserData(BaseObject):
+ __slots__ = ['uid', 'name', 'email', 'role', 'permission', 'folder', 'traffic', 'dllimit', 'dlquota', 'hddquota', 'user', 'templateName']
+
+ def __init__(self, uid=None, name=None, email=None, role=None, permission=None, folder=None, traffic=None, dllimit=None, dlquota=None, hddquota=None, user=None, templateName=None):
+ self.uid = uid
+ self.name = name
+ self.email = email
+ self.role = role
+ self.permission = permission
+ self.folder = folder
+ self.traffic = traffic
+ self.dllimit = dllimit
+ self.dlquota = dlquota
+ self.hddquota = hddquota
+ self.user = user
+ self.templateName = templateName
+
+class UserDoesNotExists(ExceptionObject):
+ __slots__ = ['user']
+
+ def __init__(self, user=None):
+ self.user = user
+
+class Iface(object):
+ def addFromCollector(self, name, paused):
+ pass
+ def addLinks(self, pid, links):
+ pass
+ def addLocalFile(self, pid, name, path):
+ pass
+ def addPackage(self, name, links, password):
+ pass
+ def addPackageChild(self, name, links, password, root, paused):
+ pass
+ def addPackageP(self, name, links, password, paused):
+ pass
+ def addToCollector(self, links):
+ pass
+ def addUser(self, username, password):
+ pass
+ def callAddon(self, plugin, func, arguments):
+ pass
+ def callAddonHandler(self, plugin, func, pid_or_fid):
+ pass
+ def checkOnlineStatus(self, urls):
+ pass
+ def checkOnlineStatusContainer(self, urls, filename, data):
+ pass
+ def checkURLs(self, urls):
+ pass
+ def configurePlugin(self, plugin):
+ pass
+ def createPackage(self, name, folder, root, password, site, comment, paused):
+ pass
+ def deleteCollLink(self, url):
+ pass
+ def deleteCollPack(self, name):
+ pass
+ def deleteConfig(self, plugin):
+ pass
+ def deleteFiles(self, fids):
+ pass
+ def deletePackages(self, pids):
+ pass
+ def findFiles(self, pattern):
+ pass
+ def findPackages(self, tags):
+ pass
+ def freeSpace(self):
+ pass
+ def generateAndAddPackages(self, links, paused):
+ pass
+ def generateDownloadLink(self, fid, timeout):
+ pass
+ def generatePackages(self, links):
+ pass
+ def getAccountTypes(self):
+ pass
+ def getAccounts(self, refresh):
+ pass
+ def getAddonHandler(self):
+ pass
+ def getAllFiles(self):
+ pass
+ def getAllUserData(self):
+ pass
+ def getAutocompletion(self, pattern):
+ pass
+ def getAvailablePlugins(self):
+ pass
+ def getCollector(self):
+ pass
+ def getConfig(self):
+ pass
+ def getConfigValue(self, section, option):
+ pass
+ def getCoreConfig(self):
+ pass
+ def getEvents(self, uuid):
+ pass
+ def getFileInfo(self, fid):
+ pass
+ def getFileTree(self, pid, full):
+ pass
+ def getFilteredFileTree(self, pid, full, state):
+ pass
+ def getFilteredFiles(self, state):
+ pass
+ def getInteractionTask(self, mode):
+ pass
+ def getLog(self, offset):
+ pass
+ def getNotifications(self):
+ pass
+ def getPackageContent(self, pid):
+ pass
+ def getPackageInfo(self, pid):
+ pass
+ def getPluginConfig(self):
+ pass
+ def getProgressInfo(self):
+ pass
+ def getServerStatus(self):
+ pass
+ def getServerVersion(self):
+ pass
+ def getUserData(self):
+ pass
+ def getWSAddress(self):
+ pass
+ def hasAddonHandler(self, plugin, func):
+ pass
+ def isInteractionWaiting(self, mode):
+ pass
+ def login(self, username, password):
+ pass
+ def moveFiles(self, fids, pid):
+ pass
+ def movePackage(self, pid, root):
+ pass
+ def orderFiles(self, fids, pid, position):
+ pass
+ def orderPackage(self, pids, position):
+ pass
+ def parseURLs(self, html, url):
+ pass
+ def pauseServer(self):
+ pass
+ def pollResults(self, rid):
+ pass
+ def quit(self):
+ pass
+ def recheckPackage(self, pid):
+ pass
+ def removeAccount(self, plugin, account):
+ pass
+ def removeUser(self, uid):
+ pass
+ def renameCollPack(self, name, new_name):
+ pass
+ def restart(self):
+ pass
+ def restartFailed(self):
+ pass
+ def restartFile(self, fid):
+ pass
+ def restartPackage(self, pid):
+ pass
+ def saveConfig(self, config):
+ pass
+ def setConfigHandler(self, plugin, iid, value):
+ pass
+ def setConfigValue(self, section, option, value):
+ pass
+ def setInteractionResult(self, iid, result):
+ pass
+ def setPackageFolder(self, pid, path):
+ pass
+ def setPassword(self, username, old_password, new_password):
+ pass
+ def stopAllDownloads(self):
+ pass
+ def stopDownloads(self, fids):
+ pass
+ def togglePause(self):
+ pass
+ def toggleReconnect(self):
+ pass
+ def unpauseServer(self):
+ pass
+ def updateAccount(self, plugin, account, password):
+ pass
+ def updateAccountInfo(self, account):
+ pass
+ def updatePackage(self, pack):
+ pass
+ def updateUserData(self, data):
+ pass
+ def uploadContainer(self, filename, data):
+ pass
+
diff --git a/pyload/remote/wsbackend/AbstractHandler.py b/pyload/remote/wsbackend/AbstractHandler.py
new file mode 100644
index 000000000..8012d6cd8
--- /dev/null
+++ b/pyload/remote/wsbackend/AbstractHandler.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from mod_pywebsocket.msgutil import send_message
+from mod_pywebsocket.util import get_class_logger
+from pyload.remote.json_converter import loads, dumps
+
+
+class AbstractHandler:
+ """
+ Abstract Handler providing common methods shared across WebSocket handlers
+ """
+ PATH = "/"
+
+ OK = 200
+ BAD_REQUEST = 400
+ UNAUTHORIZED = 401
+ FORBIDDEN = 403
+ NOT_FOUND = 404
+ ERROR = 500
+
+ def __init__(self, api):
+ self.log = get_class_logger()
+ self.api = api
+ self.core = api.core
+
+ def do_extra_handshake(self, req):
+ self.log.debug("WS Connected: %s" % req)
+ req.api = None #when api is set client is logged in
+
+ # allow login via session when webinterface is active
+ if self.core.config['webinterface']['activated']:
+ cookie = req.headers_in.getheader('Cookie')
+ s = self.load_session(cookie)
+ if s:
+ uid = s.get('uid', None)
+ req.api = self.api.withUserContext(uid)
+ self.log.debug("WS authenticated user with cookie: %d" % uid)
+
+ self.on_open(req)
+
+ def on_open(self, req):
+ pass
+
+ def load_session(self, cookies):
+ from Cookie import SimpleCookie
+ from beaker.session import Session
+ from pyload.web.webinterface import session
+
+ cookies = SimpleCookie(cookies)
+ sid = cookies.get(session.options['key'])
+ if not sid:
+ return None
+
+ s = Session({}, use_cookies=False, id=sid.value, **session.options)
+ if s.is_new:
+ return None
+
+ return s
+
+ def passive_closing_handshake(self, req):
+ self.log.debug("WS Closed: %s" % req)
+ self.on_close(req)
+
+ def on_close(self, req):
+ pass
+
+ def transfer_data(self, req):
+ raise NotImplemented
+
+ def handle_call(self, msg, req):
+ """ Parses the msg for an argument call. If func is null an response was already sent.
+
+ :return: func, args, kwargs
+ """
+ try:
+ o = loads(msg)
+ except ValueError, e: #invalid json object
+ self.log.debug("Invalid Request: %s" % e)
+ self.send_result(req, self.ERROR, "No JSON request")
+ return None, None, None
+
+ if not isinstance(o, basestring) and type(o) != list and len(o) not in range(1, 4):
+ self.log.debug("Invalid Api call: %s" % o)
+ self.send_result(req, self.ERROR, "Invalid Api call")
+ return None, None, None
+
+ # called only with name, no args
+ if isinstance(o, basestring):
+ return o, [], {}
+ elif len(o) == 1: # arguments omitted
+ return o[0], [], {}
+ elif len(o) == 2:
+ func, args = o
+ if type(args) == list:
+ return func, args, {}
+ else:
+ return func, [], args
+ else:
+ return tuple(o)
+
+ def do_login(self, req, args, kwargs):
+ user = self.api.checkAuth(*args, **kwargs)
+ if user:
+ req.api = self.api.withUserContext(user.uid)
+ return self.send_result(req, self.OK, True)
+ else:
+ return self.send_result(req, self.FORBIDDEN, "Forbidden")
+
+ def do_logout(self, req):
+ req.api = None
+ return self.send_result(req, self.OK, True)
+
+ def send_result(self, req, code, result):
+ return send_message(req, dumps([code, result]))
+
+ def send(self, req, obj):
+ return send_message(req, dumps(obj)) \ No newline at end of file
diff --git a/pyload/remote/wsbackend/ApiHandler.py b/pyload/remote/wsbackend/ApiHandler.py
new file mode 100644
index 000000000..4685121d4
--- /dev/null
+++ b/pyload/remote/wsbackend/ApiHandler.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from mod_pywebsocket.msgutil import receive_message
+
+from pyload.Api import ExceptionObject
+
+from AbstractHandler import AbstractHandler
+
class ApiHandler(AbstractHandler):
    """Provides access to the API.

    Send your request as json encoded string in the following manner:
    ["function", [*args]] or ["function", {**kwargs}]

    the result will be:

    [code, result]

    Don't forget to login first.
    Non json request will be ignored.
    """

    PATH = "/api"

    def transfer_data(self, req):
        """Read and dispatch messages until the connection closes."""
        while True:
            try:
                line = receive_message(req)
            except TypeError as e:  # connection closed
                self.log.debug("WS Error: %s" % e)
                return self.passive_closing_handshake(req)

            self.handle_message(line, req)

    def handle_message(self, msg, req):
        """Execute a single api call and answer with [code, result]."""
        func, args, kwargs = self.handle_call(msg, req)
        if not func:
            return  # handle_call already sent the result

        # session commands are always allowed
        if func == 'login':
            return self.do_login(req, args, kwargs)
        if func == 'logout':
            return self.do_logout(req)

        # everything else requires a logged-in, authorized user
        if not req.api:
            return self.send_result(req, self.FORBIDDEN, "Forbidden")
        if not self.api.isAuthorized(func, req.api.user):
            return self.send_result(req, self.UNAUTHORIZED, "Unauthorized")

        try:
            result = getattr(req.api, func)(*args, **kwargs)
        except ExceptionObject as e:
            return self.send_result(req, self.BAD_REQUEST, e)
        except AttributeError:
            return self.send_result(req, self.NOT_FOUND, "Not Found")
        except Exception as e:
            self.core.print_exc()
            return self.send_result(req, self.ERROR, str(e))

        # None is invalid json type, substitute True as acknowledgement
        if result is None:
            result = True

        return self.send_result(req, self.OK, result)
diff --git a/pyload/remote/wsbackend/AsyncHandler.py b/pyload/remote/wsbackend/AsyncHandler.py
new file mode 100644
index 000000000..b936de898
--- /dev/null
+++ b/pyload/remote/wsbackend/AsyncHandler.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+import re
+from Queue import Queue, Empty
+from threading import Lock
+from time import time
+
+from mod_pywebsocket.msgutil import receive_message
+
+from pyload.Api import EventInfo, Interaction
+from pyload.utils import lock
+from AbstractHandler import AbstractHandler
+
class Mode:
    """Connection states for AsyncHandler clients."""
    STANDBY = 1  # accepting commands, not pushing yet
    RUNNING = 2  # pushing updates, no more incoming messages
+
class AsyncHandler(AbstractHandler):
    """
    Handler that provides asynchronous information about server status,
    running downloads, occurred events.

    Progress information are continuous and will be pushed in a fixed interval when available.
    After connect you have to login and can set the interval by sending the json command ["setInterval", xy].
    To start receiving updates call "start", afterwards no more incoming messages will be accepted!
    """

    PATH = "/async"
    COMMAND = "start"

    # default seconds between two progress pushes
    PROGRESS_INTERVAL = 1.5
    # only events whose name matches this pattern are forwarded
    EVENT_PATTERN = re.compile(r"^(package|file|interaction)", re.I)
    INTERACTION = Interaction.All

    def __init__(self, api):
        AbstractHandler.__init__(self, api)
        self.clients = []
        self.lock = Lock()

        self.core.evm.listenTo("event", self.add_event)

    @lock
    def on_open(self, req):
        """Initialize per-connection state on the request object."""
        req.queue = Queue()
        req.interval = self.PROGRESS_INTERVAL
        req.events = self.EVENT_PATTERN
        req.interaction = self.INTERACTION
        req.mode = Mode.STANDBY
        req.t = time()  # time when update should be pushed
        self.clients.append(req)

    @lock
    def on_close(self, req):
        """Remove a disconnected client and release its queue."""
        try:
            del req.queue
            self.clients.remove(req)
        except ValueError:  # ignore when not in list
            pass

    @lock
    def add_event(self, event, *args):
        """Fan an event out to all connected, authorized clients."""
        # Convert arguments to json suited instance
        event = EventInfo(event, [x.toInfoData() if hasattr(x, 'toInfoData') else x for x in args])

        for req in self.clients:
            # Not logged in yet
            if not req.api: continue

            # filter out events of which this user is not the owner
            # TODO: events are security critical, this should be revised later
            # TODO: permissions? interaction etc
            if not req.api.user.isAdmin():
                skip = False
                for arg in args:
                    if hasattr(arg, 'owner') and arg.owner != req.api.primaryUID:
                        skip = True
                        break

                # BUG FIX: this was "break", which aborted delivery to every
                # remaining client instead of only skipping this one
                if skip: continue

            if req.events.search(event.eventname):
                self.log.debug("Pushing event %s" % event)
                req.queue.put(event)

    def transfer_data(self, req):
        """Main loop: accept commands in standby, push updates once running."""
        while True:

            if req.mode == Mode.STANDBY:
                try:
                    line = receive_message(req)
                except TypeError as e:  # connection closed
                    self.log.debug("WS Error: %s" % e)
                    return self.passive_closing_handshake(req)

                self.mode_standby(line, req)
            else:
                if self.mode_running(req):
                    return self.passive_closing_handshake(req)

    def mode_standby(self, msg, req):
        """ accepts calls before pushing updates """
        func, args, kwargs = self.handle_call(msg, req)
        if not func:
            return  # Result was already sent

        if func == 'login':
            return self.do_login(req, args, kwargs)

        elif func == 'logout':
            return self.do_logout(req)

        else:
            if not req.api:
                return self.send_result(req, self.FORBIDDEN, "Forbidden")

            if func == "setInterval":
                req.interval = args[0]
            elif func == "setEvents":
                req.events = re.compile(args[0], re.I)
            elif func == "setInteraction":
                req.interaction = args[0]
            elif func == self.COMMAND:
                req.mode = Mode.RUNNING

    def mode_running(self, req):
        """ Listen for events, closes socket when returning True """
        try:
            # block length of update interval if necessary
            ev = req.queue.get(True, req.interval)
            try:
                self.send(req, ev)
            except TypeError:
                self.log.debug("Event %s not converted" % ev)
                ev.event_args = []
                # Resend the event without arguments
                self.send(req, ev)

        except Empty:
            pass

        if req.t <= time():
            # TODO: server status is not enough
            # modify core api to include progress? think of other needed information to show
            # eta is quite wrong currently
            # notifications
            self.send(req, self.api.getServerStatus())
            self.send(req, self.api.getProgressInfo())

            # update time for next update
            req.t = time() + req.interval
diff --git a/pyload/remote/wsbackend/Dispatcher.py b/pyload/remote/wsbackend/Dispatcher.py
new file mode 100644
index 000000000..44cc7555e
--- /dev/null
+++ b/pyload/remote/wsbackend/Dispatcher.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from mod_pywebsocket import util
+from mod_pywebsocket.dispatch import Dispatcher as BaseDispatcher
+
class Dispatcher(BaseDispatcher):
    """Slimmed-down pywebsocket dispatcher with manually registered handlers."""

    def __init__(self):
        # Deliberately skip BaseDispatcher.__init__: we do not scan handler
        # files from disk, handlers are registered via addHandler instead.
        self._logger = util.get_class_logger(self)
        self._handler_suite_map = {}
        self._source_warnings = []

    def addHandler(self, path, handler):
        """Register *handler* as the suite serving the resource *path*."""
        self._handler_suite_map[path] = handler
diff --git a/pyload/remote/wsbackend/Server.py b/pyload/remote/wsbackend/Server.py
new file mode 100644
index 000000000..af5e1cf19
--- /dev/null
+++ b/pyload/remote/wsbackend/Server.py
@@ -0,0 +1,733 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# A copy of standalone.py with unneeded stuff removed
+# some logging methods removed
+# Added api attribute to request
+
+import BaseHTTPServer
+import CGIHTTPServer
+import SocketServer
+import httplib
+import logging
+import os
+import re
+import select
+import socket
+import sys
+import threading
+
+_HAS_SSL = False
+_HAS_OPEN_SSL = False
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import http_header_util
+from mod_pywebsocket import memorizingfile
+from mod_pywebsocket import util
+
+
+_DEFAULT_LOG_MAX_BYTES = 1024 * 256
+_DEFAULT_LOG_BACKUP_COUNT = 5
+
+_DEFAULT_REQUEST_QUEUE_SIZE = 128
+
+# 1024 is practically large enough to contain WebSocket handshake lines.
+_MAX_MEMORIZED_LINES = 1024
+
def import_ssl():
    """Try to enable TLS support, preferring the stdlib ssl module.

    Sets the module-level flags _HAS_SSL / _HAS_OPEN_SSL and publishes the
    imported module as a module-level name.

    BUG FIX: the imports were previously function-local, so the module-level
    code that later references ssl.wrap_socket / OpenSSL.SSL (see
    WebSocketServer._create_sockets) raised NameError whenever TLS was
    actually used; declaring the names global makes the import visible
    module-wide.
    """
    global _HAS_SSL, _HAS_OPEN_SSL, ssl, OpenSSL
    try:
        import ssl
        _HAS_SSL = True
    except ImportError:
        try:
            import OpenSSL.SSL
            _HAS_OPEN_SSL = True
        except ImportError:
            pass
+
+
class _StandaloneConnection(object):
    """Mimic mod_python mp_conn.

    Thin delegation wrapper: every member forwards to the wrapped
    WebSocketRequestHandler's server/socket attributes.
    """

    def __init__(self, request_handler):
        """Construct an instance.

        Args:
            request_handler: A WebSocketRequestHandler instance.
        """

        self._request_handler = request_handler

    def get_local_addr(self):
        """Getter to mimic mp_conn.local_addr."""

        return (self._request_handler.server.server_name,
                self._request_handler.server.server_port)
    local_addr = property(get_local_addr)

    def get_remote_addr(self):
        """Getter to mimic mp_conn.remote_addr.

        Setting the property in __init__ won't work because the request
        handler is not initialized yet there."""

        return self._request_handler.client_address
    remote_addr = property(get_remote_addr)

    def write(self, data):
        """Mimic mp_conn.write()."""

        return self._request_handler.wfile.write(data)

    def read(self, length):
        """Mimic mp_conn.read()."""

        return self._request_handler.rfile.read(length)

    def get_memorized_lines(self):
        """Get memorized lines."""

        # rfile is a memorizingfile.MemorizingFile (wrapped in setup())
        return self._request_handler.rfile.get_memorized_lines()
+
+
class _StandaloneRequest(object):
    """Mimic mod_python request.

    Adapts a WebSocketRequestHandler to the request interface that
    mod_pywebsocket's handshake/dispatch code expects.
    """

    def __init__(self, request_handler, use_tls):
        """Construct an instance.

        Args:
            request_handler: A WebSocketRequestHandler instance.
            use_tls: whether the underlying connection is TLS-wrapped.
        """

        self._logger = util.get_class_logger(self)

        self._request_handler = request_handler
        self.connection = _StandaloneConnection(request_handler)
        self._use_tls = use_tls
        self.headers_in = request_handler.headers

    def get_uri(self):
        """Getter to mimic request.uri."""

        return self._request_handler.path
    uri = property(get_uri)

    def get_method(self):
        """Getter to mimic request.method."""

        return self._request_handler.command
    method = property(get_method)

    def get_protocol(self):
        """Getter to mimic request.protocol."""

        return self._request_handler.request_version
    protocol = property(get_protocol)

    def is_https(self):
        """Mimic request.is_https()."""

        return self._use_tls

    def _drain_received_data(self):
        """Don't use this method from WebSocket handler. Drains unread data
        in the receive buffer.
        """

        raw_socket = self._request_handler.connection
        drained_data = util.drain_received_data(raw_socket)

        if drained_data:
            self._logger.debug(
                'Drained data following close frame: %r', drained_data)
+
+
class _StandaloneSSLConnection(object):
    """A wrapper class for OpenSSL.SSL.Connection to provide makefile method
    which is not supported by the class.
    """

    def __init__(self, connection):
        self._connection = connection

    def __getattribute__(self, name):
        # only '_connection' and 'makefile' live on the wrapper itself;
        # everything else is delegated to the wrapped connection
        if name in ('_connection', 'makefile'):
            return object.__getattribute__(self, name)
        return self._connection.__getattribute__(name)

    def __setattr__(self, name, value):
        if name in ('_connection', 'makefile'):
            return object.__setattr__(self, name, value)
        return self._connection.__setattr__(name, value)

    def makefile(self, mode='r', bufsize=-1):
        # socket._fileobject gives the file-like interface SocketServer needs
        return socket._fileobject(self._connection, mode, bufsize)
+
+
def _alias_handlers(dispatcher, websock_handlers_map_file):
    """Set aliases specified in websock_handler_map_file in dispatcher.

    Args:
        dispatcher: dispatch.Dispatcher instance
        websock_handler_map_file: alias map file
    """

    fp = open(websock_handlers_map_file)
    try:
        for line in fp:
            # skip comments and blank lines
            if line[0] == '#' or line.isspace():
                continue
            # each mapping line: "<alias> <existing-resource-path>"
            m = re.match('(\S+)\s+(\S+)', line)
            if not m:
                logging.warning('Wrong format in map file:' + line)
                continue
            try:
                dispatcher.add_resource_path_alias(
                    m.group(1), m.group(2))
            except dispatch.DispatchException, e:
                logging.error(str(e))
    finally:
        fp.close()
+
+
class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """HTTPServer specialized for WebSocket.

    Binds (possibly several, IPv4+IPv6) listening sockets and serves each
    accepted connection on its own daemon thread.
    """

    # Overrides SocketServer.ThreadingMixIn.daemon_threads
    daemon_threads = True
    # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
    allow_reuse_address = True

    def __init__(self, options):
        """Override SocketServer.TCPServer.__init__ to set SSL enabled
        socket object to self.socket before server_bind and server_activate,
        if necessary.
        """
        # Removed dispatcher init here
        self._logger = logging.getLogger("log")

        self.request_queue_size = options.request_queue_size
        self.__ws_is_shut_down = threading.Event()
        self.__ws_serving = False

        # Deliberately BaseServer, not HTTPServer: binding/activation is done
        # manually below because there may be more than one socket.
        SocketServer.BaseServer.__init__(
            self, (options.server_host, options.port), WebSocketRequestHandler)

        # Expose the options object to allow handler objects access it. We name
        # it with websocket_ prefix to avoid conflict.
        self.websocket_server_options = options

        self._create_sockets()
        self.server_bind()
        self.server_activate()

    def _create_sockets(self):
        # Create (but do not bind) one socket per usable address family.
        self.server_name, self.server_port = self.server_address
        self._sockets = []
        if not self.server_name:
            # On platforms that doesn't support IPv6, the first bind fails.
            # On platforms that supports IPv6
            # - If it binds both IPv4 and IPv6 on call with AF_INET6, the
            #   first bind succeeds and the second fails (we'll see 'Address
            #   already in use' error).
            # - If it binds only IPv6 on call with AF_INET6, both call are
            #   expected to succeed to listen both protocol.
            addrinfo_array = [
                (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
                (socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
        else:
            addrinfo_array = socket.getaddrinfo(self.server_name,
                                                self.server_port,
                                                socket.AF_UNSPEC,
                                                socket.SOCK_STREAM,
                                                socket.IPPROTO_TCP)
        for addrinfo in addrinfo_array:
            family, socktype, proto, canonname, sockaddr = addrinfo
            try:
                socket_ = socket.socket(family, socktype)
            except Exception, e:
                self._logger.info('Skip by failure: %r', e)
                continue
            if self.websocket_server_options.use_tls:
                # NOTE(review): ssl / OpenSSL are expected to be visible at
                # module scope here (see import_ssl) - confirm, a plain
                # function-local import would not provide them
                if _HAS_SSL:
                    if self.websocket_server_options.tls_client_auth:
                        client_cert_ = ssl.CERT_REQUIRED
                    else:
                        client_cert_ = ssl.CERT_NONE
                    socket_ = ssl.wrap_socket(socket_,
                        keyfile=self.websocket_server_options.private_key,
                        certfile=self.websocket_server_options.certificate,
                        ssl_version=ssl.PROTOCOL_SSLv23,
                        ca_certs=self.websocket_server_options.tls_client_ca,
                        cert_reqs=client_cert_)
                if _HAS_OPEN_SSL:
                    ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
                    ctx.use_privatekey_file(
                        self.websocket_server_options.private_key)
                    ctx.use_certificate_file(
                        self.websocket_server_options.certificate)
                    socket_ = OpenSSL.SSL.Connection(ctx, socket_)
            self._sockets.append((socket_, addrinfo))

    def server_bind(self):
        """Override SocketServer.TCPServer.server_bind to enable multiple
        sockets bind.
        """

        failed_sockets = []

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            if self.allow_reuse_address:
                socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                socket_.bind(self.server_address)
            except Exception, e:
                self._logger.info('Skip by failure: %r', e)
                socket_.close()
                failed_sockets.append(socketinfo)
            if self.server_address[1] == 0:
                # The operating system assigns the actual port number for port
                # number 0. This case, the second and later sockets should use
                # the same port number. Also self.server_port is rewritten
                # because it is exported, and will be used by external code.
                self.server_address = (
                    self.server_name, socket_.getsockname()[1])
                self.server_port = self.server_address[1]
                self._logger.info('Port %r is assigned', self.server_port)

        for socketinfo in failed_sockets:
            self._sockets.remove(socketinfo)

    def server_activate(self):
        """Override SocketServer.TCPServer.server_activate to enable multiple
        sockets listen.
        """

        failed_sockets = []

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.debug('Listen on: %r', addrinfo)
            try:
                socket_.listen(self.request_queue_size)
            except Exception, e:
                self._logger.info('Skip by failure: %r', e)
                socket_.close()
                failed_sockets.append(socketinfo)

        for socketinfo in failed_sockets:
            self._sockets.remove(socketinfo)

        if len(self._sockets) == 0:
            self._logger.critical(
                'No sockets activated. Use info log level to see the reason.')

    def server_close(self):
        """Override SocketServer.TCPServer.server_close to enable multiple
        sockets close.
        """

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.info('Close on: %r', addrinfo)
            socket_.close()

    def fileno(self):
        """Override SocketServer.TCPServer.fileno."""

        # Only the first socket's fileno can be reported here.
        self._logger.critical('Not supported: fileno')
        return self._sockets[0][0].fileno()

    def handle_error(self, rquest, client_address):
        """Override SocketServer.handle_error."""
        # NOTE(review): parameter name "rquest" is a typo of "request";
        # harmless since it is unused, kept for interface stability.

        self._logger.error(
            'Exception in processing request from: %r\n%s',
            client_address,
            util.get_stack_trace())
        # Note: client_address is a tuple.

    def get_request(self):
        """Override TCPServer.get_request to wrap OpenSSL.SSL.Connection
        object with _StandaloneSSLConnection to provide makefile method. We
        cannot substitute OpenSSL.SSL.Connection.makefile since it's readonly
        attribute.
        """

        accepted_socket, client_address = self.socket.accept()
        if self.websocket_server_options.use_tls and _HAS_OPEN_SSL:
            accepted_socket = _StandaloneSSLConnection(accepted_socket)
        return accepted_socket, client_address

    def serve_forever(self, poll_interval=0.5):
        """Override SocketServer.BaseServer.serve_forever."""

        self.__ws_serving = True
        self.__ws_is_shut_down.clear()
        handle_request = self.handle_request
        if hasattr(self, '_handle_request_noblock'):
            handle_request = self._handle_request_noblock
        else:
            self._logger.warning('Fallback to blocking request handler')
        try:
            while self.__ws_serving:
                # multiplex over all listening sockets
                r, w, e = select.select(
                    [socket_[0] for socket_ in self._sockets],
                    [], [], poll_interval)
                for socket_ in r:
                    self.socket = socket_
                    handle_request()
                self.socket = None
        finally:
            self.__ws_is_shut_down.set()

    def shutdown(self):
        """Override SocketServer.BaseServer.shutdown."""

        self.__ws_serving = False
        self.__ws_is_shut_down.wait()
+
+
class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
    """CGIHTTPRequestHandler specialized for WebSocket.

    Plain HTTP(S) requests fall through to the CGI/static handler; requests
    whose path has a registered websocket handler are handed to the
    dispatcher after a successful handshake.
    """

    # Use httplib.HTTPMessage instead of mimetools.Message.
    MessageClass = httplib.HTTPMessage

    def setup(self):
        """Override SocketServer.StreamRequestHandler.setup to wrap rfile
        with MemorizingFile.

        This method will be called by BaseRequestHandler's constructor
        before calling BaseHTTPRequestHandler.handle.
        BaseHTTPRequestHandler.handle will call
        BaseHTTPRequestHandler.handle_one_request and it will call
        WebSocketRequestHandler.parse_request.
        """

        # Call superclass's setup to prepare rfile, wfile, etc. See setup
        # definition on the root class SocketServer.StreamRequestHandler to
        # understand what this does.
        CGIHTTPServer.CGIHTTPRequestHandler.setup(self)

        self.rfile = memorizingfile.MemorizingFile(
            self.rfile,
            max_memorized_lines=_MAX_MEMORIZED_LINES)

    def __init__(self, request, client_address, server):
        self._logger = util.get_class_logger(self)

        self._options = server.websocket_server_options

        # Overrides CGIHTTPServerRequestHandler.cgi_directories.
        self.cgi_directories = self._options.cgi_directories
        # Replace CGIHTTPRequestHandler.is_executable method.
        if self._options.is_executable_method is not None:
            self.is_executable = self._options.is_executable_method

        # OWN MODIFICATION
        # This actually calls BaseRequestHandler.__init__.
        try:
            CGIHTTPServer.CGIHTTPRequestHandler.__init__(
                self, request, client_address, server)
        except socket.error, e:
            # Broken pipe (errno 32 / EPIPE), let it pass
            if e.errno != 32:
                raise
            self._logger.debug("WS: Broken pipe")



    def parse_request(self):
        """Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request.

        Return True to continue processing for HTTP(S), False otherwise.

        See BaseHTTPRequestHandler.handle_one_request method which calls
        this method to understand how the return value will be handled.
        """

        # We hook parse_request method, but also call the original
        # CGIHTTPRequestHandler.parse_request since when we return False,
        # CGIHTTPRequestHandler.handle_one_request continues processing and
        # it needs variables set by CGIHTTPRequestHandler.parse_request.
        #
        # Variables set by this method will be also used by WebSocket request
        # handling (self.path, self.command, self.requestline, etc. See also
        # how _StandaloneRequest's members are implemented using these
        # attributes).
        if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self):
            return False

        if self._options.use_basic_auth:
            auth = self.headers.getheader('Authorization')
            if auth != self._options.basic_auth_credential:
                self.send_response(401)
                self.send_header('WWW-Authenticate',
                                 'Basic realm="Pywebsocket"')
                self.end_headers()
                self._logger.info('Request basic authentication')
                return True

        host, port, resource = http_header_util.parse_uri(self.path)
        if resource is None:
            self._logger.info('Invalid URI: %r', self.path)
            self._logger.info('Fallback to CGIHTTPRequestHandler')
            return True
        server_options = self.server.websocket_server_options
        if host is not None:
            validation_host = server_options.validation_host
            if validation_host is not None and host != validation_host:
                self._logger.info('Invalid host: %r (expected: %r)',
                                  host,
                                  validation_host)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        if port is not None:
            validation_port = server_options.validation_port
            if validation_port is not None and port != validation_port:
                self._logger.info('Invalid port: %r (expected: %r)',
                                  port,
                                  validation_port)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        self.path = resource

        request = _StandaloneRequest(self, self._options.use_tls)

        try:
            # Fallback to default http handler for request paths for which
            # we don't have request handlers.
            if not self._options.dispatcher.get_handler_suite(self.path):
                self._logger.info('No handler for resource: %r',
                                  self.path)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        except dispatch.DispatchException, e:
            self._logger.info('%s', e)
            self.send_error(e.status)
            return False

        # If any Exceptions without except clause setup (including
        # DispatchException) is raised below this point, it will be caught
        # and logged by WebSocketServer.

        try:
            try:
                handshake.do_handshake(
                    request,
                    self._options.dispatcher,
                    allowDraft75=self._options.allow_draft75,
                    strict=self._options.strict)
            except handshake.VersionException, e:
                self._logger.info('%s', e)
                self.send_response(common.HTTP_STATUS_BAD_REQUEST)
                self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
                                 e.supported_versions)
                self.end_headers()
                return False
            except handshake.HandshakeException, e:
                # Handshake for ws(s) failed.
                self._logger.info('%s', e)
                self.send_error(e.status)
                return False

            request._dispatcher = self._options.dispatcher
            self._options.dispatcher.transfer_data(request)
        except handshake.AbortedByUserException, e:
            self._logger.info('%s', e)
            return False

    def log_request(self, code='-', size='-'):
        """Override BaseHTTPServer.log_request."""

        self._logger.info('"%s" %s %s',
                          self.requestline, str(code), str(size))

    def log_error(self, *args):
        """Override BaseHTTPServer.log_error."""

        # Despite the name, this method is for warnings than for errors.
        # For example, HTTP status code is logged by this method.
        self._logger.warning('%s - %s',
                             self.address_string(),
                             args[0] % args[1:])

    def is_cgi(self):
        """Test whether self.path corresponds to a CGI script.

        Add extra check that self.path doesn't contains ..
        Also check if the file is a executable file or not.
        If the file is not executable, it is handled as static file or dir
        rather than a CGI script.
        """

        if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self):
            if '..' in self.path:
                return False
            # strip query parameter from request path
            resource_name = self.path.split('?', 2)[0]
            # convert resource_name into real path name in filesystem.
            scriptfile = self.translate_path(resource_name)
            if not os.path.isfile(scriptfile):
                return False
            if not self.is_executable(scriptfile):
                return False
            return True
        return False
+
+
+def _get_logger_from_class(c):
+ return logging.getLogger('%s.%s' % (c.__module__, c.__name__))
+
+
def _configure_logging(options):
    """Configure the root logger: level, optional rotating file, formatting.

    :param options: object providing log_level, log_file, log_max, log_count
        and deflate_log_level attributes.
    """
    # BUG FIX: a plain "import logging" does not guarantee the
    # logging.handlers submodule is loaded; import it explicitly before
    # referencing logging.handlers.RotatingFileHandler below.
    import logging.handlers

    logging.addLevelName(common.LOGLEVEL_FINE, 'FINE')

    logger = logging.getLogger()
    logger.setLevel(logging.getLevelName(options.log_level.upper()))
    if options.log_file:
        handler = logging.handlers.RotatingFileHandler(
            options.log_file, 'a', options.log_max, options.log_count)
    else:
        handler = logging.StreamHandler()
    formatter = logging.Formatter(
        '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # lower the verbosity of the (de)compression helpers separately
    deflate_log_level_name = logging.getLevelName(
        options.deflate_log_level.upper())
    _get_logger_from_class(util._Deflater).setLevel(
        deflate_log_level_name)
    _get_logger_from_class(util._Inflater).setLevel(
        deflate_log_level_name)
+
class DefaultOptions:
    """Default configuration for WebSocketServer / WebSocketRequestHandler.

    Mirrors the values _main() derives from command line arguments.
    """
    server_host = ''
    port = common.DEFAULT_WEB_SOCKET_PORT
    use_tls = False
    private_key = ''
    certificate = ''
    ca_certificate = ''
    dispatcher = None
    request_queue_size = _DEFAULT_REQUEST_QUEUE_SIZE
    use_basic_auth = False

    allow_draft75 = False
    strict = False
    validation_host = None
    validation_port = None
    # BUG FIX: was '' - CGIHTTPRequestHandler expects a list of directories
    cgi_directories = []
    # BUG FIX: was False - WebSocketRequestHandler.__init__ only skips the
    # override when the value "is not None", so False replaced is_executable
    # with a non-callable and broke is_cgi(); None (as _main() sets it)
    # disables the override instead
    is_executable_method = None
+
def _main(args=None):
    """You can call this function from your own program, but please note that
    this function has some side-effects that might affect your program. For
    example, util.wrap_popen3_for_win use in this method replaces implementation
    of os.popen3.
    """

    # NOTE(review): _parse_args_and_config is not defined in this trimmed
    # copy of standalone.py - calling _main() will raise NameError; confirm
    # whether this entry point is still needed.
    options, args = _parse_args_and_config(args=args)

    os.chdir(options.document_root)

    _configure_logging(options)

    # TODO(tyoshino): Clean up initialization of CGI related values. Move some
    # of code here to WebSocketRequestHandler class if it's better.
    options.cgi_directories = []
    options.is_executable_method = None
    if options.cgi_paths:
        options.cgi_directories = options.cgi_paths.split(',')
        if sys.platform in ('cygwin', 'win32'):
            cygwin_path = None
            # For Win32 Python, it is expected that CYGWIN_PATH
            # is set to a directory of cygwin binaries.
            # For example, websocket_server.py in Chromium sets CYGWIN_PATH to
            # full path of third_party/cygwin/bin.
            if 'CYGWIN_PATH' in os.environ:
                cygwin_path = os.environ['CYGWIN_PATH']
            util.wrap_popen3_for_win(cygwin_path)

            def __check_script(scriptpath):
                return util.get_script_interp(scriptpath, cygwin_path)

            options.is_executable_method = __check_script

    if options.use_tls:
        if not (_HAS_SSL or _HAS_OPEN_SSL):
            logging.critical('TLS support requires ssl or pyOpenSSL module.')
            sys.exit(1)
        if not options.private_key or not options.certificate:
            logging.critical(
                'To use TLS, specify private_key and certificate.')
            sys.exit(1)

    if options.tls_client_auth:
        if not options.use_tls:
            logging.critical('TLS must be enabled for client authentication.')
            sys.exit(1)
        if not _HAS_SSL:
            logging.critical('Client authentication requires ssl module.')

    if not options.scan_dir:
        options.scan_dir = options.websock_handlers

    if options.use_basic_auth:
        # NOTE(review): base64 is not imported in this module - confirm
        options.basic_auth_credential = 'Basic ' + base64.b64encode(
            options.basic_auth_credential)

    try:
        if options.thread_monitor_interval_in_sec > 0:
            # Run a thread monitor to show the status of server threads for
            # debugging.
            # NOTE(review): ThreadMonitor is not defined in this trimmed copy
            ThreadMonitor(options.thread_monitor_interval_in_sec).start()

        server = WebSocketServer(options)
        server.serve_forever()
    except Exception, e:
        logging.critical('mod_pywebsocket: %s' % e)
        logging.critical('mod_pywebsocket: %s' % util.get_stack_trace())
        sys.exit(1)
+
if __name__ == '__main__':
    # NOTE(review): depends on _main, which references helpers removed from
    # this trimmed copy (see _main) - running this file directly will fail.
    _main(sys.argv[1:])


# vi:sts=4 sw=4 et
diff --git a/pyload/remote/wsbackend/__init__.py b/pyload/remote/wsbackend/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/remote/wsbackend/__init__.py
diff --git a/pyload/setup/System_Checks.py b/pyload/setup/System_Checks.py
new file mode 100644
index 000000000..cef46956b
--- /dev/null
+++ b/pyload/setup/System_Checks.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+from getpass import getpass
+import module.common.pylgettext as gettext
+import os
+from os import makedirs
+from os.path import abspath, dirname, exists, join
+from subprocess import PIPE, call
+import sys
+from sys import exit
+from module.utils import get_console_encoding
+
class System_Checks():
    """Collects results of environment / dependency checks.

    Every ``check_*`` method resets ``self.result``, appends human readable
    status lines to it and returns a tuple ``(report_text, ok_flag)``.
    """

    def __init__(self):
        # accumulated, newline separated status report of the last check
        self.result = ""

    def print_str(self, text, translate = True):
        """Append a line to the report, optionally run through gettext.

        :param text: message to append
        :param translate: when True the message is passed through _()
        """
        if translate:
            self.result += _(text) + "\n"
        else:
            self.result += text + "\n"

    def print_dep(self, name, value):
        """Print Status of dependency"""
        if value:
            self.print_str(name + ": OK", False)
        else:
            self.print_str(name + ": missing", False)

    def check_basic(self):
        """Check python version and the essential modules.

        :return: (report text, True when python + pycurl + sqlite3 and at
                  least one of beaker/jinja2 are usable)
        """
        self.result = "" #clear result
        python = False
        if sys.version_info[:2] > (2, 7):
            # BUGFIX: typo "to new" -> "too new" in the user facing message
            self.print_str("Your python version is too new, Please use Python 2.6/2.7")
        elif sys.version_info[:2] < (2, 5):
            self.print_str("Your python version is too old, Please use at least Python 2.5")
        else:
            self.print_str("Python Version: OK")
            python = True

        curl = self.check_module("pycurl")
        self.print_dep("pycurl", curl)

        sqlite = self.check_module("sqlite3")
        self.print_dep("sqlite3", sqlite)

        beaker = self.check_module("beaker")
        self.print_dep("beaker", beaker)

        jinja = True
        try:
            import jinja2
            v = jinja2.__version__
            if v and "unknown" not in v:
                if not v.startswith("2.5") and not v.startswith("2.6"):
                    # BUGFIX: the '%' operator was previously applied to the
                    # return value of print_str() (None) instead of the
                    # message string, raising a TypeError at runtime.
                    self.print_str(_("Your installed jinja2 version %s seems too old.") % v, False)
                    self.print_str("You can safely continue but if the webinterface is not working,")
                    self.print_str("please upgrade or deinstall it, pyLoad includes a sufficient jinja2 library.")
                    jinja = False
        except Exception:
            # jinja2 not installed at all -> bundled copy will be used
            pass
        self.print_dep("jinja2", jinja)

        return self.result, (python and curl and sqlite and (beaker or jinja))

    def check_ssl(self):
        """Check for the pyOpenSSL bindings."""
        self.result = "" #clear result
        ssl = self.check_module("OpenSSL")
        self.print_dep("py-OpenSSL", ssl)
        return self.result, ssl

    def check_crypto(self):
        """Check for the pycrypto module."""
        self.result = "" #clear result
        crypto = self.check_module("Crypto")
        self.print_dep("pycrypto", crypto)
        return self.result, crypto

    def check_captcha(self):
        """Check captcha related dependencies (PIL + tesseract)."""
        self.result = "" #clear result
        pil = self.check_module("Image")
        self.print_dep("py-imaging", pil)
        if os.name == "nt":
            # NOTE(review): pypath is presumably injected as a builtin by the
            # pyLoad bootstrap code -- confirm before relying on this branch
            tesser = self.check_prog([join(pypath, "tesseract", "tesseract.exe"), "-v"])
        else:
            tesser = self.check_prog(["tesseract", "-v"])
        self.print_dep("tesseract", tesser)
        return self.result, pil and tesser

    def check_js(self):
        """Check whether a javascript engine was detected."""
        self.result = "" #clear result
        from module.common import JsEngine
        js = True if JsEngine.ENGINE else False
        self.print_dep(_("JS engine"), js)
        # BUGFIX: previously returned the undefined names 'pil and tesser'
        # (copy/paste from check_captcha), which raised a NameError.
        return self.result, js

    def check_module(self, module):
        """Return True when *module* can be imported."""
        try:
            __import__(module)
            return True
        except Exception:
            return False

    def check_prog(self, command):
        """Return True when *command* can be executed."""
        pipe = PIPE
        try:
            call(command, stdout=pipe, stderr=pipe)
            return True
        except Exception:
            return False
diff --git a/pyload/threads/AddonThread.py b/pyload/threads/AddonThread.py
new file mode 100644
index 000000000..afb56f66b
--- /dev/null
+++ b/pyload/threads/AddonThread.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from copy import copy
+from traceback import print_exc
+
+from BaseThread import BaseThread
+
class AddonThread(BaseThread):
    """thread for addons

    Runs a single addon callback in the background and keeps track of the
    pyfiles the callback claims, so they show up in the download overview.
    """

    def __init__(self, m, function, args, kwargs):
        """Constructor

        :param m: thread manager; this thread registers itself in m.localThreads
        :param function: callable to execute in this thread
        :param args: positional arguments for the callable
        :param kwargs: keyword arguments for the callable
        """
        BaseThread.__init__(self, m)

        self.f = function
        self.args = args
        self.kwargs = kwargs

        # pyfiles currently claimed by this addon thread
        self.active = []

        m.localThreads.append(self)

        self.start()

    def getActiveFiles(self):
        """Return the pyfiles this thread is currently working on."""
        return self.active

    def addActive(self, pyfile):
        """ Adds a pyfile to active list and thus will be displayed on overview"""
        if pyfile not in self.active:
            self.active.append(pyfile)

    def finishFile(self, pyfile):
        """Release a pyfile from the active list and finalize it."""
        if pyfile in self.active:
            self.active.remove(pyfile)

        pyfile.finishIfDone()

    def run(self): #TODO: approach via func_code
        try:
            try:
                # First attempt: pass ourselves as 'thread' keyword so the
                # addon can register active files on this thread.
                self.kwargs["thread"] = self
                self.f(*self.args, **self.kwargs)
            except TypeError, e:
                #dirty method to filter out exceptions
                # Only swallow the TypeError caused by the extra 'thread'
                # argument; any other TypeError is a real error -> re-raise.
                if "unexpected keyword argument 'thread'" not in e.args[0]:
                    raise

                # retry without the unsupported 'thread' keyword
                del self.kwargs["thread"]
                self.f(*self.args, **self.kwargs)
        except Exception, e:
            # bound method -> log the error on the owning addon instance;
            # errors from unbound callables are silently dropped here
            if hasattr(self.f, "im_self"):
                addon = self.f.im_self
                addon.logError(_("An Error occurred"), e)
                if self.m.core.debug:
                    print_exc()
                    self.writeDebugReport(addon.__name__, plugin=addon)

        finally:
            # always release claimed pyfiles and deregister this thread
            local = copy(self.active)
            for x in local:
                self.finishFile(x)

            self.m.localThreads.remove(self)
diff --git a/pyload/threads/BaseThread.py b/pyload/threads/BaseThread.py
new file mode 100644
index 000000000..deaf03461
--- /dev/null
+++ b/pyload/threads/BaseThread.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from threading import Thread
+from time import strftime, gmtime
+from sys import exc_info
+from types import MethodType
+from pprint import pformat
+from traceback import format_exc
+
+from pyload.utils import primary_uid
+from pyload.utils.fs import listdir, join, save_join, stat, exists
+
class BaseThread(Thread):
    """abstract base class for thread types"""

    def __init__(self, manager):
        """:param manager: the owning thread manager"""
        Thread.__init__(self)
        self.setDaemon(True)  # daemon: never block interpreter shutdown
        self.m = manager #thread manager
        self.core = manager.core
        self.log = manager.core.log

        #: Owner of the thread, every type should set it
        self.owner = None

    @property
    def user(self):
        # primary user id derived from the owner of this thread
        return primary_uid(self.owner)

    def getProgress(self):
        """ retrieves progress information about the current running task

        :return: :class:`ProgressInfo`
        """

    # Debug Stuff
    def writeDebugReport(self, name, pyfile=None, plugin=None):
        """ writes a debug report to disk

        :param name: base name used for the report archive
        :param pyfile: dump info of this pyfile (takes precedence)
        :param plugin: dump info of this plugin when no pyfile is given
        :return: path of the written report (zip, or txt on zip failure)
        """

        dump_name = "debug_%s_%s.zip" % (name, strftime("%d-%m-%Y_%H-%M-%S"))
        if pyfile:
            dump = self.getFileDump(pyfile)
        else:
            dump = self.getPluginDump(plugin)

        try:
            import zipfile

            zip = zipfile.ZipFile(dump_name, "w")

            # include any temporary files the plugin left under tmp/<name>
            if exists(join("tmp", name)):
                for f in listdir(join("tmp", name)):
                    try:
                        # avoid encoding errors
                        zip.write(join("tmp", name, f), save_join(name, f))
                    except:
                        pass

            info = zipfile.ZipInfo(save_join(name, "debug_Report.txt"), gmtime())
            info.external_attr = 0644 << 16L # change permissions
            zip.writestr(info, dump)

            info = zipfile.ZipInfo(save_join(name, "system_Report.txt"), gmtime())
            info.external_attr = 0644 << 16L
            zip.writestr(info, self.getSystemDump())

            zip.close()

            if not stat(dump_name).st_size:
                raise Exception("Empty Zipfile")

        except Exception, e:
            # creating the zip failed -> fall back to a plain text report
            self.log.debug("Error creating zip file: %s" % e)

            dump_name = dump_name.replace(".zip", ".txt")
            f = open(dump_name, "wb")
            f.write(dump)
            f.close()

        self.log.info("Debug Report written to %s" % dump_name)
        return dump_name

    def getFileDump(self, pyfile):
        """Build a text dump: traceback, frame locals, plugin/pyfile attrs, config."""
        dump = "pyLoad %s Debug Report of %s %s \n\nTRACEBACK:\n %s \n\nFRAMESTACK:\n" % (
            self.m.core.api.getServerVersion(), pyfile.pluginname, pyfile.plugin.__version__, format_exc())

        # walk the traceback of the exception currently being handled
        tb = exc_info()[2]
        stack = []
        while tb:
            stack.append(tb.tb_frame)
            tb = tb.tb_next

        for frame in stack[1:]:
            dump += "\nFrame %s in %s at line %s\n" % (frame.f_code.co_name,
                                                       frame.f_code.co_filename,
                                                       frame.f_lineno)

            for key, value in frame.f_locals.items():
                dump += "\t%20s = " % key
                try:
                    dump += pformat(value) + "\n"
                except Exception, e:
                    dump += "<ERROR WHILE PRINTING VALUE> " + str(e) + "\n"

            # drop the frame reference to avoid reference cycles
            del frame

        del stack #delete it just to be sure...

        dump += "\n\nPLUGIN OBJECT DUMP: \n\n"

        # skip dunder attributes and bound methods, dump everything else
        for name in dir(pyfile.plugin):
            attr = getattr(pyfile.plugin, name)
            if not name.endswith("__") and type(attr) != MethodType:
                dump += "\t%20s = " % name
                try:
                    dump += pformat(attr) + "\n"
                except Exception, e:
                    dump += "<ERROR WHILE PRINTING VALUE> " + str(e) + "\n"

        dump += "\nPYFILE OBJECT DUMP: \n\n"

        for name in dir(pyfile):
            attr = getattr(pyfile, name)
            if not name.endswith("__") and type(attr) != MethodType:
                dump += "\t%20s = " % name
                try:
                    dump += pformat(attr) + "\n"
                except Exception, e:
                    dump += "<ERROR WHILE PRINTING VALUE> " + str(e) + "\n"

        dump += "\n\nCONFIG: \n\n"
        dump += pformat(self.m.core.config.values) + "\n"

        return dump

    #TODO
    def getPluginDump(self, plugin):
        # placeholder: plugin-only dumps are not implemented yet
        return ""

    def getSystemDump(self):
        # placeholder: system info dump is not implemented yet
        return ""
diff --git a/pyload/threads/DecrypterThread.py b/pyload/threads/DecrypterThread.py
new file mode 100644
index 000000000..77502569c
--- /dev/null
+++ b/pyload/threads/DecrypterThread.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from time import sleep
+from traceback import print_exc
+
+from pyload.utils import uniqify
+from pyload.plugins.Base import Retry
+from pyload.plugins.Crypter import Package
+
+from BaseThread import BaseThread
+
class DecrypterThread(BaseThread):
    """thread for decrypting

    Runs all crypter plugins over their urls and feeds the decrypted links
    back into the package (or into new packages) via the api.
    """

    def __init__(self, manager, data, pid):
        """constructor

        :param manager: thread manager
        :param data: list of (url, pluginname) tuples to decrypt
        :param pid: package id the decrypted links belong to
        """
        BaseThread.__init__(self, manager)
        self.data = data
        self.pid = pid

        self.start()

    def run(self):
        # group the urls by their crypter plugin name
        plugin_map = {}
        for url, plugin in self.data:
            if plugin in plugin_map:
                plugin_map[plugin].append(url)
            else:
                plugin_map[plugin] = [url]

        self.decrypt(plugin_map)

    def decrypt(self, plugin_map):
        """Instantiate each crypter plugin, decrypt its urls, add the results."""
        pack = self.m.core.files.getPackage(self.pid)
        result = []

        for name, urls in plugin_map.iteritems():
            klass = self.m.core.pluginManager.loadClass("crypter", name)
            plugin = klass(self.m.core, pack, pack.password)
            plugin_result = []

            try:
                try:
                    plugin_result = plugin._decrypt(urls)
                except Retry:
                    # plugin requested a retry: wait briefly, try once more
                    sleep(1)
                    plugin_result = plugin._decrypt(urls)
            except Exception, e:
                plugin.logError(_("Decrypting failed"), e)
                if self.m.core.debug:
                    print_exc()
                    self.writeDebugReport(plugin.__name__, plugin=plugin)

            plugin.logDebug("Decrypted", plugin_result)
            result.extend(plugin_result)

        #TODO
        result = uniqify(result)
        # split results into plain urls and Package objects,
        # merging Package results that share the same name
        pack_names = {}
        urls = []

        for p in result:
            if isinstance(p, Package):
                if p.name in pack_names:
                    pack_names[p.name].urls.extend(p.urls)
                else:
                    pack_names[p.name] = p
            else:
                urls.append(p)

        # plain urls go into the original package
        if urls:
            self.log.info(_("Decrypted %(count)d links into package %(name)s") % {"count": len(urls), "name": pack.name})
            self.m.core.api.addFiles(self.pid, urls)

        # each named result becomes a new package with the same password
        for p in pack_names.itervalues():
            self.m.core.api.addPackage(p.name, p.urls, pack.password)

        if not result:
            self.log.info(_("No links decrypted"))
diff --git a/pyload/threads/DownloadThread.py b/pyload/threads/DownloadThread.py
new file mode 100644
index 000000000..b5a45185f
--- /dev/null
+++ b/pyload/threads/DownloadThread.py
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from Queue import Queue
+from time import sleep, time
+from traceback import print_exc
+from sys import exc_clear
+from pycurl import error
+
+from pyload.plugins.Base import Fail, Retry, Abort
+from pyload.plugins.Hoster import Reconnect, SkipDownload
+from pyload.network.HTTPRequest import BadHeader
+
+from BaseThread import BaseThread
+
class DownloadThread(BaseThread):
    """thread for downloading files from 'real' hoster plugins"""

    def __init__(self, manager):
        """Constructor

        :param manager: thread manager
        """
        BaseThread.__init__(self, manager)

        self.queue = Queue() # job queue
        # currently processed pyfile; False when idle, "quit" is the stop sentinel
        self.active = None

        self.start()

    def run(self):
        """run method

        Main loop: pull jobs from the queue, run the plugin's download and
        translate every plugin exception into the appropriate file status.
        """
        pyfile = None

        while True:
            del pyfile
            self.active = self.queue.get()  # blocks until a job arrives
            pyfile = self.active

            if self.active == "quit":
                # stop sentinel -> deregister and terminate this thread
                self.active = None
                self.m.threads.remove(self)
                return True

            try:
                if not pyfile.hasPlugin(): continue
                #this pyfile was deleted while queuing

                pyfile.plugin.checkForSameFiles(starting=True)
                self.log.info(_("Download starts: %s" % pyfile.name))

                # start download
                self.core.addonManager.downloadPreparing(pyfile)
                pyfile.plugin.preprocessing(self)

                self.log.info(_("Download finished: %s") % pyfile.name)
                self.core.addonManager.downloadFinished(pyfile)
                self.core.files.checkPackageFinished(pyfile)

            except NotImplementedError:
                # broken plugin: mark the file failed and move on
                self.log.error(_("Plugin %s is missing a function.") % pyfile.pluginname)
                pyfile.setStatus("failed")
                pyfile.error = "Plugin does not work"
                self.clean(pyfile)
                continue

            except Abort:
                # user aborted the download
                try:
                    self.log.info(_("Download aborted: %s") % pyfile.name)
                except:
                    pass

                pyfile.setStatus("aborted")

                self.clean(pyfile)
                continue

            except Reconnect:
                # requeue the job and wait until the reconnect finished
                self.queue.put(pyfile)
                #pyfile.req.clearCookies()

                while self.m.reconnecting.isSet():
                    sleep(0.5)

                continue

            except Retry, e:
                # plugin asked for a restart -> requeue
                reason = e.args[0]
                self.log.info(_("Download restarted: %(name)s | %(msg)s") % {"name": pyfile.name, "msg": reason})
                self.queue.put(pyfile)
                continue
            except Fail, e:
                msg = e.args[0]

                # TODO: activate former skipped downloads

                # map well-known failure reasons to dedicated statuses
                if msg == "offline":
                    pyfile.setStatus("offline")
                    self.log.warning(_("Download is offline: %s") % pyfile.name)
                elif msg == "temp. offline":
                    pyfile.setStatus("temp. offline")
                    self.log.warning(_("Download is temporary offline: %s") % pyfile.name)
                else:
                    pyfile.setStatus("failed")
                    self.log.warning(_("Download failed: %(name)s | %(msg)s") % {"name": pyfile.name, "msg": msg})
                    pyfile.error = msg

                self.core.addonManager.downloadFailed(pyfile)
                self.clean(pyfile)
                continue

            except error, e:
                # pycurl errors carry (code, message) - normalize the args
                if len(e.args) == 2:
                    code, msg = e.args
                else:
                    code = 0
                    msg = e.args

                self.log.debug("pycurl exception %s: %s" % (code, msg))

                # connection level errors (could not connect / reset / timeout)
                # -> wait a minute and requeue the job
                if code in (7, 18, 28, 52, 56):
                    self.log.warning(_("Couldn't connect to host or connection reset, waiting 1 minute and retry."))
                    wait = time() + 60

                    pyfile.waitUntil = wait
                    pyfile.setStatus("waiting")
                    while time() < wait:
                        sleep(1)
                        if pyfile.abort:
                            break

                    if pyfile.abort:
                        self.log.info(_("Download aborted: %s") % pyfile.name)
                        pyfile.setStatus("aborted")

                        self.clean(pyfile)
                    else:
                        self.queue.put(pyfile)

                    continue

                else:
                    # any other pycurl error is fatal for this download
                    pyfile.setStatus("failed")
                    self.log.error("pycurl error %s: %s" % (code, msg))
                    if self.core.debug:
                        print_exc()
                        self.writeDebugReport(pyfile.plugin.__name__, pyfile)

                    self.core.addonManager.downloadFailed(pyfile)

                self.clean(pyfile)
                continue

            except SkipDownload, e:
                # plugin decided the file need not be downloaded
                pyfile.setStatus("skipped")

                self.log.info(_("Download skipped: %(name)s due to %(plugin)s")
                % {"name": pyfile.name, "plugin": e.message})

                self.clean(pyfile)

                self.core.files.checkPackageFinished(pyfile)

                self.active = False
                self.core.files.save()

                continue


            except Exception, e:
                # catch-all: a HTTP 500 is treated as temporary, everything
                # else marks the file as failed
                if isinstance(e, BadHeader) and e.code == 500:
                    pyfile.setStatus("temp. offline")
                    self.log.warning(_("Download is temporary offline: %s") % pyfile.name)
                    pyfile.error = _("Internal Server Error")

                else:
                    pyfile.setStatus("failed")
                    self.log.warning(_("Download failed: %(name)s | %(msg)s") % {"name": pyfile.name, "msg": str(e)})
                    pyfile.error = str(e)

                if self.core.debug:
                    print_exc()
                    self.writeDebugReport(pyfile.plugin.__name__, pyfile)

                self.core.addonManager.downloadFailed(pyfile)
                self.clean(pyfile)
                continue

            finally:
                # runs on every iteration: persist state and clear the
                # exception info of this thread
                self.core.files.save()
                pyfile.checkIfProcessed()
                exc_clear()


            #pyfile.plugin.req.clean()

            # success path: mark idle and finalize the file
            self.active = False
            pyfile.finishIfDone()
            self.core.files.save()

    def getProgress(self):
        """Return progress info of the currently active download, if any."""
        if self.active:
            return self.active.getProgressInfo()


    def put(self, job):
        """assign a job to the thread"""
        self.queue.put(job)

    def clean(self, pyfile):
        """ set thread inactive and release pyfile """
        self.active = False
        pyfile.release()

    def stop(self):
        """stops the thread"""
        self.put("quit")
diff --git a/pyload/threads/InfoThread.py b/pyload/threads/InfoThread.py
new file mode 100644
index 000000000..fba2a9056
--- /dev/null
+++ b/pyload/threads/InfoThread.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from time import time
+from traceback import print_exc
+
+from pyload.Api import LinkStatus
+from pyload.utils.packagetools import parseNames
+from pyload.utils import has_method, accumulate
+
+from BaseThread import BaseThread
+
+class InfoThread(BaseThread):
+ def __init__(self, manager, data, pid=-1, rid=-1):
+ """Constructor"""
+ BaseThread.__init__(self, manager)
+
+ self.data = data
+ self.pid = pid # package id
+ # [ .. (name, plugin) .. ]
+
+ self.rid = rid #result id
+
+ self.cache = [] #accumulated data
+
+ self.start()
+
+ def run(self):
+ """run method"""
+
+ plugins = accumulate(self.data)
+ crypter = {}
+
+ # filter out crypter plugins
+ for name in self.m.core.pluginManager.getPlugins("crypter"):
+ if name in plugins:
+ crypter[name] = plugins[name]
+ del plugins[name]
+
+ #directly write to database
+ if self.pid > -1:
+ for pluginname, urls in plugins.iteritems():
+ plugin = self.m.core.pluginManager.getPluginModule(pluginname)
+ klass = self.m.core.pluginManager.getPluginClass(pluginname)
+ if has_method(klass, "getInfo"):
+ self.fetchForPlugin(pluginname, klass, urls, self.updateDB)
+ self.m.core.files.save()
+ elif has_method(plugin, "getInfo"):
+ self.log.debug("Deprecated .getInfo() method on module level, use classmethod instead")
+ self.fetchForPlugin(pluginname, plugin, urls, self.updateDB)
+ self.m.core.files.save()
+
+ else: #post the results
+ for name, urls in crypter:
+ #attach container content
+ try:
+ data = self.decrypt(name, urls)
+ except:
+ print_exc()
+ self.m.log.error("Could not decrypt container.")
+ data = []
+
+ accumulate(data, plugins)
+
+ self.m.infoResults[self.rid] = {}
+
+ for pluginname, urls in plugins.iteritems():
+ plugin = self.m.core.pluginManager.getPlugin(pluginname, True)
+ klass = getattr(plugin, pluginname)
+ if has_method(klass, "getInfo"):
+ self.fetchForPlugin(pluginname, plugin, urls, self.updateResult, True)
+ #force to process cache
+ if self.cache:
+ self.updateResult(pluginname, [], True)
+ elif has_method(plugin, "getInfo"):
+ self.log.debug("Deprecated .getInfo() method on module level, use staticmethod instead")
+ self.fetchForPlugin(pluginname, plugin, urls, self.updateResult, True)
+ #force to process cache
+ if self.cache:
+ self.updateResult(pluginname, [], True)
+ else:
+ #generate default result
+ result = [(url, 0, 3, url) for url in urls]
+
+ self.updateResult(pluginname, result, True)
+
+ self.m.infoResults[self.rid]["ALL_INFO_FETCHED"] = {}
+
+ self.m.timestamp = time() + 5 * 60
+
+
+ def updateDB(self, plugin, result):
+ self.m.core.files.updateFileInfo(result, self.pid)
+
+ def updateResult(self, plugin, result, force=False):
+ #parse package name and generate result
+ #accumulate results
+
+ self.cache.extend(result)
+
+ if len(self.cache) >= 20 or force:
+ #used for package generating
+ tmp = [(name, (url, LinkStatus(name, plugin, "unknown", status, int(size))))
+ for name, size, status, url in self.cache]
+
+ data = parseNames(tmp)
+ result = {}
+ for k, v in data.iteritems():
+ for url, status in v:
+ status.packagename = k
+ result[url] = status
+
+ self.m.setInfoResults(self.rid, result)
+
+ self.cache = []
+
+ def updateCache(self, plugin, result):
+ self.cache.extend(result)
+
+ def fetchForPlugin(self, pluginname, plugin, urls, cb, err=None):
+ try:
+ result = [] #result loaded from cache
+ process = [] #urls to process
+ for url in urls:
+ if url in self.m.infoCache:
+ result.append(self.m.infoCache[url])
+ else:
+ process.append(url)
+
+ if result:
+ self.m.log.debug("Fetched %d values from cache for %s" % (len(result), pluginname))
+ cb(pluginname, result)
+
+ if process:
+ self.m.log.debug("Run Info Fetching for %s" % pluginname)
+ for result in plugin.getInfo(process):
+ #result = [ .. (name, size, status, url) .. ]
+ if not type(result) == list: result = [result]
+
+ for res in result:
+ self.m.infoCache[res[3]] = res
+
+ cb(pluginname, result)
+
+ self.m.log.debug("Finished Info Fetching for %s" % pluginname)
+ except Exception, e:
+ self.m.log.warning(_("Info Fetching for %(name)s failed | %(err)s") %
+ {"name": pluginname, "err": str(e)})
+ if self.m.core.debug:
+ print_exc()
+
+ # generate default results
+ if err:
+ result = [(url, 0, 3, url) for url in urls]
+ cb(pluginname, result)
+
+
+ def decrypt(self, plugin, urls):
+ self.m.log.debug("Pre decrypting %s" % plugin)
+ klass = self.m.core.pluginManager.loadClass("crypter", plugin)
+
+ # only decrypt files
+ if has_method(klass, "decryptFile"):
+ urls = klass.decrypt(urls)
+ data, crypter = self.m.core.pluginManager.parseUrls(urls)
+ return data
+
+ return []
diff --git a/pyload/threads/ThreadManager.py b/pyload/threads/ThreadManager.py
new file mode 100644
index 000000000..086e8ba51
--- /dev/null
+++ b/pyload/threads/ThreadManager.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2012 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+from os.path import exists, join
+import re
+from subprocess import Popen
+from threading import Event, Lock
+from time import sleep, time
+from traceback import print_exc
+from random import choice
+
+import pycurl
+
+from pyload.datatypes.PyFile import PyFile
+from pyload.network.RequestFactory import getURL
+from pyload.utils import lock, uniqify
+from pyload.utils.fs import free_space
+
+from DecrypterThread import DecrypterThread
+from DownloadThread import DownloadThread
+from InfoThread import InfoThread
+
class ThreadManager:
    """manages the download threads, assign jobs, reconnect etc"""


    def __init__(self, core):
        """Constructor

        :param core: pyLoad core instance
        """
        self.core = core
        self.log = core.log

        self.threads = []  # thread list
        self.localThreads = []  #addon+decrypter threads

        # downloads stay paused until explicitly resumed
        self.pause = True

        self.reconnecting = Event()
        self.reconnecting.clear()
        self.downloaded = 0 #number of files downloaded since last cleanup

        self.lock = Lock()

        # some operations require to fetch url info from hoster, so we caching them so it wont be done twice
        # contains a timestamp and will be purged after timeout
        self.infoCache = {}

        # pool of ids for online check
        self.resultIDs = 0

        # threads which are fetching hoster results
        self.infoResults = {}
        # timeout for cache purge
        self.timestamp = 0

        pycurl.global_init(pycurl.GLOBAL_DEFAULT)

        # spawn one download thread per configured download slot
        for i in range(self.core.config.get("download", "max_downloads")):
            self.createThread()


    def createThread(self):
        """create a download thread"""

        thread = DownloadThread(self)
        self.threads.append(thread)

    def createInfoThread(self, data, pid):
        """ start a thread which fetches online status and other info's """
        self.timestamp = time() + 5 * 60
        if data: InfoThread(self, data, pid)

    @lock
    def createResultThread(self, data):
        """ creates a thread to fetch online status, returns result id """
        self.timestamp = time() + 5 * 60

        # hand out a fresh result id for polling
        rid = self.resultIDs
        self.resultIDs += 1

        InfoThread(self, data, rid=rid)

        return rid

    @lock
    def createDecryptThread(self, data, pid):
        """ Start decrypting of entered data, all links in one package are accumulated to one thread."""
        if data: DecrypterThread(self, data, pid)


    @lock
    def getInfoResult(self, rid):
        """returns result and clears it"""
        self.timestamp = time() + 5 * 60

        if rid in self.infoResults:
            data = self.infoResults[rid]
            self.infoResults[rid] = {}
            return data
        else:
            return {}

    @lock
    def setInfoResults(self, rid, result):
        # merge new results into the existing entry for this result id
        self.infoResults[rid].update(result)

    def getActiveDownloads(self, user=None):
        """Return pyfiles currently being downloaded."""
        # TODO: user context
        return [x.active for x in self.threads if x.active and isinstance(x.active, PyFile)]

    def getProgressList(self, user=None):
        """Collect progress info of all download and local threads."""
        info = []

        # TODO: local threads can create multiple progresses
        for thread in self.threads + self.localThreads:
            # skip if not belong to current user
            if user and thread.user != user: continue

            progress = thread.getProgress()
            if progress: info.append(progress)

        return info

    def getActiveFiles(self):
        """Return all pyfiles processed by download or addon threads."""
        active = self.getActiveDownloads()

        for t in self.localThreads:
            active.extend(t.getActiveFiles())

        return active

    def processingIds(self):
        """get a id list of all pyfiles processed"""
        return [x.id for x in self.getActiveFiles()]

    def work(self):
        """run all task which have to be done (this is for repetetive call by core)"""
        try:
            self.tryReconnect()
        except Exception, e:
            self.log.error(_("Reconnect Failed: %s") % str(e) )
            self.reconnecting.clear()
            self.core.print_exc()

        self.checkThreadCount()

        try:
            self.assignJob()
        except Exception, e:
            self.log.warning("Assign job error", e)
            self.core.print_exc()

            sleep(0.5)
            self.assignJob()
            #it may be failed non critical so we try it again

        # purge stale online-check caches after their timeout
        if (self.infoCache or self.infoResults) and self.timestamp < time():
            self.infoCache.clear()
            self.infoResults.clear()
            self.log.debug("Cleared Result cache")

    def tryReconnect(self):
        """checks if reconnect needed"""

        if not (self.core.config["reconnect"]["activated"] and self.core.api.isTimeReconnect()):
            return False

        # reconnect only when every active download both wants it and waits
        active = [x.active.plugin.wantReconnect and x.active.plugin.waiting for x in self.threads if x.active]

        if not (0 < active.count(True) == len(active)):
            return False

        # resolve the reconnect script path (absolute or relative to pypath)
        if not exists(self.core.config['reconnect']['method']):
            if exists(join(pypath, self.core.config['reconnect']['method'])):
                self.core.config['reconnect']['method'] = join(pypath, self.core.config['reconnect']['method'])
            else:
                self.core.config["reconnect"]["activated"] = False
                self.log.warning(_("Reconnect script not found!"))
                return

        self.reconnecting.set()

        #Do reconnect
        self.log.info(_("Starting reconnect"))

        # wait until all plugins left their waiting state
        while [x.active.plugin.waiting for x in self.threads if x.active].count(True) != 0:
            sleep(0.25)

        ip = self.getIP()

        self.core.evm.dispatchEvent("reconnect:before", ip)

        self.log.debug("Old IP: %s" % ip)

        try:
            reconn = Popen(self.core.config['reconnect']['method'], bufsize=-1, shell=True)#, stdout=subprocess.PIPE)
        except:
            # script failed to start: disable reconnect to avoid retry loops
            self.log.warning(_("Failed executing reconnect script!"))
            self.core.config["reconnect"]["activated"] = False
            self.reconnecting.clear()
            if self.core.debug:
                print_exc()
            return

        reconn.wait()
        sleep(1)
        ip = self.getIP()
        self.core.evm.dispatchEvent("reconnect:after", ip)

        self.log.info(_("Reconnected, new IP: %s") % ip)

        self.reconnecting.clear()

    def getIP(self):
        """retrieve current ip"""
        # (url, regex to extract the ip from the response)
        services = [("http://automation.whatismyip.com/n09230945.asp", "(\S+)"),
                    ("http://checkip.dyndns.org/",".*Current IP Address: (\S+)</body>.*")]

        ip = ""
        # try up to 10 times with a randomly chosen service
        for i in range(10):
            try:
                sv = choice(services)
                ip = getURL(sv[0])
                ip = re.match(sv[1], ip).group(1)
                break
            except:
                ip = ""
                sleep(1)

        return ip

    def checkThreadCount(self):
        """checks if there is a need for increasing or reducing thread count"""

        if len(self.threads) == self.core.config.get("download", "max_downloads"):
            return True
        elif len(self.threads) < self.core.config.get("download", "max_downloads"):
            self.createThread()
        else:
            # too many threads: stop one idle thread, if any
            free = [x for x in self.threads if not x.active]
            if free:
                free[0].put("quit")


    def cleanPycurl(self):
        """ make a global curl cleanup (currently unused) """
        if self.processingIds():
            return False
        pycurl.global_cleanup()
        pycurl.global_init(pycurl.GLOBAL_DEFAULT)
        self.downloaded = 0
        self.log.debug("Cleaned up pycurl")
        return True


    def assignJob(self):
        """assign a job to a thread if possible"""

        if self.pause or not self.core.api.isTimeDownload(): return

        #if self.downloaded > 20:
        #    if not self.cleanPyCurl(): return

        free = [x for x in self.threads if not x.active]

        # compute plugins that reached their per-plugin download limit,
        # so getJob() can exclude them
        inuse = [(x.active.pluginname, x.active.plugin.getDownloadLimit()) for x in self.threads if x.active and x.active.hasPlugin()]
        inuse = [(x[0], x[1], len([y for y in self.threads if y.active and y.active.pluginname == x[0]])) for x in inuse]
        occ = tuple(sorted(uniqify([x[0] for x in inuse if 0 < x[1] <= x[2]])))

        job = self.core.files.getJob(occ)
        if job:
            try:
                job.initPlugin()
            except Exception, e:
                self.log.critical(str(e))
                print_exc()
                job.setStatus("failed")
                job.error = str(e)
                job.release()
                return

            # pause downloads when disk space runs low
            spaceLeft = free_space(self.core.config["general"]["download_folder"]) / 1024 / 1024
            if spaceLeft < self.core.config["general"]["min_free_space"]:
                self.log.warning(_("Not enough space left on device"))
                self.pause = True

            if free and not self.pause:
                thread = free[0]
                #self.downloaded += 1
                thread.put(job)
            else:
                #put job back
                if occ not in self.core.files.jobCache:
                    self.core.files.jobCache[occ] = []
                self.core.files.jobCache[occ].append(job.id)

    def cleanup(self):
        """do global cleanup, should be called when finished with pycurl"""
        pycurl.global_cleanup()
diff --git a/pyload/threads/__init__.py b/pyload/threads/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/threads/__init__.py
diff --git a/pyload/utils/ImportDebugger.py b/pyload/utils/ImportDebugger.py
new file mode 100644
index 000000000..a997f7b0c
--- /dev/null
+++ b/pyload/utils/ImportDebugger.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+import sys
+
class ImportDebugger(object):
    """Meta path hook that counts and prints every import attempt."""

    def __init__(self):
        # module name -> number of import attempts observed
        self.imported = {}

    def find_module(self, name, path=None):
        # PEP 302 finder hook: only records the attempt; implicitly returns
        # None so the regular import machinery continues.

        if name not in self.imported:
            self.imported[name] = 0

        self.imported[name] += 1

        print name, path

# Side effect of importing this module: install the hook globally.
sys.meta_path.append(ImportDebugger())
diff --git a/pyload/utils/JsEngine.py b/pyload/utils/JsEngine.py
new file mode 100644
index 000000000..ef7494d16
--- /dev/null
+++ b/pyload/utils/JsEngine.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from imp import find_module
+from os.path import join, exists
+from urllib import quote
+
+
+ENGINE = ""
+
+DEBUG = False
+JS = False
+PYV8 = False
+NODE = False
+RHINO = False
+
+# TODO: Refactor + clean up this class
+
# Probe the available JS engines in order of preference: spidermonkey
# shell ("js"), PyV8, node, rhino. Each subprocess probe evaluates 23+19
# and accepts the engine only if it prints "42". With DEBUG set, every
# engine is probed so results can later be cross-checked.
if not ENGINE:
    try:
        import subprocess

        # first call only checks the binary exists/starts
        subprocess.Popen(["js", "-v"], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        p = subprocess.Popen(["js", "-e", "print(23+19)"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        #integrity check
        if out.strip() == "42":
            ENGINE = "js"
            JS = True
    except:
        pass

if not ENGINE or DEBUG:
    try:
        # only check importability here; the actual import is deferred
        # until JsEngine.eval first runs
        find_module("PyV8")
        ENGINE = "pyv8"
        PYV8 = True
    except:
        pass

if not ENGINE or DEBUG:
    try:
        import subprocess
        subprocess.Popen(["node", "-v"], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        p = subprocess.Popen(["node", "-e", "console.log(23+19)"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        #integrity check
        if out.strip() == "42":
            ENGINE = "node"
            NODE = True
    except:
        pass

if not ENGINE or DEBUG:
    try:
        path = "" #path where to find rhino

        if exists("/usr/share/java/js.jar"):
            path = "/usr/share/java/js.jar"
        elif exists("js.jar"):
            path = "js.jar"
        # NOTE(review): `pypath` is not defined in this module -- presumably
        # injected as a builtin by the application bootstrap; confirm
        elif exists(join(pypath, "js.jar")): #may raises an exception, but js.jar wasnt found anyway
            path = join(pypath, "js.jar")

        if not path:
            raise Exception

        import subprocess

        p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", "print(23+19)"],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        #integrity check
        if out.strip() == "42":
            ENGINE = "rhino"
            RHINO = True
    except:
        pass
+
class JsEngine():
    """Evaluate JavaScript snippets with whichever engine was detected at
    import time (pyv8, spidermonkey "js", node or rhino).

    In DEBUG mode the script is run on every available engine and the
    results compared against each other.
    """
    def __init__(self):
        # engine name chosen at import time ("" when none was found)
        self.engine = ENGINE
        # whether the lazy PyV8 initialisation already ran
        self.init = False

    def __nonzero__(self):
        # instance truthiness mirrors engine availability (Python 2 protocol)
        return False if not ENGINE else True

    def set_debug(self, value):
        # toggle the module-wide compare-all-engines mode
        global DEBUG
        DEBUG = value

    def eval(self, script):
        """Run `script` and return its result.

        The subprocess engines return the printed output as a string;
        PyV8 returns the evaluation result directly. Raises Exception
        when no engine is available.
        """
        if not self.init:
            if ENGINE == "pyv8" or (DEBUG and PYV8):
                # deferred import; `global` promotes the local binding so
                # eval_pyv8 can see it
                import PyV8
                global PyV8

            self.init = True

        if type(script) == unicode:
            script = script.encode("utf8")

        if not ENGINE:
            raise Exception("No JS Engine")

        if not DEBUG:
            if ENGINE == "pyv8":
                return self.eval_pyv8(script)
            elif ENGINE == "js":
                return self.eval_js(script)
            elif ENGINE == "node":
                return self.eval_node(script)
            elif ENGINE == "rhino":
                return self.eval_rhino(script)
        else:
            # debug mode: evaluate on every detected engine, print each
            # result and warn when any two disagree
            results = []
            if PYV8:
                res = self.eval_pyv8(script)
                print "PyV8:", res
                results.append(res)
            if JS:
                res = self.eval_js(script)
                print "JS:", res
                results.append(res)
            if NODE:
                res = self.eval_node(script)
                print "NODE:", res
                results.append(res)
            if RHINO:
                res = self.eval_rhino(script)
                print "Rhino:", res
                results.append(res)

            warning = False
            for x in results:
                for y in results:
                    if x != y:
                        warning = True

            if warning: print "### WARNING ###: Different results"

            return results[0]

    def eval_pyv8(self, script):
        # fresh context per evaluation
        rt = PyV8.JSContext()
        rt.enter()
        return rt.eval(script)

    def eval_js(self, script):
        # quote/unescape round-trip avoids shell quoting issues in -e
        script = "print(eval(unescape('%s')))" % quote(script)
        p = subprocess.Popen(["js", "-e", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
        out, err = p.communicate()
        res = out.strip()
        return res

    def eval_node(self, script):
        script = "console.log(eval(unescape('%s')))" % quote(script)
        p = subprocess.Popen(["node", "-e", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
        out, err = p.communicate()
        res = out.strip()
        return res

    def eval_rhino(self, script):
        script = "print(eval(unescape('%s')))" % quote(script)
        p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", script],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
        out, err = p.communicate()
        res = out.strip()
        # NOTE(review): re-encodes rhino's utf8 output as ISO-8859-1,
        # presumably to match the other engines' byte output -- confirm
        return res.decode("utf8").encode("ISO-8859-1")

    def error(self):
        # user-facing message shown when no engine could be detected
        return _("No js engine detected, please install either Spidermonkey, ossp-js, pyv8, nodejs or rhino")
+
+if __name__ == "__main__":
+ js = JsEngine()
+ js.set_debug(True)
+
+ test = u'"Ì"+"À"'
+ js.eval(test) \ No newline at end of file
diff --git a/pyload/utils/__init__.py b/pyload/utils/__init__.py
new file mode 100644
index 000000000..c9c24ac40
--- /dev/null
+++ b/pyload/utils/__init__.py
@@ -0,0 +1,232 @@
+# -*- coding: utf-8 -*-
+
+""" Store all usefull functions here """
+
+import os
+import time
+import re
+from string import maketrans
+from itertools import islice
+from htmlentitydefs import name2codepoint
+
+# abstraction layer for json operations
+try: # since python 2.6
+ import json
+except ImportError: #use system simplejson if available
+ import simplejson as json
+
+json_loads = json.loads
+json_dumps = json.dumps
+
def decode(string):
    """Decode a byte string to unicode via utf8 (undecodable bytes are
    replaced); any non-str value passes through unchanged."""
    if type(string) != str:
        return string
    return string.decode("utf8", "replace")
+
def encode(string):
    """ encode a unicode string to an utf8 byte string; other values pass through """
    # `unicode` is the Python 2 builtin text type
    if type(string) == unicode:
        return string.encode("utf8", "replace")
    else:
        return string
+
+
def remove_chars(string, repl):
    """ removes all chars in repl from string"""
    # byte strings use the Python 2 string.maketrans identity table with
    # the delete-chars argument
    if type(string) == str:
        return string.translate(maketrans("", ""), repl)
    # unicode strings need a codepoint -> None mapping instead
    elif type(string) == unicode:
        return string.translate(dict([(ord(s), None) for s in repl]))
    # NOTE(review): any other type falls through and returns None -- confirm
    # callers only ever pass str/unicode
+
+
def get_console_encoding(enc):
    """Map a detected console encoding to one that is actually usable.

    On Windows, cp65001 (the UTF-8 codepage) is rejected in favour of
    cp850; on all other platforms utf8 is forced regardless of `enc`.
    """
    if os.name == "nt":
        if enc == "cp65001": # aka UTF-8
            print "WARNING: Windows codepage 65001 is not supported."
            enc = "cp850"
    else:
        enc = "utf8"

    return enc
+
def compare_time(start, end):
    """Return True when the current local (hour, minute) lies within the
    span from `start` to `end`.

    `start`/`end` are sequences of int-convertible values, e.g.
    ("8", "0"). Spans that wrap over midnight (start > end) are
    supported; identical start and end always match.
    """
    start = map(int, start)
    end = map(int, end)

    if start == end: return True

    # [tm_hour, tm_min] of the current local time
    now = list(time.localtime()[3:5])
    if start < now < end: return True
    # wrap-around span (e.g. 22:00 - 06:00)
    elif start > end and (now > start or now < end): return True
    # NOTE(review): chained comparison = start < now and now > end and
    # end < start -- appears to duplicate the wrap-around case; confirm
    elif start < now > end < start: return True
    else: return False
+
def to_list(value):
    """Wrap a scalar into a one-element list; None maps to an empty
    list and lists pass through untouched."""
    if type(value) == list:
        return value
    if value is None:
        return []
    return [value]
+
def formatSize(size):
    """Deprecated alias kept for backward compatibility; use format_size."""
    print "Deprecated formatSize, use format_size"
    return format_size(size)
+
def format_size(bytes):
    """Format a byte count as a human readable string in binary units.

    Fix: the original switched units above 1000 while dividing by 1024,
    so counts in 1001..1023 were rendered as e.g. "0.99 KiB"; the
    threshold now matches the divisor. The unit index is also clamped so
    absurdly large inputs cannot run past the `sizes` tuple.
    """
    bytes = int(bytes)
    steps = 0
    sizes = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB")
    while bytes >= 1024 and steps < len(sizes) - 1:
        bytes /= 1024.0
        steps += 1
    return "%.2f %s" % (bytes, sizes[steps])
+
def formatSpeed(speed):
    """Deprecated alias kept for backward compatibility; use format_speed."""
    print "Deprecated formatSpeed, use format_speed"
    return format_speed(speed)
+
def format_speed(speed):
    """Format a bytes-per-second rate as human readable text."""
    return "%s/s" % format_size(speed)
+
def format_time(seconds):
    """Render a duration in seconds as hh:mm:ss; negative values clamp
    to "00:00:00"."""
    if seconds < 0:
        return "00:00:00"
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%.2i:%.2i:%.2i" % (hours, minutes, secs)
+
def uniqify(seq):
    """Return a copy of `seq` without duplicates, keeping the first
    occurrence of each element (order preserved)."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
+
def bits_set(bits, compare):
    """True when every bit of `bits` is also set in `compare`; an input
    of 0 is trivially a subset of anything."""
    return (bits & compare) == bits
+
def parseFileSize(string, unit=None): #returns bytes
    """Parse a human readable traffic/size expression into bytes (float).

    Without an explicit unit, both number and unit are extracted from
    the string; decimal commas are accepted. Unknown units are treated
    as plain bytes. Returns 0 when nothing parsable is found.
    """
    if not unit:
        m = re.match(r"([\d.,]+) *([a-zA-Z]*)", string.strip().lower())
        if not m:
            return 0
        traffic = float(m.group(1).replace(",", "."))
        unit = m.group(2)
    else:
        if isinstance(string, basestring):
            traffic = float(string.replace(",", "."))
        else:
            traffic = string

    #ignore case
    unit = unit.lower().strip()

    for names, factor in ((("gb", "gig", "gbyte", "gigabyte", "gib", "g"), 1 << 30),
                          (("mb", "mbyte", "megabyte", "mib", "m"), 1 << 20),
                          (("kb", "kib", "kilobyte", "kbyte", "k"), 1 << 10)):
        if unit in names:
            traffic *= factor
            break

    return traffic
+
+
def lock(func):
    """Decorator that serialises calls to `func` through the lock found
    on its first argument (args[0].lock)."""

    def wrapper(*args, **kwargs):
        owner = args[0]
        owner.lock.acquire()
        try:
            return func(*args, **kwargs)
        finally:
            owner.lock.release()

    return wrapper
+
def read_lock(func):
    """Decorator acquiring args[0].lock in shared (read) mode around the
    wrapped call, releasing it even on exceptions."""

    def wrapper(*args, **kwargs):
        owner = args[0]
        owner.lock.acquire(shared=True)
        try:
            return func(*args, **kwargs)
        finally:
            owner.lock.release()

    return wrapper
+
def chunks(iterable, size):
    """Yield successive lists of at most `size` items from `iterable`."""
    it = iter(iterable)
    while True:
        piece = list(islice(it, size))
        if not piece:
            return
        yield piece
+
+
def fixup(m):
    """re.sub callback for html_unescape: translate a single matched
    HTML entity or character reference into the corresponding unicode
    character; unknown references are returned unchanged."""
    text = m.group(0)
    if text[:2] == "&#":
        # character reference
        try:
            if text[:3] == "&#x":
                # hexadecimal form, e.g. &#x41;
                return unichr(int(text[3:-1], 16))
            else:
                # decimal form, e.g. &#65;
                return unichr(int(text[2:-1]))
        except ValueError:
            pass
    else:
        # named entity, e.g. &amp;
        try:
            name = text[1:-1]
            text = unichr(name2codepoint[name])
        except KeyError:
            pass

    return text # leave as is
+
+
def has_method(obj, name):
    """True only when `name` is defined directly on obj itself, i.e.
    inherited attributes do not count."""
    return name in vars(obj)
+
def accumulate(it, inv_map=None):
    """Invert (key, value) pairs into a {value: [keys]} dictionary.

    Fix: the original guard `if not inv_map` silently replaced a
    caller-supplied *empty* dict with a fresh one, so the caller's dict
    was never filled in place; only None triggers creation now.

    :param it: iterable of (key, value) pairs
    :param inv_map: optional dict to accumulate into (also returned)
    :return: the populated mapping
    """
    if inv_map is None:
        inv_map = {}

    for key, value in it:
        # setdefault creates the key list on first sight, then appends
        inv_map.setdefault(value, []).append(key)

    return inv_map
+
def to_string(value):
    """Coerce any non-string value to str; str/unicode pass through unchanged."""
    return str(value) if not isinstance(value, basestring) else value
+
def to_bool(value):
    """Parse common truthy strings to True; non-strings are returned unchanged."""
    if not isinstance(value, basestring): return value
    # NOTE(review): "an" looks like a German leftover for "on" -- confirm
    return True if value.lower() in ("1", "true", "on", "an", "yes") else False
+
def to_int(string, default=0):
    """ return int from string or default

    Fix: also catch TypeError, so unconvertible non-string inputs
    (e.g. None, lists) honour the documented fallback instead of
    raising.
    """
    try:
        return int(string)
    except (ValueError, TypeError):
        return default
+
def get_index(l, value):
    """Return the position of the first element equal to `value`.

    Unlike the native .index this also works on tuples and on
    Python 2.5; raises ValueError (like list.index) when absent.
    """
    pos = 0
    for item in l:
        if item == value:
            return pos
        pos += 1

    raise ValueError("list.index(x): x not in list")
+
def primary_uid(user):
    """Map a user instance to its primary uid; plain ints pass through
    and falsy users yield None."""
    if type(user) == int:
        return user
    if not user:
        return None
    return user.primary
+
def html_unescape(text):
    """Removes HTML or XML character references and entities from a text string"""
    # raw string for the regex; fixup translates each matched reference
    return re.sub(r"&#?\w+;", fixup, text)
+
+if __name__ == "__main__":
+ print remove_chars("ab'cdgdsf''ds'", "'ghd")
diff --git a/pyload/utils/fs.py b/pyload/utils/fs.py
new file mode 100644
index 000000000..92cc605e7
--- /dev/null
+++ b/pyload/utils/fs.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+from os.path import join
+from . import decode, remove_chars
+
+# File System Encoding functions:
+# Use fs_encode before accessing files on disk, it will encode the string properly
+
# On filesystems reporting an ANSI encoding, unicode paths must be
# encoded to utf8 byte strings before hitting the OS; elsewhere the
# filesystem API accepts unicode directly and both helpers are no-ops.
if sys.getfilesystemencoding().startswith('ANSI'):
    def fs_encode(string):
        """Encode a unicode path to an utf8 byte string for OS calls."""
        if type(string) == unicode:
            return string.encode('utf8')
        return string

    fs_decode = decode #decode utf8

else:
    fs_encode = fs_decode = lambda x: x # do nothing
+
+# FS utilities
def chmod(path, mode):
    """Best-effort os.chmod that ignores filesystem errors.

    Fix: the bare `except:` also swallowed KeyboardInterrupt,
    SystemExit and programming errors; narrowed to OSError, which is
    what os.chmod raises on permission/missing-file problems.
    """
    try:
        return os.chmod(fs_encode(path), mode)
    except OSError:
        pass
+
def dirname(path):
    """os.path.dirname wrapper that is safe for unicode paths (encodes
    before the call, decodes the result)."""
    return fs_decode(os.path.dirname(fs_encode(path)))
+
+def abspath(path):
+ return fs_decode(os.path.abspath(fs_encode(path)))
+
+def chown(path, uid, gid):
+ return os.chown(fs_encode(path), uid, gid)
+
+def remove(path):
+ return os.remove(fs_encode(path))
+
+def exists(path):
+ return os.path.exists(fs_encode(path))
+
def makedirs(path, mode=0755):
    """os.makedirs wrapper with encoded path (0755 is the Python 2 octal literal)."""
    return os.makedirs(fs_encode(path), mode)
+
# entries are decoded back to unicode via fs_decode
def listdir(path):
    """os.listdir wrapper returning decoded (unicode) directory entries."""
    return [fs_decode(x) for x in os.listdir(fs_encode(path))]
+
def save_filename(name):
    """Strip characters that are illegal or problematic in filenames.

    Windows forbids a larger character set than POSIX, hence the split.
    """
    #remove some chars
    if os.name == 'nt':
        return remove_chars(name, '/\\?%*:|"<>,')
    else:
        return remove_chars(name, '/\\"')
+
+def stat(name):
+ return os.stat(fs_encode(name))
+
def save_join(*args):
    """ joins a path, encoding aware """
    # every part is decoded to unicode first, then the joined path is
    # encoded for filesystem use (byte string on ANSI systems)
    return fs_encode(join(*[x if type(x) == unicode else decode(x) for x in args]))
+
def free_space(folder):
    """Return the number of free bytes on the filesystem containing `folder`."""
    folder = fs_encode(folder)

    if os.name == "nt":
        import ctypes

        # GetDiskFreeSpaceExW fills the bytes available to the caller
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
        return free_bytes.value
    else:
        from os import statvfs

        # fragment size * blocks available to unprivileged processes
        s = statvfs(folder)
        return s.f_frsize * s.f_bavail
diff --git a/pyload/utils/json_layer.py b/pyload/utils/json_layer.py
new file mode 100644
index 000000000..cf9743603
--- /dev/null
+++ b/pyload/utils/json_layer.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# abstraction layer for json operations
+
+print ".json_layer is deprecated, use .json instead"
+
+try: # since python 2.6
+ import json
+ from json import loads as json_loads
+ from json import dumps as json_dumps
+except ImportError: #use system simplejson if available
+ import simplejson as json
+ from simplejson import loads as json_loads
+ from simplejson import dumps as json_dumps
diff --git a/pyload/utils/packagetools.py b/pyload/utils/packagetools.py
new file mode 100644
index 000000000..791a46d51
--- /dev/null
+++ b/pyload/utils/packagetools.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# JDownloader/src/jd/controlling/LinkGrabberPackager.java
+
+import re
+from urlparse import urlparse
+
def matchFirst(string, *args):
    """Try every compiled pattern in every given pattern list and return
    group(1) of the first match; falls back to the input string when
    nothing matches."""
    for patterns in args:
        for regexp in patterns:
            match = regexp.search(string)
            if match is not None:
                return match.group(1)

    return string
+
+
def parseNames(files):
    """ Generates packages names from name, data lists

    Heuristics ported from JDownloader's LinkGrabberPackager: strip
    multi-part archive suffixes (rar/zip/isz/FFSJ split parts), CD and
    part markers and trailing extensions, then group files by the
    remaining base name. Files yielding no usable name fall back to the
    hoster's hostname, then to "unknown".

    :param files: list of (name, data)
    :return: packagenames mapped to data lists (eg. urls)
    """
    packs = {}

    # known file extensions; used to recognise trailing split indices
    endings = "\\.(3gp|7zip|7z|abr|ac3|aiff|aifc|aif|ai|au|avi|bin|bz2|cbr|cbz|ccf|cue|cvd|chm|dta|deb|divx|djvu|dlc|dmg|doc|docx|dot|eps|exe|ff|flv|f4v|gsd|gif|gz|iwd|iso|ipsw|java|jar|jpg|jpeg|jdeatme|load|mws|mw|m4v|m4a|mkv|mp2|mp3|mp4|mov|movie|mpeg|mpe|mpg|msi|msu|msp|nfo|npk|oga|ogg|ogv|otrkey|pkg|png|pdf|pptx|ppt|pps|ppz|pot|psd|qt|rmvb|rm|rar|ram|ra|rev|rnd|r\\d+|rpm|run|rsdf|rtf|sh(!?tml)|srt|snd|sfv|swf|tar|tif|tiff|ts|txt|viv|vivo|vob|wav|wmv|xla|xls|xpi|zeno|zip|z\\d+|_[_a-z]{2}|\\d+$)"

    rarPats = [re.compile("(.*)(\\.|_|-)pa?r?t?\\.?[0-9]+.(rar|exe)$", re.I),
               re.compile("(.*)(\\.|_|-)part\\.?[0]*[1].(rar|exe)$", re.I),
               re.compile("(.*)\\.rar$", re.I),
               re.compile("(.*)\\.r\\d+$", re.I),
               re.compile("(.*)(\\.|_|-)\\d+$", re.I)]

    zipPats = [re.compile("(.*)\\.zip$", re.I),
               re.compile("(.*)\\.z\\d+$", re.I),
               re.compile("(?is).*\\.7z\\.[\\d]+$", re.I),
               re.compile("(.*)\\.a.$", re.I)]

    ffsjPats = [re.compile("(.*)\\._((_[a-z])|([a-z]{2}))(\\.|$)"),
                re.compile("(.*)(\\.|_|-)[\\d]+(" + endings + "$)", re.I)]

    iszPats = [re.compile("(.*)\\.isz$", re.I),
               re.compile("(.*)\\.i\\d{2}$", re.I)]

    pat1 = re.compile("(\\.?CD\\d+)", re.I)
    pat2 = re.compile("(\\.?part\\d+)", re.I)

    pat3 = re.compile("(.+)[\\.\\-_]+$")
    pat4 = re.compile("(.+)\\.\\d+\\.xtm$")

    for file, url in files:
        patternMatch = False

        if file is None:
            continue

        # remove trailing /
        name = file.rstrip('/')

        # extract last path part .. if there is a path
        split = name.rsplit("/", 1)
        if len(split) > 1:
            name = split.pop(1)

        #check if an already existing package may be ok for this file
        #        found = False
        #        for pack in packs:
        #            if pack in file:
        #                packs[pack].append(url)
        #                found = True
        #                break
        #
        #        if found: continue

        # unrar pattern, 7zip/zip and hjmerge pattern, isz pattern, FFSJ pattern
        before = name
        name = matchFirst(name, rarPats, zipPats, iszPats, ffsjPats)
        if before != name:
            patternMatch = True

        # xtremsplit pattern
        r = pat4.search(name)
        if r is not None:
            name = r.group(1)

        # remove part and cd pattern
        r = pat1.search(name)
        if r is not None:
            name = name.replace(r.group(0), "")
            patternMatch = True

        r = pat2.search(name)
        if r is not None:
            name = name.replace(r.group(0), "")
            patternMatch = True

        # additional checks if extension pattern matched
        if patternMatch:
            # remove extension (or trailing "_..." fragment) if short enough
            index = name.rfind(".")
            if index <= 0:
                index = name.rfind("_")
            if index > 0:
                length = len(name) - index
                if length <= 4:
                    name = name[:-length]

            # remove endings like . _ -
            r = pat3.search(name)
            if r is not None:
                name = r.group(1)

            # replace . and _ with space
            name = name.replace(".", " ")
            name = name.replace("_", " ")

            name = name.strip()
        else:
            name = ""

        # fallback: package by hoster
        if not name:
            name = urlparse(file).hostname
            if name: name = name.replace("www.", "")

        # fallback : default name
        if not name:
            name = "unknown"

        # build mapping
        if name in packs:
            packs[name].append(url)
        else:
            packs[name] = [url]

    return packs
+
+
+if __name__ == "__main__":
+ from os.path import join
+ from pprint import pprint
+
+ f = open(join("..", "..", "testlinks2.txt"), "rb")
+ urls = [(x.strip(), x.strip()) for x in f.readlines() if x.strip()]
+ f.close()
+
+ print "Having %d urls." % len(urls)
+
+ packs = parseNames(urls)
+
+ pprint(packs)
+
+ print "Got %d urls." % sum([len(x) for x in packs.itervalues()])
diff --git a/pyload/utils/pylgettext.py b/pyload/utils/pylgettext.py
new file mode 100644
index 000000000..fb36fecee
--- /dev/null
+++ b/pyload/utils/pylgettext.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from gettext import *
+
+_searchdirs = None
+
+origfind = find
+
def setpaths(pathlist):
    """Replace the module-wide list of locale search directories.

    Any non-list iterable is materialised into a list first.
    """
    global _searchdirs
    _searchdirs = pathlist if isinstance(pathlist, list) else list(pathlist)
+
+
def addpath(path):
    """Append a single locale directory to the search list (no duplicates).

    Fix: when the list was still uninitialised the original did
    `_searchdirs = list(path)`, which splits a string path into single
    characters; it must start a fresh one-element list instead.
    """
    global _searchdirs
    if _searchdirs is None:
        _searchdirs = [path]
    elif path not in _searchdirs:
        _searchdirs.append(path)
+
+
def delpath(path):
    """Remove a directory from the locale search list if present; a
    missing entry or uninitialised list is a silent no-op."""
    global _searchdirs
    if _searchdirs and path in _searchdirs:
        _searchdirs.remove(path)
+
+
def clearpath():
    """Reset the search list so gettext's default lookup is used again."""
    global _searchdirs
    _searchdirs = None
+
+
def find(domain, localedir=None, languages=None, all=False):
    """Drop-in replacement for gettext.find that additionally searches
    the module-wide _searchdirs (set via setpaths/addpath) after the
    given `localedir`.

    Falls back to the original gettext.find when no extra directories
    are configured. With all=False the first hit wins; with all=True
    every hit from every directory is returned.
    """
    if _searchdirs is None:
        return origfind(domain, localedir, languages, all)
    searches = [localedir] + _searchdirs
    results = list()
    for dir in searches:
        res = origfind(domain, dir, languages, all)
        if all is False:
            results.append(res)
        else:
            results.extend(res)
    if all is False:
        # NOTE(review): on Python 3 `filter` returns an iterator, which would
        # break len() below; fine on the Python 2 runtime this targets
        results = filter(lambda x: x is not None, results)
        if len(results) == 0:
            return None
        else:
            return results[0]
    else:
        return results
+
+#Is there a smarter/cleaner pythonic way for this?
+translation.func_globals['find'] = find
diff --git a/pyload/web/.bowerrc b/pyload/web/.bowerrc
new file mode 100644
index 000000000..f594df7a7
--- /dev/null
+++ b/pyload/web/.bowerrc
@@ -0,0 +1,3 @@
+{
+ "directory": "app/components"
+}
diff --git a/pyload/web/Gruntfile.js b/pyload/web/Gruntfile.js
new file mode 100644
index 000000000..92bb33da9
--- /dev/null
+++ b/pyload/web/Gruntfile.js
@@ -0,0 +1,425 @@
+'use strict';
+var LIVERELOAD_PORT = 35729;
+var lrSnippet = require('connect-livereload')({port: LIVERELOAD_PORT});
+var mountFolder = function(connect, dir) {
+ return connect.static(require('path').resolve(dir));
+};
+var fs = require('fs');
+var path = require('path');
+
+// # Globbing
+// for performance reasons we're only matching one level down:
+// 'test/spec/{,*/}*.js'
+// use this if you want to recursively match all subfolders:
+// 'test/spec/**/*.js'
+
+module.exports = function(grunt) {
+ // load all grunt tasks
+ require('matchdep').filterDev('grunt-*').forEach(grunt.loadNpmTasks);
+
+ // configurable paths
+ var yeomanConfig = {
+ app: 'app',
+ dist: 'dist',
+ banner: '/* Copyright(c) 2008-2013 pyLoad Team */\n'
+ };
+
+ grunt.initConfig({
+ yeoman: yeomanConfig,
+ watch: {
+ options: {
+ nospawn: true
+ },
+ less: {
+ files: ['<%= yeoman.app %>/styles/**/*.less'],
+ tasks: ['less']
+ },
+ livereload: {
+ options: {
+ livereload: LIVERELOAD_PORT
+ },
+ files: [
+ '<%= yeoman.app %>/**/*.html',
+ '{<%= yeoman.app %>}/styles/**/*.css',
+ '{.tmp,<%= yeoman.app %>}/scripts/**/*.js',
+ '<%= yeoman.app %>/images/{,*/}*.{png,jpg,jpeg,gif,webp,svg}'
+ ]
+ }
+ },
+ connect: {
+ options: {
+ port: 9000,
+ // change this to '0.0.0.0' to access the server from outside
+ hostname: 'localhost'
+ },
+ livereload: {
+ options: {
+ middleware: function(connect) {
+ return [
+ lrSnippet,
+ mountFolder(connect, '.tmp'),
+ mountFolder(connect, yeomanConfig.app)
+ ];
+ }
+ }
+ },
+ test: {
+ options: {
+ middleware: function(connect) {
+ return [
+ mountFolder(connect, '.tmp'),
+ mountFolder(connect, 'test')
+ ];
+ }
+ }
+ },
+ dist: {
+ options: {
+ middleware: function(connect) {
+ return [
+ mountFolder(connect, yeomanConfig.dist)
+ ];
+ }
+ }
+ }
+ },
+ open: { // Opens the webbrowser
+ server: {
+ path: 'http://localhost:<%= connect.options.port %>'
+ }
+ },
+ clean: {
+ dist: {
+ files: [
+ {
+ dot: true,
+ src: [
+ '.tmp',
+ '<%= yeoman.dist %>/*',
+ '!<%= yeoman.dist %>/.git*'
+ ]
+ }
+ ]
+ },
+ server: '.tmp'
+ },
+ jshint: {
+ options: {
+ jshintrc: '<%= yeoman.app %>/components/pyload-common/.jshintrc'
+ },
+ all: [
+ 'Gruntfile.js',
+ '<%= yeoman.app %>/scripts/**/*.js',
+ '!<%= yeoman.app %>/scripts/vendor/*',
+ 'test/spec/{,*/}*.js'
+ ]
+ },
+ mocha: {
+ all: {
+ options: {
+ run: true,
+ urls: ['http://localhost:<%= connect.options.port %>/index.html']
+ }
+ }
+ },
+ less: {
+ options: {
+ paths: [yeomanConfig.app + '/components', yeomanConfig.app + '/components/pyload-common/styles',
+ yeomanConfig.app + '/styles/default']
+ //dumpLineNumbers: true
+ },
+ dist: {
+ files: [
+ {
+ expand: true, // Enable dynamic expansion.
+ cwd: '<%= yeoman.app %>/styles/', // Src matches are relative to this path.
+ src: ['**/main.less'], // Actual pattern(s) to match.
+ dest: '.tmp/styles', // Destination path prefix.
+ ext: '.css' // Dest filepaths will have this extension.
+ }
+ ]
+ }
+ },
+ // not used since Uglify task does concat,
+ // but still available if needed
+ /*concat: {
+ dist: {}
+ },*/
+ requirejs: {
+ dist: {
+ // Options: https://github.com/jrburke/r.js/blob/master/build/example.build.js
+ options: {
+ // `name` and `out` is set by grunt-usemin
+ baseUrl: yeomanConfig.app + '/scripts',
+ optimize: 'none',
+ // TODO: Figure out how to make sourcemaps work with grunt-usemin
+ // https://github.com/yeoman/grunt-usemin/issues/30
+ //generateSourceMaps: true,
+ // required to support SourceMaps
+ // http://requirejs.org/docs/errors.html#sourcemapcomments
+ preserveLicenseComments: false,
+ useStrict: true,
+ wrap: true,
+
+ // Delete already included files from dist
+ // TODO: For multiple modules it would delete to much files
+ done: function(done, output) {
+ var root = path.join(path.resolve('.'), yeomanConfig.app);
+ var parse = require('rjs-build-analysis').parse(output);
+ parse.bundles.forEach(function(bundle) {
+ var parent = path.relative(path.resolve('.'), bundle.parent);
+ bundle.children.forEach(function(f) {
+ // Skip templates
+ if (f.indexOf('hbs!') > -1) return;
+
+ var rel = path.relative(root, f);
+ var target = path.join(yeomanConfig.dist, rel);
+
+ if (target === parent)
+ return;
+
+ if (fs.existsSync(target)) {
+ console.log('Removing', target);
+ fs.unlinkSync(target);
+
+ // Remove the empty directories
+ var files = fs.readdirSync(path.dirname(target));
+ if (files.length === 0) {
+ fs.rmdirSync(path.dirname(target));
+ console.log('Removing dir', path.dirname(target));
+ }
+
+ }
+ });
+ });
+ done();
+ }
+ //uglify2: {} // https://github.com/mishoo/UglifyJS2
+ }
+ }
+ },
+ rev: {
+ dist: {
+ files: {
+ src: [
+ // TODO only main script needs a rev
+ '<%= yeoman.dist %>/scripts/default.js',
+ '<%= yeoman.dist %>/styles/{,*/}*.css'
+ ]
+ }
+ }
+ },
+ useminPrepare: {
+ options: {
+ dest: '<%= yeoman.dist %>'
+ },
+ html: '<%= yeoman.app %>/index.html'
+ },
+ usemin: {
+ options: {
+ dirs: ['<%= yeoman.dist %>']
+ },
+ html: ['<%= yeoman.dist %>/*.html'],
+ css: ['<%= yeoman.dist %>/styles/**/*.css']
+ },
+ imagemin: {
+ dist: {
+ files: [
+ {
+ expand: true,
+ cwd: '<%= yeoman.app %>/images',
+ src: '**/*.{png,jpg,jpeg}',
+ dest: '<%= yeoman.dist %>/images'
+ }
+ ]
+ }
+ },
+ svgmin: {
+ dist: {
+ files: [
+ {
+ expand: true,
+ cwd: '<%= yeoman.app %>/images',
+ src: '**/*.svg',
+ dest: '<%= yeoman.dist %>/images'
+ }
+ ]
+ }
+ },
+ htmlmin: {
+ dist: {
+ options: {
+ /*removeCommentsFromCDATA: true,
+ // https://github.com/yeoman/grunt-usemin/issues/44
+ //collapseWhitespace: true,
+ collapseBooleanAttributes: true,
+ removeAttributeQuotes: true,
+ removeRedundantAttributes: true,
+ useShortDoctype: true,
+ removeEmptyAttributes: true,
+ removeOptionalTags: true*/
+ },
+ files: [
+ {
+ expand: true,
+ cwd: '<%= yeoman.app %>',
+ src: ['*.html'],
+ dest: '<%= yeoman.dist %>'
+ }
+ ]
+ }
+ },
+ cssmin: {
+ options: {
+ banner: yeomanConfig.banner
+ },
+ dist: {
+ expand: true,
+ cwd: '<%= yeoman.dist %>',
+ src: ['**/*.css', '!*.min.css'],
+ dest: '<%= yeoman.dist %>',
+ ext: '.css'
+ }
+ },
+ uglify: { // JS min
+ options: {
+ mangle: true,
+ report: 'min',
+ preserveComments: false,
+ banner: yeomanConfig.banner
+ },
+ dist: {
+ expand: true,
+ cwd: '<%= yeoman.dist %>',
+ dest: '<%= yeoman.dist %>',
+ src: ['**/*.js', '!*.min.js']
+ }
+ },
+ // Put files not handled in other tasks here
+ copy: {
+ // Copy files from third party libraries
+ stageComponents: {
+ files: [
+ {
+ expand: true,
+ flatten: true,
+ cwd: '<%= yeoman.app %>',
+ dest: '.tmp/fonts',
+ src: [
+ '**/font-awesome/font/*'
+ ]
+ },
+ {
+ expand: true,
+ flatten: true,
+ cwd: '<%= yeoman.app %>',
+ dest: '.tmp/vendor',
+ src: [
+ '**/select2/select2.{png,css}',
+ '**/select2/select2-spinner.gif',
+ '**/select2/select2x2.png'
+ ]
+ },
+ {
+ expand: true,
+ cwd: '<%= yeoman.app %>/components/pyload-common',
+ dest: '.tmp',
+ src: [
+ 'favicon.ico',
+ 'images/*',
+ 'fonts/*'
+ ]
+ }
+ ]
+ },
+
+ dist: {
+ files: [
+ {
+ expand: true,
+ dot: true,
+ cwd: '<%= yeoman.app %>',
+ dest: '<%= yeoman.dist %>',
+ src: [
+ '*.{ico,txt}',
+ 'images/{,*/}*.{webp,gif}',
+ 'templates/**/*.html',
+ 'scripts/**/*.js',
+ 'styles/**/*.css',
+ 'fonts/*'
+ ]
+ }
+ ]
+ },
+
+ tmp: {
+ files: [
+ {
+ expand: true,
+ cwd: '.tmp/',
+ dest: '<%= yeoman.dist %>',
+ src: [
+ 'fonts/*',
+ 'images/*',
+ '**/*.{css,gif,png,js,html,ico}'
+ ]
+ }
+ ]
+ }
+ },
+ concurrent: {
+ server: [
+ 'copy:stageComponents',
+ 'less'
+ ],
+ test: [
+ 'less'
+ ],
+ dist: [
+ 'imagemin',
+ 'svgmin',
+ 'htmlmin',
+ 'cssmin'
+ ]
+ }
+ });
+
+ grunt.registerTask('server', function(target) {
+ if (target === 'dist') {
+ return grunt.task.run(['build', 'connect:dist:keepalive']);
+ }
+
+ grunt.task.run([
+ 'clean:server',
+ 'concurrent:server',
+ 'connect:livereload',
+ 'watch'
+ ]);
+ });
+
+ grunt.registerTask('test', [
+ 'clean:server',
+ 'concurrent:test',
+ 'connect:test',
+ 'mocha'
+ ]);
+
+ grunt.registerTask('build', [
+ 'clean:dist',
+ 'useminPrepare',
+ 'less',
+ 'copy', // Copy .tmp, components, app to dist
+ 'requirejs', // build the main script and remove included scripts
+ 'concat',
+ 'concurrent:dist', // Run minimisation
+ 'uglify', // minify js
+ 'rev',
+ 'usemin'
+ ]);
+
+ grunt.registerTask('default', [
+ 'jshint',
+// 'test',
+ 'build'
+ ]);
+};
diff --git a/pyload/web/ServerThread.py b/pyload/web/ServerThread.py
new file mode 100644
index 000000000..c55ddef0f
--- /dev/null
+++ b/pyload/web/ServerThread.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+from __future__ import with_statement
+from time import time, sleep
+
+import threading
+import logging
+
+from pyload.utils.fs import exists
+
+core = None
+setup = None
+log = logging.getLogger("log")
+
+
+class WebServer(threading.Thread):
+ def __init__(self, pycore=None, pysetup=None):
+ global core, setup
+ threading.Thread.__init__(self)
+
+ if pycore:
+ core = pycore
+ config = pycore.config
+ elif pysetup:
+ setup = pysetup
+ config = pysetup.config
+ else:
+ raise Exception("No config context provided")
+
+ self.server = config['webinterface']['server']
+ self.https = config['webinterface']['https']
+ self.cert = config["ssl"]["cert"]
+ self.key = config["ssl"]["key"]
+ self.host = config['webinterface']['host']
+ self.port = config['webinterface']['port']
+ self.debug = config['general']['debug_mode']
+ self.force_server = config['webinterface']['force_server']
+ self.error = None
+
+ self.setDaemon(True)
+
+ def run(self):
+ self.running = True
+ import webinterface
+
+ global webinterface
+
+ if self.https:
+ if not exists(self.cert) or not exists(self.key):
+ log.warning(_("SSL certificates not found."))
+ self.https = False
+
+ if webinterface.UNAVAILALBE:
+ log.warning(_("WebUI built is not available"))
+ elif webinterface.APP_PATH == "app":
+ log.info(_("Running webUI in development mode"))
+
+ prefer = None
+
+ # These cases covers all settings
+ if self.server == "threaded":
+ prefer = "threaded"
+ elif self.server == "fastcgi":
+ prefer = "flup"
+ elif self.server == "fallback":
+ prefer = "wsgiref"
+
+ server = self.select_server(prefer)
+
+ try:
+ self.start_server(server)
+
+ except Exception, e:
+ log.error(_("Failed starting webserver: " + e.message))
+ self.error = e
+ if core:
+ core.print_exc()
+
+ def select_server(self, prefer=None):
+ """ find a working server """
+ from servers import all_server
+
+ unavailable = []
+ server = None
+ for server in all_server:
+
+ if self.force_server and self.force_server == server.NAME:
+ break # Found server
+ # When force_server is set, no further checks have to be made
+ elif self.force_server:
+ continue
+
+ if prefer and prefer == server.NAME:
+ break # found prefered server
+ elif prefer: # prefer is similar to force, but force has precedence
+ continue
+
+ # Filter for server that offer ssl if needed
+ if self.https and not server.SSL:
+ continue
+
+ try:
+ if server.find():
+ break # Found a server
+ else:
+ unavailable.append(server.NAME)
+ except Exception, e:
+ log.error(_("Failed importing webserver: " + e.message))
+
+ if unavailable: # Just log whats not available to have some debug information
+ log.debug("Unavailable webserver: " + ",".join(unavailable))
+
+ if not server and self.force_server:
+ server = self.force_server # just return the name
+
+ return server
+
+
+ def start_server(self, server):
+
+ from servers import ServerAdapter
+
+ if issubclass(server, ServerAdapter):
+
+ if self.https and not server.SSL:
+ log.warning(_("This server offers no SSL, please consider using threaded instead"))
+ elif not self.https:
+ self.cert = self.key = None # This implicitly disables SSL
+ # there is no extra argument for the server adapter
+ # TODO: check for openSSL ?
+
+ # Now instantiate the serverAdapter
+ server = server(self.host, self.port, self.key, self.cert, 6, self.debug) # todo, num_connections
+ name = server.NAME
+
+ else: # server is just a string
+ name = server
+
+ log.info(
+ _("Starting %(name)s webserver: %(host)s:%(port)d") % {"name": name, "host": self.host, "port": self.port})
+ webinterface.run_server(host=self.host, port=self.port, server=server)
+
+
+ # check if an error was raised for n seconds
+ def check_error(self, n=1):
+ t = time() + n
+ while time() < t:
+ if self.error:
+ return self.error
+ sleep(0.1)
+
diff --git a/pyload/web/__init__.py b/pyload/web/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/web/__init__.py
diff --git a/pyload/web/api_app.py b/pyload/web/api_app.py
new file mode 100644
index 000000000..3ffc507aa
--- /dev/null
+++ b/pyload/web/api_app.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from urllib import unquote
+from itertools import chain
+from traceback import format_exc, print_exc
+
+from bottle import route, request, response, HTTPError, parse_auth
+
+from utils import set_session, get_user_api
+from webinterface import PYLOAD
+
+from pyload.Api import ExceptionObject
+from pyload.remote.json_converter import loads, dumps
+from pyload.utils import remove_chars
+
+def add_header(r):
+ r.headers.replace("Content-type", "application/json")
+ r.headers.append("Cache-Control", "no-cache, must-revalidate")
+ r.headers.append("Access-Control-Allow-Origin", request.get_header('Origin', '*'))
+ r.headers.append("Access-Control-Allow-Credentials", "true")
+
+# accepting positional arguments, as well as kwargs via post and get
+# the only forbidden path symbols are "?" (separates GET data) and "#"
+@route("/api/<func><args:re:[^#?]*>")
+@route("/api/<func><args:re:[^#?]*>", method="POST")
+def call_api(func, args=""):
+ add_header(response)
+
+ s = request.environ.get('beaker.session')
+ # Accepts standard http auth
+ auth = parse_auth(request.get_header('Authorization', ''))
+ if 'session' in request.POST or 'session' in request.GET:
+ # removes "' so it works on json strings
+ s = s.get_by_id(remove_chars(request.params.get('session'), "'\""))
+ elif auth:
+ user = PYLOAD.checkAuth(auth[0], auth[1], request.environ.get('REMOTE_ADDR', None))
+ # if auth is correct create a pseudo session
+ if user: s = {'uid': user.uid}
+
+ api = get_user_api(s)
+ if not api:
+ return HTTPError(401, dumps("Unauthorized"), **response.headers)
+
+ if not PYLOAD.isAuthorized(func, api.user):
+ return HTTPError(403, dumps("Forbidden"), **response.headers)
+
+ if not hasattr(PYLOAD.EXTERNAL, func) or func.startswith("_"):
+ print "Invalid API call", func
+ return HTTPError(404, dumps("Not Found"), **response.headers)
+
+ # TODO: possible encoding
+ # TODO Better error codes on invalid input
+
+ args = [loads(unquote(arg)) for arg in args.split("/")[1:]]
+ kwargs = {}
+
+ # accepts body as json dict
+ if request.json:
+ kwargs = request.json
+
+ # convert arguments from json to obj separately
+ for x, y in chain(request.GET.iteritems(), request.POST.iteritems()):
+ if not x or not y or x == "session": continue
+ kwargs[x] = loads(unquote(y))
+
+ try:
+ result = getattr(api, func)(*args, **kwargs)
+ # null is invalid json response
+ if result is None: result = True
+ return dumps(result)
+
+ except ExceptionObject, e:
+ return HTTPError(400, dumps(e), **response.headers)
+ except Exception, e:
+ print_exc()
+ return HTTPError(500, dumps({"error": e.message, "traceback": format_exc()}), **response.headers)
+
+
+@route("/api/login")
+@route("/api/login", method="POST")
+def login():
+ add_header(response)
+
+ username = request.params.get("username")
+ password = request.params.get("password")
+
+ user = PYLOAD.checkAuth(username, password, request.environ.get('REMOTE_ADDR', None))
+
+ if not user:
+ return dumps(False)
+
+ s = set_session(request, user)
+
+    # get the session id in a dirty way; the documentation seems wrong
+ try:
+ sid = s._headers["cookie_out"].split("=")[1].split(";")[0]
+ return dumps(sid)
+ except:
+ print "Could not get session"
+ return dumps(True)
+
+
+@route("/api/logout")
+@route("/api/logout", method="POST")
+def logout():
+ add_header(response)
+
+ s = request.environ.get('beaker.session')
+ s.delete()
+
+ return dumps(True)
diff --git a/pyload/web/app/fonts/Abel-Regular.ttf b/pyload/web/app/fonts/Abel-Regular.ttf
new file mode 100755
index 000000000..e37beb972
--- /dev/null
+++ b/pyload/web/app/fonts/Abel-Regular.ttf
Binary files differ
diff --git a/pyload/web/app/fonts/Abel-Regular.woff b/pyload/web/app/fonts/Abel-Regular.woff
new file mode 100644
index 000000000..ab8954389
--- /dev/null
+++ b/pyload/web/app/fonts/Abel-Regular.woff
Binary files differ
diff --git a/pyload/web/app/images/default/checks_sheet.png b/pyload/web/app/images/default/checks_sheet.png
new file mode 100644
index 000000000..9662b8070
--- /dev/null
+++ b/pyload/web/app/images/default/checks_sheet.png
Binary files differ
diff --git a/pyload/web/app/images/icon.png b/pyload/web/app/images/icon.png
new file mode 100644
index 000000000..1ab4ca081
--- /dev/null
+++ b/pyload/web/app/images/icon.png
Binary files differ
diff --git a/pyload/web/app/index.html b/pyload/web/app/index.html
new file mode 100644
index 000000000..4a4195b13
--- /dev/null
+++ b/pyload/web/app/index.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <!-- TODO: dynamic title -->
+ <title>pyLoad WebUI</title>
+ <meta name="description" content="pyLoad WebUI">
+ <meta name="viewport" content="width=device-width">
+
+ <!-- TODO: basepath and templates -->
+ <link href="styles/font.css" rel="stylesheet" type="text/css"/>
+ <link href="styles/default/main.css" rel="stylesheet" type="text/css">
+ <link href="vendor/select2.css" rel="stylesheet" type="text/css"/>
+
+
+ <!-- build:js scripts/config.js -->
+ <script data-main="scripts/config" src="components/requirejs/require.js"></script>
+ <!-- endbuild -->
+
+ <script type="text/javascript">
+
+ // Use value set by templateEngine or default val
+ function configValue(string, defaultValue) {
+ if (string.indexOf('{{') > -1)
+ return defaultValue;
+ return string;
+ }
+
+ window.dates = {
+ weeks: ['week', 'weeks'],
+ days: ['day', 'days'],
+ hours: ['hour', 'hours'],
+ minutes: ['minute', 'minutes'],
+ seconds: ['second', 'seconds']
+        }; // TODO: translate these unit names carefully
+
+ window.hostProtocol = window.location.protocol + '//';
+ window.hostAddress = window.location.hostname;
+ window.hostPort = configValue('{{web}}', '8001');
+ // TODO
+ window.pathPrefix = '/';
+ window.wsAddress = configValue('{{ws}}', 'ws://%s:7227');
+
+ require(['config'], function(Config) {
+ require(['default'], function(App) {
+ });
+ })
+ </script>
+
+</head>
+<body>
+<div id="wrap">
+ <header>
+ <div class="container-fluid">
+ <div class="row-fluid" id="header">
+ <div class="span3">
+ <div class="logo"></div>
+ <span class="title visible-large-screen">pyLoad</span>
+ </div>
+ </div>
+ </div>
+ <div id="notification-area"></div>
+ <div id="selection-area"></div>
+ </header>
+ <div id="content-container" class="container-fluid">
+ <div class="row-fluid" id="actionbar">
+ </div>
+ <div class="row-fluid" id="content">
+ </div>
+ </div>
+</div>
+<footer>
+ <div class="container-fluid">
+ <div class="row-fluid">
+ <div class="span2 offset1">
+ <div class="copyright">
+ © 2008-2013<br>
+ <a href="http://pyload.org/" target="_blank">The pyLoad Team</a><br>
+ </div>
+ </div>
+ <div class="span2">
+ <h2 class="block-title">Powered by</h2>
+ <hr>
+ Bootstrap <br>
+ </div>
+
+ <div class="span2">
+ <h2 class="block-title">pyLoad</h2>
+ <hr>
+ dsfdsf <br>
+ </div>
+
+ <div class="span2">
+ <h2 class="block-title">Community</h2>
+ <hr>
+ asd <br>
+ </div>
+
+ <div class="span2">
+ <h2 class="block-title">Development</h2>
+ <hr>
+ asd <br>
+ </div>
+ </div>
+ </div>
+</footer>
+<div id="modal-overlay" class="hide"></div>
+</body>
+</html>
diff --git a/pyload/web/app/scripts/app.js b/pyload/web/app/scripts/app.js
new file mode 100644
index 000000000..af5c50b14
--- /dev/null
+++ b/pyload/web/app/scripts/app.js
@@ -0,0 +1,104 @@
+/*
+ * Global Application Object
+ * Contains all necessary logic shared across views
+ */
+define([
+
+ // Libraries.
+ 'jquery',
+ 'underscore',
+ 'backbone',
+ 'handlebars',
+ 'utils/animations',
+ 'utils/lazyRequire',
+ 'utils/dialogs',
+ 'marionette',
+ 'bootstrap',
+ 'animate'
+
+], function($, _, Backbone, Handlebars) {
+ 'use strict';
+
+ Backbone.Marionette.TemplateCache.prototype.compileTemplate = function(rawTemplate) {
+ return Handlebars.compile(rawTemplate);
+ };
+
+ // TODO: configurable root
+ var App = new Backbone.Marionette.Application({
+ root: '/'
+ });
+
+ App.addRegions({
+ header: '#header',
+ notification: '#notification-area',
+ selection: '#selection-area',
+ content: '#content',
+ actionbar: '#actionbar'
+ });
+
+ App.navigate = function(url) {
+ return Backbone.history.navigate(url, true);
+ };
+
+ App.apiUrl = function(path) {
+ var url = window.hostProtocol + window.hostAddress + ':' + window.hostPort + window.pathPrefix + path;
+ return url;
+ };
+
+ // Add Global Helper functions
+ // Generates options dict that can be used for xhr requests
+ App.apiRequest = function(method, data, options) {
+ options || (options = {});
+ options.url = App.apiUrl('api/' + method);
+ options.dataType = 'json';
+
+ if (data) {
+ options.type = 'POST';
+ options.data = {};
+ // Convert arguments to json
+ _.keys(data).map(function(key) {
+ options.data[key] = JSON.stringify(data[key]);
+ });
+ }
+
+ return options;
+ };
+
+ App.setTitle = function(name) {
+ var title = window.document.title;
+ var newTitle;
+ // page name separator
+ var index = title.indexOf('-');
+ if (index >= 0)
+ newTitle = name + ' - ' + title.substr(index + 2, title.length);
+ else
+ newTitle = name + ' - ' + title;
+
+ window.document.title = newTitle;
+ };
+
+ App.openWebSocket = function(path) {
+ return new WebSocket(window.wsAddress.replace('%s', window.hostAddress) + path);
+ };
+
+ App.on('initialize:after', function() {
+// TODO pushState variable
+ Backbone.history.start({
+ pushState: false,
+ root: App.root
+ });
+
+ // All links should be handled by backbone
+ $(document).on('click', 'a[data-nav]', function(evt) {
+ var href = { prop: $(this).prop('href'), attr: $(this).attr('href') };
+ var root = location.protocol + '//' + location.host + App.root;
+ if (href.prop.slice(0, root.length) === root) {
+ evt.preventDefault();
+ Backbone.history.navigate(href.attr, true);
+ }
+ });
+ });
+
+ // Returns the app object to be available to other modules through require.js.
+ return App;
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/collections/AccountList.js b/pyload/web/app/scripts/collections/AccountList.js
new file mode 100644
index 000000000..bfc2af5a3
--- /dev/null
+++ b/pyload/web/app/scripts/collections/AccountList.js
@@ -0,0 +1,24 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'models/Account'], function($, Backbone, _, App, Account) {
+ 'use strict';
+
+ return Backbone.Collection.extend({
+
+ model: Account,
+
+ comparator: function(account) {
+ return account.get('plugin');
+ },
+
+ initialize: function() {
+
+ },
+
+ fetch: function(options) {
+ // TODO: refresh options?
+ options = App.apiRequest('getAccounts/false', null, options);
+ return Backbone.Collection.prototype.fetch.call(this, options);
+ }
+
+ });
+
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/collections/FileList.js b/pyload/web/app/scripts/collections/FileList.js
new file mode 100644
index 000000000..112dc5e51
--- /dev/null
+++ b/pyload/web/app/scripts/collections/FileList.js
@@ -0,0 +1,28 @@
+define(['jquery', 'backbone', 'underscore', 'models/File'], function($, Backbone, _, File) {
+ 'use strict';
+
+ return Backbone.Collection.extend({
+
+ model: File,
+
+ comparator: function(file) {
+ return file.get('fileorder');
+ },
+
+ isEqual: function(fileList) {
+ if (this.length !== fileList.length) return false;
+
+ // Assuming same order would be faster in false case
+ var diff = _.difference(this.models, fileList.models);
+
+        // NOTE(review): returns true when a difference exists, i.e. when the lists are UNEQUAL — the method name suggests 'diff.length === 0' was intended; verify callers
+ return diff.length > 0;
+ },
+
+ initialize: function() {
+
+ }
+
+ });
+
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/collections/InteractionList.js b/pyload/web/app/scripts/collections/InteractionList.js
new file mode 100644
index 000000000..24f8b9248
--- /dev/null
+++ b/pyload/web/app/scripts/collections/InteractionList.js
@@ -0,0 +1,49 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'models/InteractionTask'],
+ function($, Backbone, _, App, InteractionTask) {
+ 'use strict';
+
+ return Backbone.Collection.extend({
+
+ model: InteractionTask,
+
+ comparator: function(task) {
+ return task.get('iid');
+ },
+
+ fetch: function(options) {
+ options = App.apiRequest('getInteractionTasks/0', null, options);
+ var self = this;
+ options.success = function(data) {
+ self.set(data);
+ };
+
+ return $.ajax(options);
+ },
+
+ toJSON: function() {
+ var data = {queries: 0, notifications: 0};
+
+ this.map(function(task) {
+ if (task.isNotification())
+ data.notifications++;
+ else
+ data.queries++;
+ });
+
+ return data;
+ },
+
+ // a task is waiting for attention (no notification)
+ hasTaskWaiting: function() {
+ var tasks = 0;
+ this.map(function(task) {
+ if (!task.isNotification())
+ tasks++;
+ });
+
+ return tasks > 0;
+ }
+
+ });
+
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/collections/PackageList.js b/pyload/web/app/scripts/collections/PackageList.js
new file mode 100644
index 000000000..7bee861a4
--- /dev/null
+++ b/pyload/web/app/scripts/collections/PackageList.js
@@ -0,0 +1,16 @@
+define(['jquery', 'backbone', 'underscore', 'models/Package'], function($, Backbone, _, Package) {
+ 'use strict';
+
+ return Backbone.Collection.extend({
+
+ model: Package,
+
+ comparator: function(pack) {
+ return pack.get('packageorder');
+ },
+
+ initialize: function() {
+ }
+
+ });
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/collections/ProgressList.js b/pyload/web/app/scripts/collections/ProgressList.js
new file mode 100644
index 000000000..51849d8de
--- /dev/null
+++ b/pyload/web/app/scripts/collections/ProgressList.js
@@ -0,0 +1,18 @@
+define(['jquery', 'backbone', 'underscore', 'models/Progress'], function($, Backbone, _, Progress) {
+ 'use strict';
+
+ return Backbone.Collection.extend({
+
+ model: Progress,
+
+ comparator: function(progress) {
+ return progress.get('eta');
+ },
+
+ initialize: function() {
+
+ }
+
+ });
+
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/config.js b/pyload/web/app/scripts/config.js
new file mode 100644
index 000000000..9d1d027d9
--- /dev/null
+++ b/pyload/web/app/scripts/config.js
@@ -0,0 +1,75 @@
+// Sets the require.js configuration for your application.
+'use strict';
+require.config({
+
+ deps: ['default'],
+
+ paths: {
+
+ jquery: '../components/jquery/jquery',
+ flot: '../components/flot/jquery.flot',
+ transit: '../components/jquery.transit/jquery.transit',
+ animate: '../components/jquery.animate-enhanced/scripts/src/jquery.animate-enhanced',
+ cookie: '../components/jquery.cookie/jquery.cookie',
+ omniwindow: 'vendor/jquery.omniwindow',
+ select2: '../components/select2/select2',
+ bootstrap: '../components/bootstrap-assets/js/bootstrap',
+ underscore: '../components/underscore/underscore',
+ backbone: '../components/backbone/backbone',
+ marionette: '../components/backbone.marionette/lib/backbone.marionette',
+ handlebars: '../components/handlebars.js/dist/handlebars',
+ jed: '../components/jed/jed',
+
+ // TODO: Two hbs dependencies could be replaced
+ i18nprecompile: '../components/require-handlebars-plugin/hbs/i18nprecompile',
+ json2: '../components/require-handlebars-plugin/hbs/json2',
+
+ // Plugins
+// text: '../components/requirejs-text/text',
+ hbs: '../components/require-handlebars-plugin/hbs',
+
+ // Shortcut
+ tpl: '../templates/default'
+ },
+
+ hbs: {
+ disableI18n: true,
+ helperPathCallback: // Callback to determine the path to look for helpers
+ function(name) {
+ if (name === '_' || name === 'ngettext')
+ name = 'gettext';
+
+ // Some helpers are accumulated into one file
+ if (name.indexOf('file') === 0)
+ name = 'fileHelper';
+
+ return 'helpers/' + name;
+ },
+ templateExtension: 'html'
+ },
+
+ // Sets the configuration for your third party scripts that are not AMD compatible
+ shim: {
+ underscore: {
+ exports: '_'
+ },
+
+ backbone: {
+ deps: ['underscore', 'jquery'],
+ exports: 'Backbone'
+ },
+
+ marionette: ['backbone'],
+ handlebars: {
+ exports: 'Handlebars'
+ },
+
+ flot: ['jquery'],
+ transit: ['jquery'],
+ cookie: ['jquery'],
+ omniwindow: ['jquery'],
+ select2: ['jquery'],
+ bootstrap: ['jquery'],
+ animate: ['jquery']
+ }
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/controller.js b/pyload/web/app/scripts/controller.js
new file mode 100644
index 000000000..60f604e5b
--- /dev/null
+++ b/pyload/web/app/scripts/controller.js
@@ -0,0 +1,72 @@
+define([
+ 'app',
+ 'backbone',
+ 'underscore',
+
+ // Views
+ 'views/headerView',
+ 'views/notificationView',
+ 'views/dashboard/dashboardView',
+ 'views/dashboard/selectionView',
+ 'views/dashboard/filterView',
+ 'views/loginView',
+ 'views/settings/settingsView',
+ 'views/accounts/accountListView'
+], function(
+ App, Backbone, _, HeaderView, NotificationView, DashboardView, SelectionView, FilterView, LoginView, SettingsView, AccountListView) {
+ 'use strict';
+    // TODO: some views do not need to be loaded instantly
+
+ return {
+
+ header: function() {
+ if (!App.header.currentView) {
+ App.header.show(new HeaderView());
+ App.header.currentView.init();
+ App.notification.attachView(new NotificationView());
+ }
+ },
+
+ dashboard: function() {
+ this.header();
+
+ App.actionbar.show(new FilterView());
+
+ // TODO: not completely visible after reattaching
+ // now visible every time
+ if (_.isUndefined(App.selection.currentView) || _.isNull(App.selection.currentView))
+ App.selection.attachView(new SelectionView());
+
+ App.content.show(new DashboardView());
+ },
+
+ login: function() {
+ App.content.show(new LoginView());
+ },
+
+ logout: function() {
+ alert('Not implemented');
+ },
+
+ settings: function() {
+ this.header();
+
+ var view = new SettingsView();
+ App.actionbar.show(new view.actionbar());
+ App.content.show(view);
+ },
+
+ accounts: function() {
+ this.header();
+
+ var view = new AccountListView();
+ App.actionbar.show(new view.actionbar());
+ App.content.show(view);
+ },
+
+ admin: function() {
+ alert('Not implemented');
+ }
+ };
+
+});
diff --git a/pyload/web/app/scripts/default.js b/pyload/web/app/scripts/default.js
new file mode 100644
index 000000000..6c5ee9afb
--- /dev/null
+++ b/pyload/web/app/scripts/default.js
@@ -0,0 +1,30 @@
+define('default', ['backbone', 'jquery', 'app', 'router', 'models/UserSession'],
+ function(Backbone, $, App, Router, UserSession) {
+ 'use strict';
+
+ // Global ajax options
+ var options = {
+ statusCode: {
+ 401: function() {
+ console.log('Not logged in.');
+ App.navigate('login');
+ }
+ },
+ xhrFields: {withCredentials: true}
+ };
+
+ $.ajaxSetup(options);
+
+ Backbone.ajax = function() {
+ Backbone.$.ajaxSetup.call(Backbone.$, options);
+ return Backbone.$.ajax.apply(Backbone.$, arguments);
+ };
+
+ $(function() {
+ App.session = new UserSession();
+ App.router = new Router();
+ App.start();
+ });
+
+ return App;
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/helpers/fileHelper.js b/pyload/web/app/scripts/helpers/fileHelper.js
new file mode 100644
index 000000000..156be58f0
--- /dev/null
+++ b/pyload/web/app/scripts/helpers/fileHelper.js
@@ -0,0 +1,55 @@
+// Helpers to render the file view
+define('helpers/fileHelper', ['handlebars', 'utils/apitypes', 'helpers/formatTime'],
+ function(Handlebars, Api, formatTime) {
+ 'use strict';
+
+ function fileClass(file, options) {
+ if (file.finished)
+ return 'finished';
+ else if (file.failed)
+ return 'failed';
+ else if (file.offline)
+ return 'offline';
+ else if (file.online)
+ return 'online';
+ else if (file.waiting)
+ return 'waiting';
+ else if (file.downloading)
+ return 'downloading';
+
+ return '';
+ }
+
+ // TODO
+ function fileIcon(media, options) {
+ return 'icon-music';
+ }
+
+ // TODO rest of the states
+ function fileStatus(file, options) {
+ var s;
+ var msg = file.download.statusmsg;
+
+ if (file.failed) {
+ s = '<i class="icon-remove"></i>&nbsp;';
+ if (file.download.error)
+ s += file.download.error;
+ else s += msg;
+ } else if (file.finished)
+ s = '<i class="icon-ok"></i>&nbsp;' + msg;
+ else if (file.downloading)
+ s = '<div class="progress"><div class="bar" style="width: ' + file.progress + '%">&nbsp;&nbsp;' +
+ formatTime(file.eta) + '</div></div>';
+ else if (file.waiting)
+ s = '<i class="icon-time"></i>&nbsp;' + formatTime(file.eta);
+ else
+ s = msg;
+
+ return new Handlebars.SafeString(s);
+ }
+
+ Handlebars.registerHelper('fileClass', fileClass);
+ Handlebars.registerHelper('fileIcon', fileIcon);
+ Handlebars.registerHelper('fileStatus', fileStatus);
+ return fileClass;
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/helpers/formatSize.js b/pyload/web/app/scripts/helpers/formatSize.js
new file mode 100644
index 000000000..3b62e74c7
--- /dev/null
+++ b/pyload/web/app/scripts/helpers/formatSize.js
@@ -0,0 +1,15 @@
+// Format bytes in human readable format
+define('helpers/formatSize', ['handlebars'], function(Handlebars) {
+ 'use strict';
+
+ var sizes = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'];
+ function formatSize(bytes, options) {
+ if (!bytes || bytes === 0) return '0 B';
+ var i = parseInt(Math.floor(Math.log(bytes) / Math.log(1024)), 10);
+ // round to two digits
+ return (bytes / Math.pow(1024, i)).toFixed(2) + ' ' + sizes[i];
+ }
+
+ Handlebars.registerHelper('formatSize', formatSize);
+ return formatSize;
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/helpers/formatTime.js b/pyload/web/app/scripts/helpers/formatTime.js
new file mode 100644
index 000000000..757ff73ad
--- /dev/null
+++ b/pyload/web/app/scripts/helpers/formatTime.js
@@ -0,0 +1,17 @@
+// Format a duration in seconds as a human-readable time string
+define('helpers/formatTime', ['handlebars', 'vendor/remaining'], function(Handlebars, Remaining) {
+ 'use strict';
+
+ function formatTime(seconds, options) {
+ if (seconds === Infinity)
+ return '∞';
+ else if (!seconds || seconds <= 0)
+ return '-';
+
+ // TODO: digital or written string
+ return Remaining.getStringDigital(seconds, window.dates);
+ }
+
+ Handlebars.registerHelper('formatTime', formatTime);
+ return formatTime;
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/helpers/gettext.js b/pyload/web/app/scripts/helpers/gettext.js
new file mode 100644
index 000000000..d73b5e378
--- /dev/null
+++ b/pyload/web/app/scripts/helpers/gettext.js
@@ -0,0 +1,16 @@
+require(['underscore', 'handlebars', 'utils/i18n'], function(_, Handlebars, i18n) {
+ 'use strict';
+ // These methods binds additional content directly to translated message
+ function ngettext(single, plural, n) {
+ return i18n.sprintf(i18n.ngettext(single, plural, n), n);
+ }
+
+ function gettext(key, message) {
+ return i18n.sprintf(i18n.gettext(key), message);
+ }
+
+ Handlebars.registerHelper('_', gettext);
+ Handlebars.registerHelper('gettext', gettext);
+ Handlebars.registerHelper('ngettext', ngettext);
+ return gettext;
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/helpers/pluginIcon.js b/pyload/web/app/scripts/helpers/pluginIcon.js
new file mode 100644
index 000000000..6b2fdc67f
--- /dev/null
+++ b/pyload/web/app/scripts/helpers/pluginIcon.js
@@ -0,0 +1,14 @@
+// Resolves name of plugin to icon path
+define('helpers/pluginIcon', ['handlebars', 'app'], function(Handlebars, App) {
+ 'use strict';
+
+ function pluginIcon(name) {
+ if (typeof name === 'object' && typeof name.get === 'function')
+ name = name.get('plugin');
+
+ return App.apiUrl('icons/' + name);
+ }
+
+ Handlebars.registerHelper('pluginIcon', pluginIcon);
+ return pluginIcon;
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/helpers/truncate.js b/pyload/web/app/scripts/helpers/truncate.js
new file mode 100644
index 000000000..fb351b776
--- /dev/null
+++ b/pyload/web/app/scripts/helpers/truncate.js
@@ -0,0 +1,25 @@
+require(['underscore','handlebars'], function(_, Handlebars) {
+ 'use strict';
+
+ function truncate(fullStr, options) {
+ var strLen = 30;
+ if (_.isNumber(options))
+ strLen = options;
+
+ if (fullStr.length <= strLen) return fullStr;
+
+        var separator = options.separator || '…';
+
+ var sepLen = separator.length,
+ charsToShow = strLen - sepLen,
+ frontChars = Math.ceil(charsToShow / 2),
+ backChars = Math.floor(charsToShow / 2);
+
+ return fullStr.substr(0, frontChars) +
+ separator +
+ fullStr.substr(fullStr.length - backChars);
+ }
+
+ Handlebars.registerHelper('truncate', truncate);
+ return truncate;
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/Account.js b/pyload/web/app/scripts/models/Account.js
new file mode 100644
index 000000000..a2e24b056
--- /dev/null
+++ b/pyload/web/app/scripts/models/Account.js
@@ -0,0 +1,51 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'utils/apitypes'], function($, Backbone, _, App, Api) {
+ 'use strict';
+
+ return Backbone.Model.extend({
+
+ // TODO
+ // generated, not submitted
+ idAttribute: 'user',
+
+ defaults: {
+ plugin: null,
+ loginname: null,
+ owner: -1,
+ valid: false,
+ validuntil: -1,
+ trafficleft: -1,
+ maxtraffic: -1,
+ premium: false,
+ activated: false,
+ shared: false,
+ options: null
+ },
+
+ // Model Constructor
+ initialize: function() {
+ },
+
+ // Any time a model attribute is set, this method is called
+ validate: function(attrs) {
+
+ },
+
+ save: function(options) {
+ options = App.apiRequest('updateAccountInfo', {account: this.toJSON()}, options);
+ return $.ajax(options);
+ },
+
+ destroy: function(options) {
+ options = App.apiRequest('removeAccount', {account: this.toJSON()}, options);
+ var self = this;
+ options.success = function() {
+ self.trigger('destroy', self, self.collection, options);
+ };
+
+ // TODO request is not dispatched
+// return Backbone.Model.prototype.destroy.call(this, options);
+ return $.ajax(options);
+ }
+ });
+
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/ConfigHolder.js b/pyload/web/app/scripts/models/ConfigHolder.js
new file mode 100644
index 000000000..40efbc7c0
--- /dev/null
+++ b/pyload/web/app/scripts/models/ConfigHolder.js
@@ -0,0 +1,68 @@
+define(['jquery', 'backbone', 'underscore', 'app', './ConfigItem'],
+ function($, Backbone, _, App, ConfigItem) {
+ 'use strict';
+
+ return Backbone.Model.extend({
+
+ defaults: {
+ name: '',
+ label: '',
+ description: '',
+ long_description: null,
+ // simple list but no collection
+ items: null,
+ info: null
+ },
+
+ // Model Constructor
+ initialize: function() {
+
+ },
+
+ // Loads it from server by name
+ fetch: function(options) {
+ options = App.apiRequest('loadConfig/"' + this.get('name') + '"', null, options);
+ return Backbone.Model.prototype.fetch.call(this, options);
+ },
+
+ save: function(options) {
+ var config = this.toJSON();
+ var items = [];
+ // Convert changed items to json
+ _.each(config.items, function(item) {
+ if (item.isChanged()) {
+ items.push(item.prepareSave());
+ }
+ });
+ config.items = items;
+ // TODO: only set new values on success
+
+ options = App.apiRequest('saveConfig', {config: config}, options);
+
+ return $.ajax(options);
+ },
+
+ parse: function(resp) {
+ // Create item models
+ resp.items = _.map(resp.items, function(item) {
+ return new ConfigItem(item);
+ });
+
+ return Backbone.Model.prototype.parse.call(this, resp);
+ },
+
+ isLoaded: function() {
+ return this.has('items') || this.has('long_description');
+ },
+
+ // check if any of the items has changes
+ hasChanges: function() {
+ var items = this.get('items');
+ if (!items) return false;
+ return _.reduce(items, function(a, b) {
+ return a || b.isChanged();
+ }, false);
+ }
+
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/ConfigItem.js b/pyload/web/app/scripts/models/ConfigItem.js
new file mode 100644
index 000000000..2d325c2a2
--- /dev/null
+++ b/pyload/web/app/scripts/models/ConfigItem.js
@@ -0,0 +1,40 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'utils/apitypes'],
+ function($, Backbone, _, App, Api) {
+ 'use strict';
+
+ return Backbone.Model.extend({
+
+ defaults: {
+ name: '',
+ label: '',
+ description: '',
+ input: null,
+ default_value: null,
+ value: null,
+ // additional attributes
+ inputView: null
+ },
+
+ // Model Constructor
+ initialize: function() {
+
+ },
+
+ isChanged: function() {
+ return this.get('inputView') && this.get('inputView').getVal() !== this.get('value');
+ },
+
+ // set new value and return json
+ prepareSave: function() {
+ // set the new value
+ if (this.get('inputView'))
+ this.set('value', this.get('inputView').getVal());
+
+ var data = this.toJSON();
+ delete data.inputView;
+ delete data.description;
+
+ return data;
+ }
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/File.js b/pyload/web/app/scripts/models/File.js
new file mode 100644
index 000000000..562e6b0ae
--- /dev/null
+++ b/pyload/web/app/scripts/models/File.js
@@ -0,0 +1,97 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'utils/apitypes'], function($, Backbone, _, App, Api) {
+ 'use strict';
+
+ var Finished = [Api.DownloadStatus.Finished, Api.DownloadStatus.Skipped];
+ var Failed = [Api.DownloadStatus.Failed, Api.DownloadStatus.Aborted, Api.DownloadStatus.TempOffline, Api.DownloadStatus.Offline];
+ // Unfinished - Other
+
+ return Backbone.Model.extend({
+
+ idAttribute: 'fid',
+
+ defaults: {
+ fid: -1,
+ name: null,
+ package: -1,
+ owner: -1,
+ size: -1,
+ status: -1,
+ media: -1,
+ added: -1,
+ fileorder: -1,
+ download: null,
+
+ // UI attributes
+ selected: false,
+ visible: true,
+ progress: 0,
+ eta: 0
+ },
+
+ // Model Constructor
+ initialize: function() {
+
+ },
+
+ fetch: function(options) {
+ options = App.apiRequest(
+ 'getFileInfo',
+ {fid: this.get('fid')},
+ options);
+
+ return Backbone.Model.prototype.fetch.call(this, options);
+ },
+
+ destroy: function(options) {
+ // also not working when using data
+ options = App.apiRequest(
+ 'deleteFiles/[' + this.get('fid') + ']',
+ null, options);
+ options.method = 'post';
+
+ return Backbone.Model.prototype.destroy.call(this, options);
+ },
+
+ // Does not send a request to the server
+ destroyLocal: function(options) {
+ this.trigger('destroy', this, this.collection, options);
+ },
+
+ restart: function(options) {
+ options = App.apiRequest(
+ 'restartFile',
+ {fid: this.get('fid')},
+ options);
+
+ return $.ajax(options);
+ },
+
+ // Any time a model attribute is set, this method is called
+ validate: function(attrs) {
+
+ },
+
+ setDownloadStatus: function(status) {
+ if (this.isDownload())
+ this.get('download').status = status;
+ },
+
+ isDownload: function() {
+ return this.has('download');
+ },
+
+ isFinished: function() {
+ return _.indexOf(Finished, this.get('download').status) > -1;
+ },
+
+ isUnfinished: function() {
+ return _.indexOf(Finished, this.get('download').status) === -1 && _.indexOf(Failed, this.get('download').status) === -1;
+ },
+
+ isFailed: function() {
+ return _.indexOf(Failed, this.get('download').status) > -1;
+ }
+
+ });
+
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/InteractionTask.js b/pyload/web/app/scripts/models/InteractionTask.js
new file mode 100644
index 000000000..54c739d4b
--- /dev/null
+++ b/pyload/web/app/scripts/models/InteractionTask.js
@@ -0,0 +1,41 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'utils/apitypes'],
+ function($, Backbone, _, App, Api) {
+ 'use strict';
+
+ return Backbone.Model.extend({
+
+ idAttribute: 'iid',
+
+ defaults: {
+ iid: -1,
+ type: null,
+ input: null,
+ default_value: null,
+ title: '',
+ description: '',
+ plugin: '',
+ // additional attributes
+ result: ''
+ },
+
+ // Model Constructor
+ initialize: function() {
+
+ },
+
+ save: function(options) {
+ options = App.apiRequest('setInteractionResult/' + this.get('iid'),
+ {result: this.get('result')}, options);
+
+ return $.ajax(options);
+ },
+
+ isNotification: function() {
+ return this.get('type') === Api.Interaction.Notification;
+ },
+
+ isCaptcha: function() {
+ return this.get('type') === Api.Interaction.Captcha;
+ }
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/Package.js b/pyload/web/app/scripts/models/Package.js
new file mode 100644
index 000000000..a34ec1c69
--- /dev/null
+++ b/pyload/web/app/scripts/models/Package.js
@@ -0,0 +1,119 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'collections/FileList', 'require'],
+ function($, Backbone, _, App, FileList, require) {
+ 'use strict';
+
+ return Backbone.Model.extend({
+
+ idAttribute: 'pid',
+
+ defaults: {
+ pid: -1,
+ name: null,
+ folder: '',
+ root: -1,
+ owner: -1,
+ site: '',
+ comment: '',
+ password: '',
+ added: -1,
+ tags: null,
+ status: -1,
+ shared: false,
+ packageorder: -1,
+ stats: null,
+ fids: null,
+ pids: null,
+ files: null, // Collection
+ packs: null, // Collection
+
+ selected: false // For Checkbox
+ },
+
+ // Model Constructor
+ initialize: function() {
+ },
+
+ toJSON: function(options) {
+ var obj = Backbone.Model.prototype.toJSON.call(this, options);
+ obj.percent = Math.round(obj.stats.linksdone * 100 / obj.stats.linkstotal);
+
+ return obj;
+ },
+
+ // Changes url + method and delegates call to super class
+ fetch: function(options) {
+ options = App.apiRequest(
+ 'getFileTree/' + this.get('pid'),
+ {full: false},
+ options);
+
+ return Backbone.Model.prototype.fetch.call(this, options);
+ },
+
+ // Create a pseudo package und use search to populate data
+ search: function(qry, options) {
+ options = App.apiRequest(
+ 'findFiles',
+ {pattern: qry},
+ options);
+
+ return Backbone.Model.prototype.fetch.call(this, options);
+ },
+
+ save: function(options) {
+ // TODO
+ },
+
+ destroy: function(options) {
+ // TODO: Not working when using data?, array seems to break it
+ options = App.apiRequest(
+ 'deletePackages/[' + this.get('pid') + ']',
+ null, options);
+ options.method = 'post';
+
+ console.log(options);
+
+ return Backbone.Model.prototype.destroy.call(this, options);
+ },
+
+ restart: function(options) {
+ options = App.apiRequest(
+ 'restartPackage',
+ {pid: this.get('pid')},
+ options);
+
+ var self = this;
+ options.success = function() {
+ self.fetch();
+ };
+ return $.ajax(options);
+ },
+
+ parse: function(resp) {
+ // Package is loaded from tree collection
+ if (_.has(resp, 'root')) {
+ if (!this.has('files'))
+ resp.root.files = new FileList(_.values(resp.files));
+ else
+ this.get('files').set(_.values(resp.files));
+
+ // circular dependencies needs to be avoided
+ var PackageList = require('collections/PackageList');
+
+ if (!this.has('packs'))
+ resp.root.packs = new PackageList(_.values(resp.packages));
+ else
+ this.get('packs').set(_.values(resp.packages));
+
+ return resp.root;
+ }
+ return Backbone.model.prototype.parse.call(this, resp);
+ },
+
+ // Any time a model attribute is set, this method is called
+ validate: function(attrs) {
+
+ }
+
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/Progress.js b/pyload/web/app/scripts/models/Progress.js
new file mode 100644
index 000000000..b0bbb684d
--- /dev/null
+++ b/pyload/web/app/scripts/models/Progress.js
@@ -0,0 +1,50 @@
+define(['jquery', 'backbone', 'underscore', 'utils/apitypes'], function($, Backbone, _, Api) {
+ 'use strict';
+
+ return Backbone.Model.extend({
+
+ // generated, not submitted
+ idAttribute: 'pid',
+
+ defaults: {
+ pid: -1,
+ plugin: null,
+ name: null,
+ statusmsg: -1,
+ eta: -1,
+ done: -1,
+ total: -1,
+ download: null
+ },
+
+ getPercent: function() {
+ if (this.get('total') > 0)
+ return Math.round(this.get('done') * 100 / this.get('total'));
+ return 0;
+ },
+
+ // Model Constructor
+ initialize: function() {
+
+ },
+
+ // Any time a model attribute is set, this method is called
+ validate: function(attrs) {
+
+ },
+
+ toJSON: function(options) {
+ var obj = Backbone.Model.prototype.toJSON.call(this, options);
+ obj.percent = this.getPercent();
+ obj.downloading = this.isDownload() && this.get('download').status === Api.DownloadStatus.Downloading;
+
+ return obj;
+ },
+
+ isDownload : function() {
+ return this.has('download');
+ }
+
+ });
+
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/ServerStatus.js b/pyload/web/app/scripts/models/ServerStatus.js
new file mode 100644
index 000000000..59739b41e
--- /dev/null
+++ b/pyload/web/app/scripts/models/ServerStatus.js
@@ -0,0 +1,47 @@
+define(['jquery', 'backbone', 'underscore'],
+ function($, Backbone, _) {
+ 'use strict';
+
+ return Backbone.Model.extend({
+
+ defaults: {
+ speed: 0,
+ linkstotal: 0,
+ linksqueue: 0,
+ sizetotal: 0,
+ sizequeue: 0,
+ notifications: -1,
+ paused: false,
+ download: false,
+ reconnect: false
+ },
+
+ // Model Constructor
+ initialize: function() {
+
+ },
+
+ fetch: function(options) {
+ options || (options = {});
+ options.url = 'api/getServerStatus';
+
+ return Backbone.Model.prototype.fetch.call(this, options);
+ },
+
+ toJSON: function(options) {
+ var obj = Backbone.Model.prototype.toJSON.call(this, options);
+
+ obj.linksdone = obj.linkstotal - obj.linksqueue;
+ obj.sizedone = obj.sizetotal - obj.sizequeue;
+ if (obj.speed && obj.speed > 0)
+ obj.eta = Math.round(obj.sizequeue / obj.speed);
+ else if (obj.sizequeue > 0)
+ obj.eta = Infinity;
+ else
+ obj.eta = 0;
+
+ return obj;
+ }
+
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/TreeCollection.js b/pyload/web/app/scripts/models/TreeCollection.js
new file mode 100644
index 000000000..2f761e6cc
--- /dev/null
+++ b/pyload/web/app/scripts/models/TreeCollection.js
@@ -0,0 +1,50 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'models/Package', 'collections/FileList', 'collections/PackageList'],
+    function($, Backbone, _, App, Package, FileList, PackageList) {
+        'use strict';
+
+        // TreeCollection
+        // A Model and not a collection, aggregates other collections
+        return Backbone.Model.extend({
+
+            defaults: {
+                root: null,      // Package model for the tree root
+                packages: null,  // PackageList of child packages
+                files: null      // FileList of child files
+            },
+
+            initialize: function() {
+
+            },
+
+            // Fetches the file tree for options.pid (-1 = top level)
+            fetch: function(options) {
+                options || (options = {});
+                var pid = options.pid || -1;
+
+                options = App.apiRequest(
+                    'getFileTree/' + pid,
+                    {full: false},
+                    options);
+
+                console.log('Fetching package tree ' + pid);
+                return Backbone.Model.prototype.fetch.call(this, options);
+            },
+
+            // Parse the response and updates the collections
+            parse: function(resp) {
+                var ret = {};
+                // On first fetch the collections are created; afterwards the
+                // existing instances are updated in place via set(), so views
+                // bound to them keep working.
+                if (!this.has('packages'))
+                    ret.packages = new PackageList(_.values(resp.packages));
+                else
+                    this.get('packages').set(_.values(resp.packages));
+
+                if (!this.has('files'))
+                    ret.files = new FileList(_.values(resp.files));
+                else
+                    this.get('files').set(_.values(resp.files));
+
+                ret.root = new Package(resp.root);
+                return ret;
+            }
+
+        });
+    }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/models/UserSession.js b/pyload/web/app/scripts/models/UserSession.js
new file mode 100644
index 000000000..a7e9aa848
--- /dev/null
+++ b/pyload/web/app/scripts/models/UserSession.js
@@ -0,0 +1,20 @@
+define(['jquery', 'backbone', 'underscore', 'utils/apitypes', 'cookie'],
+ function($, Backbone, _, Api) {
+ 'use strict';
+
+ return Backbone.Model.extend({
+
+ idAttribute: 'username',
+
+ defaults: {
+ username: null,
+ permissions: null,
+ session: null
+ },
+
+ // Model Constructor
+ initialize: function() {
+ this.set('session', $.cookie('beaker.session.id'));
+ }
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/router.js b/pyload/web/app/scripts/router.js
new file mode 100644
index 000000000..68ea5575d
--- /dev/null
+++ b/pyload/web/app/scripts/router.js
@@ -0,0 +1,29 @@
+/**
+ * Router defines routes that are handled by registered controller
+ */
+define([
+    // Libraries
+    'backbone',
+    'marionette',
+
+    // Modules
+    'controller'
+],
+    function(Backbone, Marionette, Controller) {
+        'use strict';
+
+        return Backbone.Marionette.AppRouter.extend({
+
+            // Maps URL hash fragments to method names on the controller
+            appRoutes: {
+                '': 'dashboard',
+                'login': 'login',
+                'logout': 'logout',
+                'settings': 'settings',
+                'accounts': 'accounts',
+                'admin': 'admin'
+            },
+
+            // Our controller to handle the routes
+            controller: Controller
+        });
+    });
diff --git a/pyload/web/app/scripts/routers/defaultRouter.js b/pyload/web/app/scripts/routers/defaultRouter.js
new file mode 100644
index 000000000..4b00d160c
--- /dev/null
+++ b/pyload/web/app/scripts/routers/defaultRouter.js
@@ -0,0 +1,30 @@
+define(['jquery', 'backbone', 'views/headerView'], function($, Backbone, HeaderView) {
+ 'use strict';
+
+ var Router = Backbone.Router.extend({
+
+ initialize: function() {
+ Backbone.history.start();
+ },
+
+ // All of your Backbone Routes (add more)
+ routes: {
+
+ // When there is no hash bang on the url, the home method is called
+ '': 'home'
+
+ },
+
+ 'home': function() {
+ // Instantiating mainView and anotherView instances
+ var headerView = new HeaderView();
+
+ // Renders the mainView template
+ headerView.render();
+
+ }
+ });
+
+ // Returns the Router class
+ return Router;
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/routers/mobileRouter.js b/pyload/web/app/scripts/routers/mobileRouter.js
new file mode 100644
index 000000000..e24cb7a34
--- /dev/null
+++ b/pyload/web/app/scripts/routers/mobileRouter.js
@@ -0,0 +1,56 @@
+define(['jquery', 'backbone', 'underscore'], function($, Backbone, _) {
+    'use strict';
+
+    // Experimental router for the mobile UI with slide page transitions.
+    return Backbone.Router.extend({
+
+        initialize: function() {
+            // Keep 'this' bound when changePage is used as a callback
+            _.bindAll(this, 'changePage');
+
+            this.$el = $('#content');
+
+            // Tells Backbone to start watching for hashchange events
+            Backbone.history.start();
+
+        },
+
+        // All of your Backbone Routes (add more)
+        routes: {
+
+            // When there is no hash bang on the url, the home method is called
+            '': 'home'
+
+        },
+
+        'home': function() {
+
+            var self = this;
+
+            // Demo pages wired to the two buttons
+            $('#p1').fastClick(function() {
+                self.changePage($('<div class=\'page\' style=\'background-color: #9acd32;\'><h1>Page 1</h1><br>some content<br>sdfdsf<br>sdffg<h3>oiuzz</h3></div>'));
+            });
+
+            $('#p2').bind('click', function() {
+                self.changePage($('<div class=\'page\' style=\'background-color: blue;\'><h1>Page 2</h1><br>some content<br>sdfdsf<br><h2>sdfsdf</h2>sdffg</div>'));
+            });
+
+        },
+
+        // Slides the new page in from the right, then removes the old one
+        changePage: function(content) {
+
+            var oldpage = this.$el.find('.page');
+            // Place the new page off-screen, append, then animate to x:0
+            content.css({x: '100%'});
+            this.$el.append(content);
+            content.transition({x: 0}, function() {
+                // Delay removal so the outgoing page stays visible
+                // during the transition (400ms)
+                window.setTimeout(function() {
+                    oldpage.remove();
+                }, 400);
+            });
+
+// $("#viewport").transition({x: "100%"}, function(){
+// $("#viewport").html(content);
+// $("#viewport").transition({x: 0});
+// });
+        }
+
+    });
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/utils/animations.js b/pyload/web/app/scripts/utils/animations.js
new file mode 100644
index 000000000..7f89afef1
--- /dev/null
+++ b/pyload/web/app/scripts/utils/animations.js
@@ -0,0 +1,129 @@
+// jQuery helper plugins for slide/fade animations; also patches
+// $.fn.transit to understand opacity 'hide'/'show' like jQuery animate.
+define(['jquery', 'underscore', 'transit'], function(jQuery, _) {
+    'use strict';
+
+    // Adds an element and computes its height, which is saved as data attribute
+    // Important function to have slide animations
+    jQuery.fn.appendWithHeight = function(element, hide) {
+        var o = jQuery(this[0]);
+        element = jQuery(element);
+
+        // TODO: additionally it could be placed out of viewport first
+        // The real height can only be retrieved when element is on DOM and display:true
+        element.css('visibility', 'hidden');
+        o.append(element);
+
+        var height = element.height();
+
+        // Hide the element
+        if (hide === true) {
+            element.hide();
+            element.height(0);
+        }
+
+        element.css('visibility', '');
+        element.data('height', height);
+
+        return this;
+    };
+
+    // Shortcut to have a animation when element is added
+    jQuery.fn.appendWithAnimation = function(element, animation) {
+        var o = jQuery(this[0]);
+        element = jQuery(element);
+
+        if (animation === true)
+            element.hide();
+
+        o.append(element);
+
+        if (animation === true)
+            element.fadeIn();
+
+// element.calculateHeight();
+
+        return this;
+    };
+
+    // calculate the height and write it to data, should be used on invisible elements
+    jQuery.fn.calculateHeight = function(setHeight) {
+        var o = jQuery(this[0]);
+        var height = o.height();
+        // Hidden elements report height 0 - briefly show them invisibly
+        // to measure, then restore the previous display state
+        if (!height) {
+            var display = o.css('display');
+            o.css('visibility', 'hidden');
+            o.show();
+            height = o.height();
+
+            o.css('display', display);
+            o.css('visibility', '');
+        }
+
+        if (setHeight)
+            o.css('height', height);
+
+        o.data('height', height);
+        return this;
+    };
+
+    // TODO: carry arguments, optional height argument
+
+    // reset arguments, sets overflow hidden
+    // Expands the element to the height stored by calculateHeight/appendWithHeight
+    jQuery.fn.slideOut = function(reset) {
+        var o = jQuery(this[0]);
+        o.animate({height: o.data('height'), opacity: 'show'}, function() {
+            // reset css attributes;
+            if (reset) {
+                this.css('overflow', '');
+                this.css('height', '');
+            }
+        });
+        return this;
+    };
+
+    // Collapses the element to height 0 while fading out
+    jQuery.fn.slideIn = function(reset) {
+        var o = jQuery(this[0]);
+        if (reset) {
+            o.css('overflow', 'hidden');
+        }
+        o.animate({height: 0, opacity: 'hide'});
+        return this;
+    };
+
+    // Activates bootstrap tooltips on descendants marked with data-toggle
+    jQuery.fn.initTooltips = function(placement) {
+        placement || (placement = 'top');
+
+        var o = jQuery(this[0]);
+        o.find('[data-toggle="tooltip"]').tooltip(
+            {
+                delay: {show: 800, hide: 100},
+                placement: placement
+            });
+
+        return this;
+    };
+
+    jQuery.fn._transit = jQuery.fn.transit;
+
+    // Overriding transit plugin to support hide and show
+    jQuery.fn.transit = jQuery.fn.transition = function(props, duration, easing, callback) {
+        var self = this;
+        var cb = callback;
+        var newprops = _.extend({}, props);
+
+        // 'hide': fade to 0, then set display:none in the callback
+        if (newprops && (newprops.opacity === 'hide')) {
+            newprops.opacity = 0;
+
+            callback = function() {
+                self.css({display: 'none'});
+                if (typeof cb === 'function') {
+                    cb.apply(self);
+                }
+            };
+        } else if (newprops && (newprops.opacity === 'show')) {
+            // 'show': make visible first, then fade to 1
+            newprops.opacity = 1;
+            this.css({display: 'block'});
+        }
+
+        return this._transit(newprops, duration, easing, callback);
+    };
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/utils/apitypes.js b/pyload/web/app/scripts/utils/apitypes.js
new file mode 100644
index 000000000..342f61f68
--- /dev/null
+++ b/pyload/web/app/scripts/utils/apitypes.js
@@ -0,0 +1,16 @@
+// Autogenerated, do not edit!
+/*jslint -W070: false*/
+// Enum constants mirrored from the server-side API definitions;
+// regenerate this file instead of editing values by hand.
+define([], function() {
+    'use strict';
+    return {
+        DownloadState: {'Failed': 3, 'All': 0, 'Unmanaged': 4, 'Finished': 1, 'Unfinished': 2},
+        DownloadStatus: {'Downloading': 10, 'NA': 0, 'Processing': 14, 'Waiting': 9, 'Decrypting': 13, 'Paused': 4, 'Failed': 7, 'Finished': 5, 'Skipped': 6, 'Unknown': 16, 'Aborted': 12, 'Online': 2, 'TempOffline': 11, 'Offline': 1, 'Custom': 15, 'Starting': 8, 'Queued': 3},
+        FileStatus: {'Remote': 2, 'Ok': 0, 'Missing': 1},
+        InputType: {'PluginList': 13, 'Multiple': 11, 'Int': 2, 'NA': 0, 'Time': 7, 'List': 12, 'Bool': 8, 'File': 3, 'Text': 1, 'Table': 14, 'Folder': 4, 'Password': 6, 'Click': 9, 'Select': 10, 'Textbox': 5},
+        Interaction: {'Captcha': 2, 'All': 0, 'Query': 4, 'Notification': 1},
+        MediaType: {'All': 0, 'Audio': 2, 'Image': 4, 'Other': 1, 'Video': 8, 'Document': 16, 'Archive': 32},
+        PackageStatus: {'Paused': 1, 'Remote': 3, 'Folder': 2, 'Ok': 0},
+        Permission: {'All': 0, 'Interaction': 32, 'Modify': 4, 'Add': 1, 'Accounts': 16, 'Plugins': 64, 'Download': 8, 'Delete': 2},
+        Role: {'Admin': 0, 'User': 1},
+    };
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/utils/dialogs.js b/pyload/web/app/scripts/utils/dialogs.js
new file mode 100644
index 000000000..3ceffc9c3
--- /dev/null
+++ b/pyload/web/app/scripts/utils/dialogs.js
@@ -0,0 +1,15 @@
+// Loads all helper and set own handlebars rules
+define(['jquery', 'underscore', 'views/abstract/modalView'], function($, _, Modal) {
+ 'use strict';
+
+ // Shows the confirm dialog for given context
+ // on success executes func with context
+ _.confirm = function(template, func, context) {
+ template = 'hbs!tpl/' + template;
+ _.requireOnce([template], function(html) {
+ var dialog = new Modal(html, _.bind(func, context));
+ dialog.show();
+ });
+
+ };
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/utils/i18n.js b/pyload/web/app/scripts/utils/i18n.js
new file mode 100644
index 000000000..a8d948b4a
--- /dev/null
+++ b/pyload/web/app/scripts/utils/i18n.js
@@ -0,0 +1,5 @@
+// Shared Jed instance used for translations across the app.
+define(['jed'], function(Jed) {
+    'use strict';
+    // TODO load i18n data
+    // NOTE(review): catalog is empty for now, so lookups presumably fall
+    // back to the passed msgid - verify once locale data is wired up.
+    return new Jed({});
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/utils/lazyRequire.js b/pyload/web/app/scripts/utils/lazyRequire.js
new file mode 100644
index 000000000..96c07aa24
--- /dev/null
+++ b/pyload/web/app/scripts/utils/lazyRequire.js
@@ -0,0 +1,97 @@
+// Define the module.
+define(
+ [
+ 'require', 'underscore'
+ ],
+ function( require, _ ){
+ 'use strict';
+
+
+ // Define the states of loading for a given set of modules
+ // within a require() statement.
+ var states = {
+ unloaded: 'UNLOADED',
+ loading: 'LOADING',
+ loaded: 'LOADED'
+ };
+
+
+ // Define the top-level module container. Mostly, we're making
+ // the top-level container a non-Function so that users won't
+ // try to invoke this without calling the once() method below.
+ var lazyRequire = {};
+
+
+ // I will return a new, unique instance of the requrieOnce()
+ // method. Each instance will only call the require() method
+ // once internally.
+ lazyRequire.once = function(){
+
+ // The modules start in an unloaded state before
+ // requireOnce() is invoked by the calling code.
+ var state = states.unloaded;
+ var args;
+
+ var requireOnce = function(dependencies, loadCallback ){
+
+ // Use the module state to determine which method to
+ // invoke (or just to ignore the invocation).
+ if (state === states.loaded){
+ loadCallback.apply(null, args);
+
+ // The modules have not yet been requested - let's
+ // lazy load them.
+ } else if (state !== states.loading){
+
+ // We're about to load the modules asynchronously;
+ // flag the interim state.
+ state = states.loading;
+
+ // Load the modules.
+ require(
+ dependencies,
+ function(){
+
+ args = arguments;
+ loadCallback.apply( null, args );
+ state = states.loaded;
+
+
+ }
+ );
+
+ // RequireJS is currently loading the modules
+ // asynchronously, but they have not finished
+ // loading yet.
+ } else {
+
+ // Simply ignore this call.
+ return;
+
+ }
+
+ };
+
+ // Return the new lazy loader.
+ return( requireOnce );
+
+ };
+
+
+ // -------------------------------------------------- //
+ // -------------------------------------------------- //
+
+ // Set up holder for underscore
+ var instances = {};
+ _.requireOnce = function(dependencies, loadCallback) {
+ if (!_.has(instances, dependencies))
+ instances[dependencies] = lazyRequire.once();
+
+ return instances[dependencies](dependencies, loadCallback);
+ };
+
+
+ // Return the module definition.
+ return( lazyRequire );
+ }
+); \ No newline at end of file
diff --git a/pyload/web/app/scripts/vendor/jquery.omniwindow.js b/pyload/web/app/scripts/vendor/jquery.omniwindow.js
new file mode 100644
index 000000000..e1f0b8f77
--- /dev/null
+++ b/pyload/web/app/scripts/vendor/jquery.omniwindow.js
@@ -0,0 +1,141 @@
+// jQuery OmniWindow plugin
+// @version: 0.7.0
+// @author: Rudenka Alexander (mur.mailbox@gmail.com)
+// @license: MIT
+// NOTE(review): vendor library - keep local modifications minimal.
+
+;(function($) {
+  "use strict";
+  $.fn.extend({
+    omniWindow: function(options) {
+
+      options = $.extend(true, {
+        animationsPriority: {
+          show: ['overlay', 'modal'],
+          hide: ['modal', 'overlay']
+        },
+        overlay: {
+          selector: '.ow-overlay',
+          hideClass: 'ow-closed',
+          animations: {
+            show: function(subjects, internalCallback) { return internalCallback(subjects); },
+            hide: function(subjects, internalCallback) { return internalCallback(subjects); },
+            internal: {
+              show: function(subjects){ subjects.overlay.removeClass(options.overlay.hideClass); },
+              hide: function(subjects){ subjects.overlay.addClass(options.overlay.hideClass); }
+            }
+          }
+        },
+        modal: {
+          hideClass: 'ow-closed',
+          animations: {
+            show: function(subjects, internalCallback) { return internalCallback(subjects); },
+            hide: function(subjects, internalCallback) { return internalCallback(subjects); },
+            internal: {
+              show: function(subjects){ subjects.modal.removeClass(options.modal.hideClass); },
+              hide: function(subjects){ subjects.modal.addClass(options.modal.hideClass); }
+            }
+          },
+          internal: {
+            stateAttribute: 'ow-active'
+          }
+        },
+        eventsNames: {
+          show: 'show.ow',
+          hide: 'hide.ow',
+          internal: {
+            overlayClick: 'click.ow',
+            keyboardKeyUp: 'keyup.ow'
+          }
+        },
+        callbacks: { // Callbacks execution chain
+          beforeShow: function(subjects, internalCallback) { return internalCallback(subjects); }, // 1 (stop if returns false)
+          positioning: function(subjects, internalCallback) { return internalCallback(subjects); }, // 2
+          afterShow: function(subjects, internalCallback) { return internalCallback(subjects); }, // 3
+          beforeHide: function(subjects, internalCallback) { return internalCallback(subjects); }, // 4 (stop if returns false)
+          afterHide: function(subjects, internalCallback) { return internalCallback(subjects); }, // 5
+          internal: {
+            beforeShow: function(subjects) {
+              if (subjects.modal.data(options.modal.internal.stateAttribute)) {
+                return false;
+              } else {
+                subjects.modal.data(options.modal.internal.stateAttribute, true);
+                return true;
+              }
+            },
+            afterShow: function(subjects) {
+              $(document).on(options.eventsNames.internal.keyboardKeyUp, function(e) {
+                if (e.keyCode === 27) { // if the key pressed is the ESC key
+                  subjects.modal.trigger(options.eventsNames.hide);
+                }
+              });
+
+              subjects.overlay.on(options.eventsNames.internal.overlayClick, function(){
+                subjects.modal.trigger(options.eventsNames.hide);
+              });
+            },
+            positioning: function(subjects) {
+              subjects.modal.css('margin-left', Math.round(subjects.modal.outerWidth() / -2));
+            },
+            beforeHide: function(subjects) {
+              if (subjects.modal.data(options.modal.internal.stateAttribute)) {
+                subjects.modal.data(options.modal.internal.stateAttribute, false);
+                return true;
+              } else {
+                return false;
+              }
+            },
+            afterHide: function(subjects) {
+              subjects.overlay.off(options.eventsNames.internal.overlayClick);
+              $(document).off(options.eventsNames.internal.keyboardKeyUp);
+
+              subjects.overlay.css('display', ''); // clear inline styles after jQ animations
+              subjects.modal.css('display', '');
+            }
+          }
+        }
+      }, options);
+
+      // Runs user + internal animations for 'show'/'hide' in the
+      // configured priority order, then fires the after-callback.
+      var animate = function(process, subjects, callbackName) {
+        var first = options.animationsPriority[process][0],
+            second = options.animationsPriority[process][1];
+
+        options[first].animations[process](subjects, function(subjs) { // call USER's FIRST animation (depends on priority)
+          options[first].animations.internal[process](subjs); // call internal FIRST animation
+
+          options[second].animations[process](subjects, function(subjs) { // call USER's SECOND animation
+            options[second].animations.internal[process](subjs); // call internal SECOND animation
+
+            // then we need to call USER's
+            // afterShow of afterHide callback
+            options.callbacks[callbackName](subjects, options.callbacks.internal[callbackName]);
+          });
+        });
+      };
+
+      var showModal = function(subjects) {
+        if (!options.callbacks.beforeShow(subjects, options.callbacks.internal.beforeShow)) { return; } // cancel showing if beforeShow callback return false
+
+        options.callbacks.positioning(subjects, options.callbacks.internal.positioning);
+
+        animate('show', subjects, 'afterShow');
+      };
+
+      var hideModal = function(subjects) {
+        if (!options.callbacks.beforeHide(subjects, options.callbacks.internal.beforeHide)) { return; } // cancel hiding if beforeHide callback return false
+
+        animate('hide', subjects, 'afterHide');
+      };
+
+
+      var $overlay = $(options.overlay.selector);
+
+      return this.each(function() {
+        var $modal = $(this);
+        var subjects = {modal: $modal, overlay: $overlay};
+
+        $modal.bind(options.eventsNames.show, function(){ showModal(subjects); })
+              .bind(options.eventsNames.hide, function(){ hideModal(subjects); });
+      });
+    }
+  });
+})(jQuery); \ No newline at end of file
diff --git a/pyload/web/app/scripts/vendor/remaining.js b/pyload/web/app/scripts/vendor/remaining.js
new file mode 100644
index 000000000..d66a2931a
--- /dev/null
+++ b/pyload/web/app/scripts/vendor/remaining.js
@@ -0,0 +1,149 @@
+/**
+ * Javascript Countdown
+ * Copyright (c) 2009 Markus Hedlund
+ * Version 1.1
+ * Licensed under MIT license
+ * http://www.opensource.org/licenses/mit-license.php
+ * http://labs.mimmin.com/countdown
+ */
+define([], function() {
+ var remaining = {
+ /**
+ * Get the difference of the passed date, and now. The different formats of the taget parameter are:
+ * January 12, 2009 15:14:00 (Month dd, yyyy hh:mm:ss)
+ * January 12, 2009 (Month dd, yyyy)
+ * 09,00,12,15,14,00 (yy,mm,dd,hh,mm,ss) Months range from 0-11, not 1-12.
+ * 09,00,12 (yy,mm,dd) Months range from 0-11, not 1-12.
+ * 500 (milliseconds)
+ * 2009-01-12 15:14:00 (yyyy-mm-dd hh-mm-ss)
+ * 2009-01-12 15:14 (yyyy-mm-dd hh-mm)
+ * @param target Target date. Can be either a date object or a string (formated like '24 December, 2010 15:00:00')
+ * @return Difference in seconds
+ */
+ getSeconds: function(target) {
+ var today = new Date();
+
+ if (typeof(target) == 'object') {
+ var targetDate = target;
+ } else {
+ var matches = target.match(/(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2})(:(\d{2}))?/); // YYYY-MM-DD HH-MM-SS
+ if (matches != null) {
+ matches[7] = typeof(matches[7]) == 'undefined' ? '00' : matches[7];
+ var targetDate = new Date(matches[1], matches[2] - 1, matches[3], matches[4], matches[5], matches[7]);
+ } else {
+ var targetDate = new Date(target);
+ }
+ }
+
+ return Math.floor((targetDate.getTime() - today.getTime()) / 1000);
+ },
+
+ /**
+ * @param seconds Difference in seconds
+ * @param i18n A language object (see code)
+ * @param onlyLargestUnit Return only the largest unit (see documentation)
+ * @param hideEmpty Hide empty units (see documentation)
+ * @return String formated something like '1 week, 1 hours, 1 second'
+ */
+ getString: function(seconds, i18n, onlyLargestUnit, hideEmpty) {
+ if (seconds < 1) {
+ return '';
+ }
+
+ if (typeof(hideEmpty) == 'undefined' || hideEmpty == null) {
+ hideEmpty = true;
+ }
+ if (typeof(onlyLargestUnit) == 'undefined' || onlyLargestUnit == null) {
+ onlyLargestUnit = false;
+ }
+ if (typeof(i18n) == 'undefined' || i18n == null) {
+ i18n = {
+ weeks: ['week', 'weeks'],
+ days: ['day', 'days'],
+ hours: ['hour', 'hours'],
+ minutes: ['minute', 'minutes'],
+ seconds: ['second', 'seconds']
+ };
+ }
+
+ var units = {
+ weeks: 7 * 24 * 60 * 60,
+ days: 24 * 60 * 60,
+ hours: 60 * 60,
+ minutes: 60,
+ seconds: 1
+ };
+
+ var returnArray = [];
+ var value;
+ for (unit in units) {
+ value = units[unit];
+ if (seconds / value >= 1 || unit == 'seconds' || !hideEmpty) {
+ secondsConverted = Math.floor(seconds / value);
+ var i18nUnit = i18n[unit][secondsConverted == 1 ? 0 : 1];
+ returnArray.push(secondsConverted + ' ' + i18nUnit);
+ seconds -= secondsConverted * value;
+
+ if (onlyLargestUnit) {
+ break;
+ }
+ }
+ }
+ ;
+
+ return returnArray.join(', ');
+ },
+
+ /**
+ * @param seconds Difference in seconds
+ * @return String formated something like '169:00:01'
+ */
+ getStringDigital: function(seconds) {
+ if (seconds < 1) {
+ return '';
+ }
+
+ remainingTime = remaining.getArray(seconds);
+
+ for (index in remainingTime) {
+ remainingTime[index] = remaining.padNumber(remainingTime[index]);
+ }
+ ;
+
+ return remainingTime.join(':');
+ },
+
+ /**
+ * @param seconds Difference in seconds
+ * @return Array with hours, minutes and seconds
+ */
+ getArray: function(seconds) {
+ if (seconds < 1) {
+ return [];
+ }
+
+ var units = [60 * 60, 60, 1];
+
+ var returnArray = [];
+ var value;
+ for (index in units) {
+ value = units[index];
+ secondsConverted = Math.floor(seconds / value);
+ returnArray.push(secondsConverted);
+ seconds -= secondsConverted * value;
+ }
+ ;
+
+ return returnArray;
+ },
+
+ /**
+ * @param number An integer
+ * @return Integer padded with a 0 if necessary
+ */
+ padNumber: function(number) {
+ return (number >= 0 && number < 10) ? '0' + number : number;
+ }
+ };
+ return remaining;
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/abstract/itemView.js b/pyload/web/app/scripts/views/abstract/itemView.js
new file mode 100644
index 000000000..c37118a4c
--- /dev/null
+++ b/pyload/web/app/scripts/views/abstract/itemView.js
@@ -0,0 +1,47 @@
+define(['jquery', 'backbone', 'underscore'], function($, Backbone, _) {
+ 'use strict';
+
+ // A view that is meant for temporary displaying
+ // All events must be unbound in onDestroy
+ return Backbone.View.extend({
+
+ tagName: 'li',
+ destroy: function() {
+ this.undelegateEvents();
+ this.unbind();
+ if (this.onDestroy) {
+ this.onDestroy();
+ }
+ this.$el.removeData().unbind();
+ this.remove();
+ },
+
+ hide: function() {
+ this.$el.slideUp();
+ },
+
+ show: function() {
+ this.$el.slideDown();
+ },
+
+ unrender: function() {
+ var self = this;
+ this.$el.slideUp(function() {
+ self.destroy();
+ });
+ },
+
+ deleteItem: function(e) {
+ if (e)
+ e.stopPropagation();
+ this.model.destroy();
+ },
+
+ restart: function(e) {
+ if(e)
+ e.stopPropagation();
+ this.model.restart();
+ }
+
+ });
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/abstract/modalView.js b/pyload/web/app/scripts/views/abstract/modalView.js
new file mode 100644
index 000000000..65bc0a3c8
--- /dev/null
+++ b/pyload/web/app/scripts/views/abstract/modalView.js
@@ -0,0 +1,124 @@
+// Base class for modal dialogs, rendered via the omniWindow plugin.
+define(['jquery', 'backbone', 'underscore', 'omniwindow'], function($, Backbone, _) {
+    'use strict';
+
+    return Backbone.View.extend({
+
+        events: {
+            'click .btn-confirm': 'confirm',
+            'click .btn-close': 'hide',
+            'click .close': 'hide'
+        },
+
+        // Compiled handlebars template for the dialog body
+        template: null,
+        // The omniWindow wrapped element, created lazily in render()
+        dialog: null,
+
+        // When true the view destroys itself after hiding
+        onHideDestroy: false,
+        // Invoked when the user clicks the confirm button
+        confirmCallback: null,
+
+        initialize: function(template, confirm) {
+            this.confirmCallback = confirm;
+            var self = this;
+            if (this.template === null) {
+                if (template) {
+                    this.template = template;
+                    // When template was provided this is a temporary dialog
+                    this.onHideDestroy = true;
+                }
+                else
+                    // Fall back to the generic modal template (async load)
+                    require(['hbs!tpl/dialogs/modal'], function(template) {
+                        self.template = template;
+                    });
+            }
+        },
+
+        // TODO: whole modal stuff is not very elegant
+        // Renders the template and, on first call, wires up omniWindow
+        // with fade/scale show and hide animations.
+        render: function() {
+            this.$el.html(this.template(this.renderContent()));
+            this.onRender();
+
+            if (this.dialog === null) {
+                this.$el.addClass('modal hide');
+                this.$el.css({opacity: 0, scale: 0.7});
+
+                var self = this;
+                $('body').append(this.el);
+                this.dialog = this.$el.omniWindow({
+                    overlay: {
+                        selector: '#modal-overlay',
+                        hideClass: 'hide',
+                        animations: {
+                            hide: function(subjects, internalCallback) {
+                                subjects.overlay.transition({opacity: 'hide', delay: 100}, 300, function() {
+                                    internalCallback(subjects);
+                                    self.onHide();
+                                    if (self.onHideDestroy)
+                                        self.destroy();
+                                });
+                            },
+                            show: function(subjects, internalCallback) {
+                                subjects.overlay.fadeIn(300);
+                                internalCallback(subjects);
+                            }}},
+                    modal: {
+                        hideClass: 'hide',
+                        animations: {
+                            hide: function(subjects, internalCallback) {
+                                subjects.modal.transition({opacity: 'hide', scale: 0.7}, 300);
+                                internalCallback(subjects);
+                            },
+
+                            show: function(subjects, internalCallback) {
+                                subjects.modal.transition({opacity: 'show', scale: 1, delay: 100}, 300, function() {
+                                    internalCallback(subjects);
+                                });
+                            }}
+                    }});
+            }
+
+            return this;
+        },
+
+        // Hook for subclasses, called after the template was rendered
+        onRender: function() {
+
+        },
+
+        // Subclasses return the template context object here
+        renderContent: function() {
+            return {};
+        },
+
+        // Renders on first use, then triggers the omniWindow show event
+        show: function() {
+            if (this.dialog === null)
+                this.render();
+
+            this.dialog.trigger('show');
+
+            this.onShow();
+        },
+
+        onShow: function() {
+
+        },
+
+        hide: function() {
+            this.dialog.trigger('hide');
+        },
+
+        onHide: function() {
+
+        },
+
+        // Runs the confirm callback (if any), then closes the dialog
+        confirm: function() {
+            if (this.confirmCallback)
+                this.confirmCallback.apply();
+
+            this.hide();
+        },
+
+        // Removes the element from the DOM and resets the dialog state
+        destroy: function() {
+            this.$el.remove();
+            this.dialog = null;
+            this.remove();
+        }
+
+    });
+}); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/accounts/accountListView.js b/pyload/web/app/scripts/views/accounts/accountListView.js
new file mode 100644
index 000000000..4eb5bfe7d
--- /dev/null
+++ b/pyload/web/app/scripts/views/accounts/accountListView.js
@@ -0,0 +1,52 @@
define(['jquery', 'underscore', 'backbone', 'app', 'collections/AccountList', './accountView',
    'hbs!tpl/accounts/layout', 'hbs!tpl/accounts/actionbar'],
    function($, _, Backbone, App, AccountList, accountView, template, templateBar) {
        'use strict';

        // Overview page listing all configured accounts
        return Backbone.Marionette.CollectionView.extend({

            itemView: accountView,
            template: template,

            collection: null,
            modal: null,

            initialize: function() {
                var boundAdd = _.bind(this.addAccount, this);
                // Action bar rendered next to the list; its button opens the add dialog
                this.actionbar = Backbone.Marionette.ItemView.extend({
                    template: templateBar,
                    events: {
                        'click .btn': 'addAccount'
                    },
                    addAccount: boundAdd
                });

                this.collection = new AccountList();
                this.update();

                this.listenTo(App.vent, 'accounts:updated', this.update);
            },

            // Re-fetch the account list from the server
            update: function() {
                this.collection.fetch();
            },

            onBeforeRender: function() {
                this.$el.html(template());
            },

            // Item views go into the dedicated list element, not the root
            appendHtml: function(collectionView, itemView, index) {
                this.$('.account-list').append(itemView.el);
            },

            // Lazy-load the modal dialog module once, then display it
            addAccount: function() {
                var view = this;
                _.requireOnce(['views/accounts/accountModal'], function(ModalView) {
                    if (view.modal === null)
                        view.modal = new ModalView();

                    view.modal.show();
                });
            }
        });
    });
diff --git a/pyload/web/app/scripts/views/accounts/accountModal.js b/pyload/web/app/scripts/views/accounts/accountModal.js
new file mode 100644
index 000000000..6c2b226df
--- /dev/null
+++ b/pyload/web/app/scripts/views/accounts/accountModal.js
@@ -0,0 +1,72 @@
define(['jquery', 'underscore', 'app', 'views/abstract/modalView', 'hbs!tpl/dialogs/addAccount', 'helpers/pluginIcon', 'select2'],
    function($, _, App, modalView, template, pluginIcon) {
        'use strict';
        // Modal dialog for creating a new account
        return modalView.extend({

            events: {
                'submit form': 'add',
                'click .btn-add': 'add'
            },
            template: template,
            plugins: null,
            select: null,

            initialize: function() {
                // Inherit parent events
                this.events = _.extend({}, modalView.prototype.events, this.events);
                var view = this;
                // Load the available account plugins, then render the dialog
                $.ajax(App.apiRequest('getAccountTypes', null, {success: function(types) {
                    view.plugins = _.sortBy(types, function(name) {
                        return name;
                    });
                    view.render();
                }}));
            },

            onRender: function() {
                // TODO: could be a separate input type if needed on multiple pages
                if (!this.plugins)
                    return;

                var options = {
                    escapeMarkup: function(m) {
                        return m;
                    },
                    formatResult: this.format,
                    formatSelection: this.format,
                    data: {results: this.plugins, text: function(item) {
                        return item;
                    }},
                    id: function(item) {
                        return item;
                    }
                };
                this.select = this.$('#pluginSelect').select2(options);
            },

            onShow: function() {
            },

            onHide: function() {
            },

            // Entry rendered with the plugin's icon in front of its name
            format: function(data) {
                return '<img class="logo-select" src="' + pluginIcon(data) + '"> ' + data;
            },

            // Sends the entered credentials to the server
            add: function(e) {
                e.stopPropagation();
                if (this.select) {
                    var view = this;
                    var payload = {
                        plugin: this.select.val(),
                        login: this.$('#login').val(),
                        password: this.$('#password').val()
                    };

                    $.ajax(App.apiRequest('updateAccount', payload, {success: function() {
                        App.vent.trigger('accounts:updated');
                        view.hide();
                    }}));
                }
                return false;
            }
        });
    });
diff --git a/pyload/web/app/scripts/views/accounts/accountView.js b/pyload/web/app/scripts/views/accounts/accountView.js
new file mode 100644
index 000000000..89f69d7e7
--- /dev/null
+++ b/pyload/web/app/scripts/views/accounts/accountView.js
@@ -0,0 +1,18 @@
define(['jquery', 'underscore', 'backbone', 'app', 'hbs!tpl/accounts/account'],
    function($, _, Backbone, App, template) {
        'use strict';

        // Table row for a single account
        return Backbone.Marionette.ItemView.extend({

            template: template,
            tagName: 'tr',

            events: {
                'click .btn-danger': 'deleteAccount'
            },

            // Deletes the account on the server and removes it from the collection
            deleteAccount: function() {
                this.model.destroy();
            }
        });
    });
diff --git a/pyload/web/app/scripts/views/dashboard/dashboardView.js b/pyload/web/app/scripts/views/dashboard/dashboardView.js
new file mode 100644
index 000000000..8a0446203
--- /dev/null
+++ b/pyload/web/app/scripts/views/dashboard/dashboardView.js
@@ -0,0 +1,172 @@
define(['jquery', 'backbone', 'underscore', 'app', 'models/TreeCollection', 'collections/FileList',
    './packageView', './fileView', 'hbs!tpl/dashboard/layout', 'select2'],
    function($, Backbone, _, App, TreeCollection, FileList, PackageView, FileView, template) {
        'use strict';
        // Renders whole dashboard: package list plus the files of the open package
        return Backbone.Marionette.ItemView.extend({

            template: template,

            events: {
            },

            ui: {
                'packages': '.package-list',
                'files': '.file-list'
            },

            // Package tree
            tree: null,
            // Current open files; null while nothing is shown or loading runs
            files: null,
            // True when loading animation is running
            isLoading: false,

            initialize: function() {
                App.dashboard = this;
                this.tree = new TreeCollection();

                var self = this;
                // When package is added we reload the data
                App.vent.on('package:added', function() {
                    console.log('Package tree caught, package:added event');
                    self.tree.fetch();
                });

                App.vent.on('file:updated', _.bind(this.fileUpdated, this));

                // TODO: merge?
                this.init();
                // TODO: file:added
                // TODO: package:deleted
                // TODO: package:updated
            },

            init: function() {
                var self = this;
                // TODO: put in separated function
                // TODO: order of elements?
                // Init the tree and callback for package added
                this.tree.fetch({success: function() {
                    self.update();
                    self.tree.get('packages').on('add', function(pack) {
                        console.log('Package ' + pack.get('pid') + ' added to tree');
                        self.appendPackage(pack, 0, true);
                        self.openPackage(pack);
                    });
                }});

                this.$('.input').select2({tags: ['a', 'b', 'sdf']});
            },

            // Rebuilds the package list and shows either the already open
            // files or the content of the first package
            update: function() {
                console.log('Update package list');

                var packs = this.tree.get('packages');
                this.files = this.tree.get('files');

                if (packs)
                    packs.each(_.bind(this.appendPackage, this));

                if (!this.files || this.files.length === 0) {
                    // no files are displayed
                    this.files = null;
                    // Open the first package
                    if (packs && packs.length >= 1)
                        this.openPackage(packs.at(0));
                }
                else
                    this.files.each(_.bind(this.appendFile, this));

                return this;
            },

            // TODO sorting ?!
            // Append a package to the list, index, animate it
            appendPackage: function(pack, i, animation) {
                var el = new PackageView({model: pack}).render().el;
                $(this.ui.packages).appendWithAnimation(el, animation);
            },

            appendFile: function(file, i, animation) {
                var el = new FileView({model: file}).render().el;
                $(this.ui.files).appendWithAnimation(el, animation);
            },

            // Show content of the packages on main view
            openPackage: function(pack) {
                var self = this;

                // load animation only when something is shown and its different from current package
                if (this.files && this.files !== pack.get('files'))
                    self.loading();

                pack.fetch({silent: true, success: function() {
                    console.log('Package ' + pack.get('pid') + ' loaded');
                    self.contentReady(pack.get('files'));
                }, failure: function() {
                    self.failure();
                }});

            },

            contentReady: function(files) {
                var old_files = this.files;
                this.files = files;
                App.vent.trigger('dashboard:contentReady');

                // show the files when no loading animation is running and not already open
                if (!this.isLoading && old_files !== files)
                    this.show();
            },

            // Do load animation, remove the old stuff
            loading: function() {
                this.isLoading = true;
                this.files = null;
                var self = this;
                $(this.ui.files).fadeOut({complete: function() {
                    // All file views should vanish
                    App.vent.trigger('dashboard:destroyContent');

                    // Loading was faster than animation
                    if (self.files)
                        self.show();

                    self.isLoading = false;
                }});
            },

            failure: function() {
                // TODO
            },

            show: function() {
                // fileUL has to be resetted before
                this.files.each(_.bind(this.appendFile, this));
                //TODO: show placeholder when nothing is displayed (filtered content empty)
                $(this.ui.files).fadeIn();
                App.vent.trigger('dashboard:updated');
            },

            // Refresh the file if it is currently shown
            // `data` is either a plain fid or an object containing the new attributes
            fileUpdated: function(data) {
                // BUGFIX: this.files is explicitly set to null in update() and
                // loading(); a file:updated event arriving in that window used
                // to throw a TypeError on this.files.get()
                if (!this.files)
                    return;

                var fid;
                if (_.isObject(data))
                    fid = data.fid;
                else
                    fid = data;
                // this works with ids and object TODO: not anymore
                var file = this.files.get(fid);
                if (file)
                    if (_.isObject(data)) { // update directly
                        file.set(data);
                        App.vent.trigger('dashboard:updated');
                    } else { // fetch from server
                        file.fetch({success: function() {
                            App.vent.trigger('dashboard:updated');
                        }});
                    }
            }
        });
    });
diff --git a/pyload/web/app/scripts/views/dashboard/fileView.js b/pyload/web/app/scripts/views/dashboard/fileView.js
new file mode 100644
index 000000000..ce91a5f38
--- /dev/null
+++ b/pyload/web/app/scripts/views/dashboard/fileView.js
@@ -0,0 +1,103 @@
define(['jquery', 'backbone', 'underscore', 'app', 'utils/apitypes', 'views/abstract/itemView', 'helpers/formatTime', 'hbs!tpl/dashboard/file'],
    function($, Backbone, _, App, Api, ItemView, formatTime, template) {
        'use strict';

        // Renders single file item
        return ItemView.extend({

            tagName: 'li',
            className: 'file-view row-fluid',
            template: template,
            events: {
                'click .checkbox': 'select',
                'click .btn-delete': 'deleteItem',
                'click .btn-restart': 'restart'
            },

            initialize: function() {
                this.listenTo(this.model, 'change', this.render);
                // This will be triggered manually and changed before with silent=true
                this.listenTo(this.model, 'change:visible', this.visibility_changed);
                this.listenTo(this.model, 'change:progress', this.progress_changed);
                this.listenTo(this.model, 'remove', this.unrender);
                this.listenTo(App.vent, 'dashboard:destroyContent', this.destroy);
            },

            onDestroy: function() {
            },

            // Renders the row; maps the download status onto flags the template reads
            render: function() {
                var data = this.model.toJSON();
                if (data.download) {
                    var status = data.download.status;
                    if (status === Api.DownloadStatus.Offline || status === Api.DownloadStatus.TempOffline)
                        data.offline = true;
                    else if (status === Api.DownloadStatus.Online)
                        data.online = true;
                    else if (status === Api.DownloadStatus.Waiting)
                        data.waiting = true;
                    else if (status === Api.DownloadStatus.Downloading)
                        data.downloading = true;
                    else if (this.model.isFailed())
                        data.failed = true;
                    else if (this.model.isFinished())
                        data.finished = true;
                }

                this.$el.html(this.template(data));
                if (this.model.get('selected'))
                    this.$el.addClass('ui-selected');
                else
                    this.$el.removeClass('ui-selected');

                if (this.model.get('visible'))
                    this.$el.show();
                else
                    this.$el.hide();

                return this;
            },

            // Toggles selection state without a full re-render
            select: function(e) {
                e.preventDefault();
                var checked = this.$el.hasClass('ui-selected');
                // toggle class immediately, so no re-render needed
                this.model.set('selected', !checked, {silent: true});
                this.$el.toggleClass('ui-selected');
                App.vent.trigger('file:selection');
            },

            visibility_changed: function(visible) {
                // TODO: improve animation, height is not available when element was not visible
                if (visible)
                    this.$el.slideOut(true);
                else {
                    this.$el.calculateHeight(true);
                    this.$el.slideIn(true);
                }
            },

            // Lightweight update of the progress bar / eta without re-rendering everything
            progress_changed: function() {
                // TODO: progress for non download statuses
                if (!this.model.isDownload())
                    return;

                if (this.model.get('download').status === Api.DownloadStatus.Downloading) {
                    var bar = this.$('.progress .bar');
                    // BUGFIX: a jQuery object is always truthy, even when empty,
                    // so the previous `if (!bar)` check could never detect a
                    // missing bar; test .length to ensure the dl bar is rendered
                    if (!bar.length) {
                        this.render();
                        bar = this.$('.progress .bar');
                    }

                    bar.width(this.model.get('progress') + '%');
                    bar.html('&nbsp;&nbsp;' + formatTime(this.model.get('eta')));
                } else if (this.model.get('download').status === Api.DownloadStatus.Waiting) {
                    this.$('.second').html(
                        '<i class="icon-time"></i>&nbsp;' + formatTime(this.model.get('eta')));

                } else // Every else state can be rendered normally
                    this.render();

            }
        });
    });
diff --git a/pyload/web/app/scripts/views/dashboard/filterView.js b/pyload/web/app/scripts/views/dashboard/filterView.js
new file mode 100644
index 000000000..ad72cf926
--- /dev/null
+++ b/pyload/web/app/scripts/views/dashboard/filterView.js
@@ -0,0 +1,147 @@
define(['jquery', 'backbone', 'underscore', 'app', 'utils/apitypes', 'models/Package', 'hbs!tpl/dashboard/actionbar'],
    /*jslint -W040: false*/
    function($, Backbone, _, App, Api, Package, template) {
        'use strict';

        // Modified version of type ahead show, nearly the same without absolute positioning
        function show() {
            this.$menu
                .insertAfter(this.$element)
                .show();

            this.shown = true;
            return this;
        }

        // Renders the actionbar for the dashboard, handles everything related to filtering displayed files
        return Backbone.Marionette.ItemView.extend({

            events: {
                'click .li-check': 'toggle_selection',
                'click .filter-type': 'filter_type',
                'click .filter-state': 'switch_filter',
                'submit .form-search': 'search'
            },

            ui: {
                'search': '.search-query',
                'stateMenu': '.dropdown-toggle .state',
                'select': '.btn-check',
                'name': '.breadcrumb .active'
            },

            template: template,
            // Currently active download-state filter (Api.DownloadState value)
            state: null,

            initialize: function() {
                this.state = Api.DownloadState.All;

                // Apply the filter before the content is shown
                this.listenTo(App.vent, 'dashboard:contentReady', this.apply_filter);
                this.listenTo(App.vent, 'dashboard:updated', this.apply_filter);
                this.listenTo(App.vent, 'dashboard:updated', this.updateName);
            },

            onRender: function() {
                // use our modified method
                $.fn.typeahead.Constructor.prototype.show = show;
                this.ui.search.typeahead({
                    minLength: 2,
                    source: this.getSuggestions
                });

            },

            // TODO: app level api request
            // Runs a server-side search and opens the result like a package
            search: function(e) {
                // BUGFIX: without preventDefault the native form submission
                // reloads the page, so the search result was never displayed
                e.preventDefault();
                e.stopPropagation();
                var query = this.ui.search.val();
                this.ui.search.val('');

                var pack = new Package();
                // Overwrite fetch method to use a search
                // TODO: quite hackish, could be improved to filter packages
                // or show performed search
                pack.fetch = function(options) {
                    pack.search(query, options);
                };

                App.dashboard.openPackage(pack);
            },

            // Queries the server for typeahead suggestions matching the pattern
            getSuggestions: function(query, callback) {
                $.ajax(App.apiRequest('searchSuggestions', {pattern: query}, {
                    method: 'POST',
                    success: function(data) {
                        callback(data);
                    }
                }));
            },

            // Switches the state filter and restyles the dropdown accordingly
            switch_filter: function(e) {
                e.stopPropagation();
                var element = $(e.target);
                var state = parseInt(element.data('state'), 10);
                var menu = this.ui.stateMenu.parent().parent();
                menu.removeClass('open');

                if (state === Api.DownloadState.Finished) {
                    menu.removeClass().addClass('dropdown finished');
                } else if (state === Api.DownloadState.Unfinished) {
                    menu.removeClass().addClass('dropdown active');
                } else if (state === Api.DownloadState.Failed) {
                    menu.removeClass().addClass('dropdown failed');
                } else {
                    menu.removeClass().addClass('dropdown');
                }

                this.state = state;
                this.ui.stateMenu.text(element.text());
                this.apply_filter();
            },

            // Applies the filtering to current open files
            apply_filter: function() {
                if (!App.dashboard.files)
                    return;

                var self = this;
                App.dashboard.files.map(function(file) {
                    var visible = file.get('visible');
                    if (visible !== self.is_visible(file)) {
                        file.set('visible', !visible, {silent: true});
                        file.trigger('change:visible', !visible);
                    }
                });

                App.vent.trigger('dashboard:filtered');
            },

            // determine if a file should be visible
            // TODO: non download files
            is_visible: function(file) {
                if (this.state === Api.DownloadState.Finished)
                    return file.isFinished();
                else if (this.state === Api.DownloadState.Unfinished)
                    return file.isUnfinished();
                else if (this.state === Api.DownloadState.Failed)
                    return file.isFailed();

                return true;
            },

            updateName: function() {
                // TODO
//                this.ui.name.text(App.dashboard.package.get('name'));
            },

            toggle_selection: function() {
                App.vent.trigger('selection:toggle');
            },

            filter_type: function(e) {
                // TODO: filtering by file/media type not implemented yet
            }

        });
    });
diff --git a/pyload/web/app/scripts/views/dashboard/packageView.js b/pyload/web/app/scripts/views/dashboard/packageView.js
new file mode 100644
index 000000000..2738fcbea
--- /dev/null
+++ b/pyload/web/app/scripts/views/dashboard/packageView.js
@@ -0,0 +1,75 @@
define(['jquery', 'app', 'views/abstract/itemView', 'underscore', 'hbs!tpl/dashboard/package'],
    function($, App, itemView, _, template) {
        'use strict';

        // View for one package entry in the dashboard list
        return itemView.extend({

            tagName: 'li',
            className: 'package-view',
            template: template,
            events: {
                'click .package-name, .btn-open': 'open',
                'click .icon-refresh': 'restart',
                'click .select': 'select',
                'click .btn-delete': 'deleteItem'
            },

            // Ul for child packages (unused)
            ul: null,
            // Currently unused
            expanded: false,

            initialize: function() {
                this.listenTo(this.model, 'filter:added', this.hide);
                this.listenTo(this.model, 'filter:removed', this.show);
                this.listenTo(this.model, 'change', this.render);
                this.listenTo(this.model, 'remove', this.unrender);

                // Close any open drop down menu when the cursor leaves the row
                var view = this;
                this.$el.on('mouseleave', function() {
                    view.$('.dropdown-menu').parent().removeClass('open');
                });
            },

            onDestroy: function() {
            },

            // Renders the whole package item and activates its tooltips
            render: function() {
                var html = this.template(this.model.toJSON());
                this.$el.html(html);
                this.$el.initTooltips();

                return this;
            },

            unrender: function() {
                itemView.prototype.unrender.apply(this);

                // TODO: display other package
                App.vent.trigger('dashboard:loading', null);
            },


            // TODO
            // Toggle expanding of packages
            expand: function(e) {
                e.preventDefault();
            },

            // Opens this package's content in the dashboard
            open: function(e) {
                e.preventDefault();
                App.dashboard.openPackage(this.model);
            },

            // Toggles selection; flips the icon directly so no re-render is needed
            select: function(e) {
                e.preventDefault();
                var wasChecked = this.$('.select').hasClass('icon-check');
                this.model.set('selected', !wasChecked, {silent: true});
                this.$('.select').toggleClass('icon-check').toggleClass('icon-check-empty');
                App.vent.trigger('package:selection');
            }
        });
    });
diff --git a/pyload/web/app/scripts/views/dashboard/selectionView.js b/pyload/web/app/scripts/views/dashboard/selectionView.js
new file mode 100644
index 000000000..25b7998df
--- /dev/null
+++ b/pyload/web/app/scripts/views/dashboard/selectionView.js
@@ -0,0 +1,154 @@
define(['jquery', 'backbone', 'underscore', 'app', 'hbs!tpl/dashboard/select'],
    function($, Backbone, _, App, template) {
        'use strict';

        // Context action bar shown while packages or files are selected
        return Backbone.Marionette.ItemView.extend({

            el: '#selection-area',
            template: template,

            events: {
                'click .icon-check': 'deselect',
                'click .icon-pause': 'pause',
                'click .icon-trash': 'trash',
                'click .icon-refresh': 'restart'
            },

            // Element of the action bar
            actionBar: null,
            // number of currently selected elements
            current: 0,

            initialize: function() {
                this.$el.calculateHeight().height(0);
                var render = _.bind(this.render, this);

                App.vent.on('dashboard:updated', render);
                App.vent.on('dashboard:filtered', render);
                App.vent.on('package:selection', render);
                App.vent.on('file:selection', render);
                App.vent.on('selection:toggle', _.bind(this.select_toggle, this));


                // API events, maybe better to rely on internal ones?
                App.vent.on('package:deleted', render);
                App.vent.on('file:deleted', render);
            },

            // Selected (or, with all=true, every visible) files of the open package
            get_files: function(all) {
                var dashFiles = App.dashboard.files;
                if (!dashFiles)
                    return [];

                if (all)
                    return dashFiles.where({visible: true});

                return dashFiles.where({selected: true, visible: true});
            },

            get_packs: function() {
                var packs = App.dashboard.tree.get('packages');
                if (!packs)
                    return []; // TODO

                return packs.where({selected: true});
            },

            render: function() {
                var fileCount = this.get_files().length,
                    packCount = this.get_packs().length,
                    total = fileCount + packCount;

                if (total > 0) {
                    this.$el.html(this.template({files: fileCount, packs: packCount}));
                    this.$el.initTooltips('bottom');
                }

                // Slide the bar in/out only when the selection state flips
                if (total > 0 && this.current === 0)
                    this.$el.slideOut();
                else if (total === 0 && this.current > 0)
                    this.$el.slideIn();

                // TODO: accessing ui directly, should be events
                if (fileCount > 0) {
                    App.actionbar.currentView.ui.select.addClass('icon-check').removeClass('icon-check-empty');
                    App.dashboard.ui.packages.addClass('ui-files-selected');
                } else {
                    App.actionbar.currentView.ui.select.addClass('icon-check-empty').removeClass('icon-check');
                    App.dashboard.ui.packages.removeClass('ui-files-selected');
                }

                this.current = total;
            },

            // Deselects all items
            deselect: function() {
                var clear = function(model) {
                    model.set('selected', false);
                };
                this.get_files().map(clear);
                this.get_packs().map(clear);

                this.render();
            },

            pause: function() {
                alert('Not implemented yet');
                this.deselect();
            },

            // Confirms, then deletes the selected packages and remaining files
            trash: function() {
                _.confirm('dialogs/confirmDelete', function() {

                    var pids = [];
                    // TODO: delete many at once
                    this.get_packs().map(function(pack) {
                        pids.push(pack.get('pid'));
                        pack.destroy();
                    });

                    // get only the fids of non deleted packages
                    var fids = _.filter(this.get_files(), function(file) {
                        return !_.contains(pids, file.get('package'));
                    }).map(function(file) {
                        file.destroyLocal();
                        return file.get('fid');
                    });

                    if (fids.length > 0)
                        $.ajax(App.apiRequest('deleteFiles', {fids: fids}));

                    this.deselect();
                }, this);
            },

            // Restarts every selected file and package
            restart: function() {
                var restartModel = function(model) {
                    model.restart();
                };
                this.get_files().map(restartModel);
                this.get_packs().map(restartModel);

                this.deselect();
            },

            // Select or deselect all visible files
            select_toggle: function() {
                var selected = this.get_files();
                if (selected.length === 0)
                    this.get_files(true).map(function(file) {
                        file.set('selected', true);
                    });
                else
                    selected.map(function(file) {
                        file.set('selected', false);
                    });

                this.render();
            }
        });
    });
diff --git a/pyload/web/app/scripts/views/headerView.js b/pyload/web/app/scripts/views/headerView.js
new file mode 100644
index 000000000..2c83fb381
--- /dev/null
+++ b/pyload/web/app/scripts/views/headerView.js
@@ -0,0 +1,252 @@
define(['jquery', 'underscore', 'backbone', 'app', 'models/ServerStatus', 'collections/ProgressList',
    'views/progressView', 'views/notificationView', 'helpers/formatSize', 'hbs!tpl/header/layout',
    'hbs!tpl/header/status', 'hbs!tpl/header/progressbar', 'hbs!tpl/header/progressSup', 'hbs!tpl/header/progressSub' , 'flot'],
    function(
        $, _, Backbone, App, ServerStatus, ProgressList, ProgressView, NotificationView, formatSize, template, templateStatus, templateProgress, templateSup, templateSub) {
        'use strict';
        // Renders the header with all information: server status, speed graph,
        // running progresses, notification tasks. Data arrives over a websocket.
        return Backbone.Marionette.ItemView.extend({

            events: {
                'click .icon-list': 'toggle_taskList',
                'click .popover .close': 'toggle_taskList',
                'click .btn-grabber': 'open_grabber',
                'click .logo': 'gotoDashboard'
            },

            ui: {
                progress: '.progress-list',
                speedgraph: '#speedgraph'
            },

            template: template,

            // view
            grabber: null,
            speedgraph: null,

            // models and data
            ws: null,
            status: null,
            progressList: null,
            // list of [x, speed-in-KiB] points fed to the flot graph
            speeds: null,

            // sub view
            notificationView: null,

            // save if last progress was empty
            wasEmpty: false,
            // [tasks, downloads] of the last render; used to skip re-rendering the bar
            lastStatus: null,

            initialize: function() {
                var self = this;
                this.notificationView = new NotificationView();

                // Server status model; every change re-renders the header
                this.status = new ServerStatus();
                this.listenTo(this.status, 'change', this.update);

                // Running download/task progresses, each rendered as own sub view
                this.progressList = new ProgressList();
                this.listenTo(this.progressList, 'add', function(model) {
                    self.ui.progress.appendWithAnimation(new ProgressView({model: model}).render().el);
                });

                // TODO: button to start stop refresh
                // Ask the server to start pushing status updates
                var ws = App.openWebSocket('/async');
                ws.onopen = function() {
                    ws.send(JSON.stringify('start'));
                };
                // TODO compare with polling
                ws.onmessage = _.bind(this.onData, this);
                ws.onerror = function(error) {
                    console.log(error);
                    alert('WebSocket error' + error);
                };

                this.ws = ws;
            },

            gotoDashboard: function() {
                App.navigate('');
            },

            // Sets up the flot speed graph with a fixed-size window of data points
            initGraph: function() {
                var totalPoints = 120;
                var data = [];

                // init with empty data
                while (data.length < totalPoints)
                    data.push([data.length, 0]);

                this.speeds = data;
                this.speedgraph = $.plot(this.ui.speedgraph, [this.speeds], {
                    series: {
                        lines: { show: true, lineWidth: 2 },
                        shadowSize: 0,
                        color: '#fee247'
                    },
                    xaxis: { ticks: [] },
                    // speeds are stored in KiB, scale back to bytes for the label
                    yaxis: { ticks: [], min: 1, autoscaleMargin: 0.1, tickFormatter: function(data) {
                        return formatSize(data * 1024);
                    }, position: 'right' },
                    grid: {
                        show: true,
//                        borderColor: "#757575",
                        borderColor: 'white',
                        borderWidth: 1,
                        labelMargin: 0,
                        axisMargin: 0,
                        minBorderMargin: 0
                    }
                });

            },

            // Must be called after view was attached
            init: function() {
                this.initGraph();
                this.update();
            },

            // Re-renders status block, progress bar and the sup/sub details
            update: function() {
                // TODO: what should be displayed in the header
                // queue/processing size?

                var status = this.status.toJSON();
                // peak of the current graph window, converted from KiB to bytes
                status.maxspeed = _.max(this.speeds, function(speed) {
                    return speed[1];
                })[1] * 1024;
                this.$('.status-block').html(
                    templateStatus(status)
                );

                // Aggregate all running progresses into counts and total speed
                var data = {tasks: 0, downloads: 0, speed: 0, single: false};
                this.progressList.each(function(progress) {
                    if (progress.isDownload()) {
                        data.downloads++;
                        data.speed += progress.get('download').speed;
                    } else
                        data.tasks++;
                });

                // Show progress of one task
                if (data.tasks + data.downloads === 1) {
                    var progress = this.progressList.at(0);
                    data.single = true;
                    data.eta = progress.get('eta');
                    data.percent = progress.getPercent();
                    data.name = progress.get('name');
                    data.statusmsg = progress.get('statusmsg');
                }

                data.etaqueue = status.eta;
                data.linksqueue = status.linksqueue;
                data.sizequeue = status.sizequeue;

                // Render progressbar only when needed
                if (!_.isEqual([data.tasks, data.downloads], this.lastStatus)) {
                    console.log('render bar');
                    this.lastStatus = [data.tasks, data.downloads];
                    this.$('#progress-info').html(templateProgress(data));
                } else {
                    // counts unchanged: just move the existing bar
                    this.$('#progress-info .bar').width(data.percent + '%');
                }

                // render upper and lower part
                this.$('.sup').html(templateSup(data));
                this.$('.sub').html(templateSub(data));

                return this;
            },

            toggle_taskList: function() {
                this.$('.popover').animate({opacity: 'toggle'});
            },

            // Lazy-loads the link grabber dialog once, then shows it
            open_grabber: function() {
                var self = this;
                _.requireOnce(['views/linkGrabberModal'], function(ModalView) {
                    if (self.grabber === null)
                        self.grabber = new ModalView();

                    self.grabber.show();
                });
            },

            // Websocket dispatcher: ServerStatus, progress arrays and events arrive here
            onData: function(evt) {
                var data = JSON.parse(evt.data);
                if (data === null) return;

                if (data['@class'] === 'ServerStatus') {
                    this.status.set(data);

                    // There tasks at the server, but not in queue: so fetch them
                    // or there are tasks in our queue but not on the server
                    if (this.status.get('notifications') && !this.notificationView.tasks.hasTaskWaiting() ||
                        !this.status.get('notifications') && this.notificationView.tasks.hasTaskWaiting())
                        this.notificationView.tasks.fetch();

                    // shift the graph window by one point, append current speed in KiB
                    this.speeds = this.speeds.slice(1);
                    this.speeds.push([this.speeds[this.speeds.length - 1][0] + 1, Math.floor(data.speed / 1024)]);

                    // TODO: if everything is 0 re-render is not needed
                    this.speedgraph.setData([this.speeds]);
                    // adjust the axis
                    this.speedgraph.setupGrid();
                    this.speedgraph.draw();

                }
                else if (_.isArray(data))
                    this.onProgressUpdate(data);
                else if (data['@class'] === 'EventInfo')
                    this.onEvent(data.eventname, data.event_args);
                else
                    console.log('Unknown Async input', data);

            },

            // Syncs the progress list with the server and pushes progress
            // data into the file models currently shown on the dashboard
            onProgressUpdate: function(progress) {
                // generate a unique id
                _.each(progress, function(prog) {
                    if (prog.download)
                        prog.pid = prog.download.fid;
                    else
                        prog.pid = prog.plugin + prog.name;
                });

                this.progressList.set(progress);
                // update currently open files with progress
                this.progressList.each(function(prog) {
                    if (prog.isDownload() && App.dashboard.files) {
                        var file = App.dashboard.files.get(prog.get('download').fid);
                        if (file) {
                            file.set({
                                progress: prog.getPercent(),
                                eta: prog.get('eta'),
                                size: prog.get('total')
                            }, {silent: true});
                            file.setDownloadStatus(prog.get('download').status);
                            file.trigger('change:progress');
                        }
                    }
                });

                if (progress.length === 0) {
                    // only render one time when last was not empty already
                    if (!this.wasEmpty) {
                        this.update();
                        this.wasEmpty = true;
                    }
                } else {
                    this.wasEmpty = false;
                    this.update();
                }
            },

            // Re-broadcasts a server event on the application event bus
            onEvent: function(event, args) {
                args.unshift(event);
                console.log('Core send event', args);
                App.vent.trigger.apply(App.vent, args);
            }

        });
    });
diff --git a/pyload/web/app/scripts/views/input/inputLoader.js b/pyload/web/app/scripts/views/input/inputLoader.js
new file mode 100644
index 000000000..04d591d30
--- /dev/null
+++ b/pyload/web/app/scripts/views/input/inputLoader.js
@@ -0,0 +1,8 @@
define(['./textInput'], function(textInput) {
    'use strict';

    // Maps an input description to the matching widget class.
    // Only text inputs exist so far, so everything resolves to that.
    return function(input) {
        return textInput;
    };
});
diff --git a/pyload/web/app/scripts/views/input/inputView.js b/pyload/web/app/scripts/views/input/inputView.js
new file mode 100644
index 000000000..1860fcaf1
--- /dev/null
+++ b/pyload/web/app/scripts/views/input/inputView.js
@@ -0,0 +1,86 @@
define(['jquery', 'backbone', 'underscore'], function($, Backbone, _) {
    'use strict';

    // Base class for all input widgets; subclasses implement renderInput()
    return Backbone.View.extend({

        tagName: 'input',

        input: null,
        value: null,
        description: null,
        default_value: null,

        // enables tooltips
        tooltip: true,

        initialize: function(options) {
            this.input = options.input;
            this.default_value = this.input.default_value;
            this.value = options.value;
            this.description = options.description;
        },

        render: function() {
            this.renderInput();
            // Attach a hover popover carrying the description, if any
            // TODO: render default value in popup?
            if (this.description && this.tooltip) {
                this.$el.data('content', this.description);
                this.$el.popover({
                    placement: 'right',
                    trigger: 'hover'
                });
            }

            return this;
        },

        renderInput: function() {
            // Overwrite this
        },

        showTooltip: function() {
            if (this.description && this.tooltip)
                this.$el.popover('show');
        },

        hideTooltip: function() {
            if (this.description && this.tooltip)
                this.$el.popover('hide');
        },

        // Unbinds everything and removes the element from the DOM
        destroy: function() {
            this.undelegateEvents();
            this.unbind();
            if (this.onDestroy) {
                this.onDestroy();
            }
            this.$el.removeData().unbind();
            this.remove();
        },

        // focus the input element
        focus: function() {
            this.$el.focus();
        },

        // Clear the input
        clear: function() {
        },

        // retrieve value of the input
        getVal: function() {
            return this.value;
        },

        // the child class must call this when the value changed
        setVal: function(value) {
            this.value = value;
            this.trigger('change', value);
        }
    });
});
diff --git a/pyload/web/app/scripts/views/input/textInput.js b/pyload/web/app/scripts/views/input/textInput.js
new file mode 100644
index 000000000..0eebbf91e
--- /dev/null
+++ b/pyload/web/app/scripts/views/input/textInput.js
@@ -0,0 +1,36 @@
define(['jquery', 'backbone', 'underscore', './inputView'], function($, Backbone, _, inputView) {
    'use strict';

    // Plain single-line text input widget
    return inputView.extend({

        // TODO
        tagName: 'input',
        events: {
            'keyup': 'onChange',
            'focus': 'showTooltip',
            'focusout': 'hideTooltip'
        },

        renderInput: function() {
            this.$el.attr({type: 'text', name: 'textInput'});

            if (this.default_value)
                this.$el.attr('placeholder', this.default_value);

            if (this.value)
                this.$el.val(this.value);

            return this;
        },

        clear: function() {
            this.$el.val('');
        },

        // Propagate every keystroke as a value change
        onChange: function(e) {
            this.setVal(this.$el.val());
        }

    });
});
diff --git a/pyload/web/app/scripts/views/linkGrabberModal.js b/pyload/web/app/scripts/views/linkGrabberModal.js
new file mode 100644
index 000000000..e6f59c134
--- /dev/null
+++ b/pyload/web/app/scripts/views/linkGrabberModal.js
@@ -0,0 +1,49 @@
+define(['jquery', 'underscore', 'app', 'views/abstract/modalView', 'hbs!tpl/dialogs/linkgrabber'],
+ function($, _, App, modalView, template) {
+ 'use strict';
+ // Modal dialog for package adding - triggers package:added when package was added
+ return modalView.extend({
+
+ events: {
+ 'click .btn-success': 'addPackage',
+ 'keypress #inputPackageName': 'addOnEnter'
+ },
+
+ template: template,
+
+ initialize: function() {
+ // Inherit parent events
+ this.events = _.extend({}, modalView.prototype.events, this.events);
+ },
+
+ addOnEnter: function(e) {
+ if (e.keyCode !== 13) return;
+ this.addPackage(e);
+ },
+
+ addPackage: function(e) {
+ var self = this;
+ var options = App.apiRequest('addPackage',
+ {
+ name: $('#inputPackageName').val(),
+ // TODO: better parsing / tokenization
+ links: $('#inputLinks').val().split('\n')
+ },
+ {
+ success: function() {
+ App.vent.trigger('package:added');
+ self.hide();
+ }
+ });
+
+ $.ajax(options);
+ $('#inputPackageName').val('');
+ $('#inputLinks').val('');
+ },
+
+ onShow: function() {
+ this.$('#inputPackageName').focus();
+ }
+
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/loginView.js b/pyload/web/app/scripts/views/loginView.js
new file mode 100644
index 000000000..891b3ec99
--- /dev/null
+++ b/pyload/web/app/scripts/views/loginView.js
@@ -0,0 +1,37 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'hbs!tpl/login'],
+ function($, Backbone, _, App, template) {
+ 'use strict';
+
+        // Renders the login form and submits credentials via the API
+ return Backbone.Marionette.ItemView.extend({
+ template: template,
+
+ events: {
+ 'submit form': 'login'
+ },
+
+ ui: {
+ 'form': 'form'
+ },
+
+ login: function(e) {
+ e.stopPropagation();
+
+ var options = App.apiRequest('login', null, {
+ data: this.ui.form.serialize(),
+ type : 'post',
+ success: function(data) {
+ // TODO: go to last page, better error
+ if (data)
+ App.navigate('');
+ else
+ alert('Wrong login');
+ }
+ });
+
+ $.ajax(options);
+ return false;
+ }
+
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/notificationView.js b/pyload/web/app/scripts/views/notificationView.js
new file mode 100644
index 000000000..93d07a0f3
--- /dev/null
+++ b/pyload/web/app/scripts/views/notificationView.js
@@ -0,0 +1,85 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'collections/InteractionList', 'hbs!tpl/notification'],
+ function($, Backbone, _, App, InteractionList, template) {
+ 'use strict';
+
+        // Renders the notification area for pending interaction tasks
+ return Backbone.Marionette.ItemView.extend({
+
+ // Only view for this area so it's hardcoded
+ el: '#notification-area',
+ template: template,
+
+ events: {
+ 'click .btn-query': 'openQuery',
+ 'click .btn-notification': 'openNotifications'
+ },
+
+ tasks: null,
+ // area is slided out
+ visible: false,
+ // the dialog
+ modal: null,
+
+ initialize: function() {
+ this.tasks = new InteractionList();
+
+ App.vent.on('interaction:added', _.bind(this.onAdd, this));
+ App.vent.on('interaction:deleted', _.bind(this.onDelete, this));
+
+ var render = _.bind(this.render, this);
+ this.listenTo(this.tasks, 'add', render);
+ this.listenTo(this.tasks, 'remove', render);
+
+ },
+
+ onAdd: function(task) {
+ this.tasks.add(task);
+ },
+
+ onDelete: function(task) {
+ this.tasks.remove(task);
+ },
+
+ onRender: function() {
+ this.$el.calculateHeight().height(0);
+ },
+
+ render: function() {
+
+ // only render when it will be visible
+ if (this.tasks.length > 0)
+ this.$el.html(this.template(this.tasks.toJSON()));
+
+ if (this.tasks.length > 0 && !this.visible) {
+ this.$el.slideOut();
+ this.visible = true;
+ }
+ else if (this.tasks.length === 0 && this.visible) {
+ this.$el.slideIn();
+ this.visible = false;
+ }
+
+ return this;
+ },
+
+ openQuery: function() {
+ var self = this;
+
+ _.requireOnce(['views/queryModal'], function(ModalView) {
+ if (self.modal === null) {
+ self.modal = new ModalView();
+ self.modal.parent = self;
+ }
+
+ self.modal.model = self.tasks.at(0);
+ self.modal.render();
+ self.modal.show();
+ });
+
+ },
+
+ openNotifications: function() {
+
+ }
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/progressView.js b/pyload/web/app/scripts/views/progressView.js
new file mode 100644
index 000000000..7b9dbb74b
--- /dev/null
+++ b/pyload/web/app/scripts/views/progressView.js
@@ -0,0 +1,46 @@
+define(['jquery', 'backbone', 'underscore', 'app', 'utils/apitypes', 'views/abstract/itemView',
+ 'hbs!tpl/header/progress', 'hbs!tpl/header/progressStatus', 'helpers/pluginIcon'],
+ function($, Backbone, _, App, Api, ItemView, template, templateStatus, pluginIcon) {
+ 'use strict';
+
+        // Renders a single download progress item
+ return ItemView.extend({
+
+ idAttribute: 'pid',
+ tagName: 'li',
+ template: template,
+ events: {
+ },
+
+ // Last name
+ name: null,
+
+ initialize: function() {
+ this.listenTo(this.model, 'change', this.update);
+ this.listenTo(this.model, 'remove', this.unrender);
+ },
+
+ onDestroy: function() {
+ },
+
+ // Update html without re-rendering
+ update: function() {
+ if (this.name !== this.model.get('name')) {
+ this.name = this.model.get('name');
+ this.render();
+ }
+
+ this.$('.bar').width(this.model.getPercent() + '%');
+ this.$('.progress-status').html(templateStatus(this.model.toJSON()));
+ },
+
+ render: function() {
+ // TODO: icon
+ // TODO: other states
+ // TODO: non download progress
+ this.$el.css('background-image', 'url(' + pluginIcon('todo') + ')');
+ this.$el.html(this.template(this.model.toJSON()));
+ return this;
+ }
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/queryModal.js b/pyload/web/app/scripts/views/queryModal.js
new file mode 100644
index 000000000..ce624814a
--- /dev/null
+++ b/pyload/web/app/scripts/views/queryModal.js
@@ -0,0 +1,69 @@
+define(['jquery', 'underscore', 'app', 'views/abstract/modalView', './input/inputLoader', 'hbs!tpl/dialogs/interactionTask'],
+ function($, _, App, modalView, load_input, template) {
+ 'use strict';
+ return modalView.extend({
+
+ events: {
+ 'click .btn-success': 'submit',
+ 'submit form': 'submit'
+ },
+ template: template,
+
+ // the notificationView
+ parent: null,
+
+ model: null,
+ input: null,
+
+ initialize: function() {
+ // Inherit parent events
+ this.events = _.extend({}, modalView.prototype.events, this.events);
+ },
+
+ renderContent: function() {
+ var data = {
+ title: this.model.get('title'),
+ plugin: this.model.get('plugin'),
+ description: this.model.get('description')
+ };
+
+ var input = this.model.get('input').data;
+ if (this.model.isCaptcha()) {
+ data.captcha = input[0];
+ data.type = input[1];
+ }
+ return data;
+ },
+
+ onRender: function() {
+ // instantiate the input
+ var input = this.model.get('input');
+ var InputView = load_input(input);
+ this.input = new InputView({input: input});
+                // only rendered afterwards
+ this.$('#inputField').append(this.input.render().el);
+ },
+
+ submit: function(e) {
+ e.stopPropagation();
+ // TODO: load next task
+
+ this.model.set('result', this.input.getVal());
+ var self = this;
+ this.model.save({success: function() {
+ self.hide();
+ }});
+
+ this.input.clear();
+ return false;
+ },
+
+ onShow: function() {
+ this.input.focus();
+ },
+
+ onHide: function() {
+ this.input.destroy();
+ }
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/settings/configSectionView.js b/pyload/web/app/scripts/views/settings/configSectionView.js
new file mode 100644
index 000000000..0d9b0762f
--- /dev/null
+++ b/pyload/web/app/scripts/views/settings/configSectionView.js
@@ -0,0 +1,99 @@
+define(['jquery', 'underscore', 'backbone', 'app', '../abstract/itemView', '../input/inputLoader',
+ 'hbs!tpl/settings/config', 'hbs!tpl/settings/configItem'],
+ function($, _, Backbone, App, itemView, load_input, template, templateItem) {
+ 'use strict';
+
+        // Renders a single configuration section
+ return itemView.extend({
+
+ tagName: 'div',
+
+ template: template,
+ templateItem: templateItem,
+
+ // Will only render one time with further attribute updates
+ rendered: false,
+
+ events: {
+ 'click .btn-primary': 'submit',
+ 'click .btn-reset': 'reset'
+ },
+
+ initialize: function() {
+ this.listenTo(this.model, 'destroy', this.destroy);
+ },
+
+ render: function() {
+ if (!this.rendered) {
+ this.$el.html(this.template(this.model.toJSON()));
+
+ // initialize the popover
+ this.$('.page-header a').popover({
+ placement: 'left'
+// trigger: 'hover'
+ });
+
+ var container = this.$('.control-content');
+ var self = this;
+ _.each(this.model.get('items'), function(item) {
+ var json = item.toJSON();
+ var el = $('<div>').html(self.templateItem(json));
+ var InputView = load_input(item.get('input'));
+ var input = new InputView(json).render();
+ item.set('inputView', input);
+
+ self.listenTo(input, 'change', _.bind(self.render, self));
+ el.find('.controls').append(input.el);
+ container.append(el);
+ });
+ this.rendered = true;
+ }
+ // Enable button if something is changed
+ if (this.model.hasChanges())
+ this.$('.btn-primary').removeClass('disabled');
+ else
+ this.$('.btn-primary').addClass('disabled');
+
+ // Mark all inputs that are modified
+ _.each(this.model.get('items'), function(item) {
+ var input = item.get('inputView');
+ var el = input.$el.parent().parent();
+ if (item.isChanged())
+ el.addClass('info');
+ else
+ el.removeClass('info');
+ });
+
+ return this;
+ },
+
+ onDestroy: function() {
+ // TODO: correct cleanup after building up so many views and models
+ },
+
+ submit: function(e) {
+ e.stopPropagation();
+ // TODO: success / failure popups
+ var self = this;
+ this.model.save({success: function() {
+ self.render();
+ App.vent.trigger('config:change');
+ }});
+
+ },
+
+ reset: function(e) {
+ e.stopPropagation();
+ // restore the original value
+ _.each(this.model.get('items'), function(item) {
+ if (item.has('inputView')) {
+ var input = item.get('inputView');
+ input.setVal(item.get('value'));
+ input.render();
+ }
+ });
+ this.render();
+ }
+
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/settings/pluginChooserModal.js b/pyload/web/app/scripts/views/settings/pluginChooserModal.js
new file mode 100644
index 000000000..242d11a5a
--- /dev/null
+++ b/pyload/web/app/scripts/views/settings/pluginChooserModal.js
@@ -0,0 +1,72 @@
+define(['jquery', 'underscore', 'app', 'views/abstract/modalView', 'hbs!tpl/dialogs/addPluginConfig',
+ 'helpers/pluginIcon', 'select2'],
+ function($, _, App, modalView, template, pluginIcon) {
+ 'use strict';
+ return modalView.extend({
+
+ events: {
+ 'click .btn-add': 'add'
+ },
+ template: template,
+ plugins: null,
+ select: null,
+
+ initialize: function() {
+ // Inherit parent events
+ this.events = _.extend({}, modalView.prototype.events, this.events);
+ var self = this;
+ $.ajax(App.apiRequest('getAvailablePlugins', null, {success: function(data) {
+ self.plugins = _.sortBy(data, function(item) {
+ return item.name;
+ });
+ self.render();
+ }}));
+ },
+
+ onRender: function() {
+                // TODO: could be a separate input type if needed on multiple pages
+ if (this.plugins)
+ this.select = this.$('#pluginSelect').select2({
+ escapeMarkup: function(m) {
+ return m;
+ },
+ formatResult: this.format,
+ formatSelection: this.formatSelection,
+ data: {results: this.plugins, text: function(item) {
+ return item.label;
+ }},
+ id: function(item) {
+ return item.name;
+ }
+ });
+ },
+
+ onShow: function() {
+ },
+
+ onHide: function() {
+ },
+
+ format: function(data) {
+ var s = '<div class="plugin-select" style="background-image: url(' + pluginIcon(data.name) + ')">' + data.label;
+                s += '<br><span>' + data.description + '</span></div>';
+ return s;
+ },
+
+ formatSelection: function(data) {
+ if (!data || _.isEmpty(data))
+ return '';
+
+ return '<img class="logo-select" src="' + pluginIcon(data.name) + '"> ' + data.label;
+ },
+
+ add: function(e) {
+ e.stopPropagation();
+ if (this.select) {
+ var plugin = this.select.val();
+ App.vent.trigger('config:open', plugin);
+ this.hide();
+ }
+ }
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/scripts/views/settings/settingsView.js b/pyload/web/app/scripts/views/settings/settingsView.js
new file mode 100644
index 000000000..ff86efdf9
--- /dev/null
+++ b/pyload/web/app/scripts/views/settings/settingsView.js
@@ -0,0 +1,184 @@
+define(['jquery', 'underscore', 'backbone', 'app', 'models/ConfigHolder', './configSectionView',
+ 'hbs!tpl/settings/layout', 'hbs!tpl/settings/menu', 'hbs!tpl/settings/actionbar'],
+ function($, _, Backbone, App, ConfigHolder, ConfigSectionView, template, templateMenu, templateBar) {
+ 'use strict';
+
+        // Renders the settings overview page
+ return Backbone.Marionette.ItemView.extend({
+
+ template: template,
+ templateMenu: templateMenu,
+
+ events: {
+ 'click .settings-menu li > a': 'change_section',
+ 'click .icon-remove': 'deleteConfig'
+ },
+
+ ui: {
+ 'menu': '.settings-menu',
+ 'content': '.setting-box > form'
+ },
+
+ selected: null,
+ modal: null,
+
+ coreConfig: null, // It seems collections are not needed
+ pluginConfig: null,
+
+ // currently open configHolder
+ config: null,
+ lastConfig: null,
+ isLoading: false,
+
+ initialize: function() {
+ this.actionbar = Backbone.Marionette.ItemView.extend({
+ template: templateBar,
+ events: {
+ 'click .btn': 'choosePlugin'
+ },
+ choosePlugin: _.bind(this.choosePlugin, this)
+
+ });
+ this.listenTo(App.vent, 'config:open', this.openConfig);
+ this.listenTo(App.vent, 'config:change', this.refresh);
+
+ this.refresh();
+ },
+
+ refresh: function() {
+ var self = this;
+ $.ajax(App.apiRequest('getCoreConfig', null, {success: function(data) {
+ self.coreConfig = data;
+ self.renderMenu();
+ }}));
+ $.ajax(App.apiRequest('getPluginConfig', null, {success: function(data) {
+ self.pluginConfig = data;
+ self.renderMenu();
+ }}));
+ },
+
+ onRender: function() {
+ // set a height with css so animations will work
+ this.ui.content.height(this.ui.content.height());
+ },
+
+ renderMenu: function() {
+ var plugins = [],
+ addons = [];
+
+ // separate addons and default plugins
+ // addons have an activated state
+ _.each(this.pluginConfig, function(item) {
+ if (item.activated === null)
+ plugins.push(item);
+ else
+ addons.push(item);
+ });
+
+ this.$(this.ui.menu).html(this.templateMenu({
+ core: this.coreConfig,
+ plugin: plugins,
+ addon: addons
+ }));
+
+ // mark the selected element
+ this.$('li[data-name="' + this.selected + '"]').addClass('active');
+ },
+
+ openConfig: function(name) {
+ // Do nothing when this config is already open
+ if (this.config && this.config.get('name') === name)
+ return;
+
+ this.lastConfig = this.config;
+ this.config = new ConfigHolder({name: name});
+ this.loading();
+
+ var self = this;
+ this.config.fetch({success: function() {
+ if (!self.isLoading)
+ self.show();
+
+ }, failure: _.bind(this.failure, this)});
+
+ },
+
+ loading: function() {
+ this.isLoading = true;
+ var self = this;
+ this.ui.content.fadeOut({complete: function() {
+ if (self.config.isLoaded())
+ self.show();
+
+ self.isLoading = false;
+ }});
+
+ },
+
+ show: function() {
+ // TODO animations are bit sloppy
+ this.ui.content.css('display', 'block');
+ var oldHeight = this.ui.content.height();
+
+ // this will destroy the old view
+ if (this.lastConfig)
+ this.lastConfig.trigger('destroy');
+ else
+ this.ui.content.empty();
+
+ // reset the height
+ this.ui.content.css('height', '');
+ // append the new element
+ this.ui.content.append(new ConfigSectionView({model: this.config}).render().el);
+ // get the new height
+ var height = this.ui.content.height();
+ // set the old height again
+ this.ui.content.height(oldHeight);
+ this.ui.content.animate({
+ opacity: 'show',
+ height: height
+ });
+ },
+
+ failure: function() {
+ // TODO
+ this.config = null;
+ },
+
+ change_section: function(e) {
+ // TODO check for changes
+ // TODO move this into render?
+
+ var el = $(e.target).closest('li');
+
+ this.selected = el.data('name');
+ this.openConfig(this.selected);
+
+ this.ui.menu.find('li.active').removeClass('active');
+ el.addClass('active');
+ e.preventDefault();
+ },
+
+ choosePlugin: function(e) {
+ var self = this;
+ _.requireOnce(['views/settings/pluginChooserModal'], function(Modal) {
+ if (self.modal === null)
+ self.modal = new Modal();
+
+ self.modal.show();
+ });
+ },
+
+ deleteConfig: function(e) {
+ e.stopPropagation();
+ var el = $(e.target).parent().parent();
+ var name = el.data('name');
+ var self = this;
+ $.ajax(App.apiRequest('deleteConfig', {plugin: name}, { success: function() {
+ self.refresh();
+ }}));
+ return false;
+ }
+
+ });
+ }); \ No newline at end of file
diff --git a/pyload/web/app/styles/default/accounts.less b/pyload/web/app/styles/default/accounts.less
new file mode 100644
index 000000000..9b45b64b3
--- /dev/null
+++ b/pyload/web/app/styles/default/accounts.less
@@ -0,0 +1,6 @@
+@import "common";
+
+.logo-select {
+ width: 20px;
+ height: 20px;
+} \ No newline at end of file
diff --git a/pyload/web/app/styles/default/admin.less b/pyload/web/app/styles/default/admin.less
new file mode 100644
index 000000000..92524c153
--- /dev/null
+++ b/pyload/web/app/styles/default/admin.less
@@ -0,0 +1,17 @@
+@import "common";
+
+/*
+ Admin
+*/
+
+#btn_newuser {
+ float: right;
+}
+
+#user_permissions {
+ float: right;
+}
+
+.userperm {
+ width: 115px;
+} \ No newline at end of file
diff --git a/pyload/web/app/styles/default/dashboard.less b/pyload/web/app/styles/default/dashboard.less
new file mode 100644
index 000000000..ed87e19a1
--- /dev/null
+++ b/pyload/web/app/styles/default/dashboard.less
@@ -0,0 +1,330 @@
+@import "bootstrap/less/mixins";
+@import "common";
+
+/*
+ Dashboard
+*/
+
+#dashboard ul {
+ margin: 0;
+ list-style: none;
+}
+
+.sidebar-header {
+ font-size: 25px;
+ line-height: 25px;
+ margin: 4px 0;
+ border-bottom: 1px dashed @grey;
+}
+
+/*
+ Packages
+*/
+.package-list {
+ list-style: none;
+ margin-left: 0;
+}
+
+@frame-top: 20px;
+@frame-bottom: 18px;
+
+.package-frame {
+ position: absolute;
+ top: -@frame-top;
+ left: -@frame-top / 2;
+ right: -@frame-top / 2;
+ bottom: -@frame-bottom + 2px; // + size of visible bar
+ z-index: -1; // lies under package
+ border: 1px solid @grey;
+ border-radius: 5px;
+ box-shadow: 3px 3px 6px rgba(0, 0, 0, 0.75);
+}
+
+.package-view {
+ padding-bottom: 4px;
+ margin: 8px 0;
+ position: relative;
+ overflow: hidden;
+ .hyphens;
+
+
+ i {
+ cursor: pointer;
+ }
+
+ & > i {
+ vertical-align: middle;
+ }
+
+ .progress {
+ position: absolute;
+ height: @frame-bottom;
+ line-height: @frame-bottom;
+ font-size: 12px;
+ text-align: center;
+ border-radius: 0;
+ border-bottom-left-radius: 5px;
+ border-bottom-right-radius: 5px;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ margin-bottom: 0;
+ background-image: none;
+ color: @light;
+ background-color: @yellow;
+ }
+
+ .bar-info {
+ background-image: none;
+ background-color: @blue;
+ }
+
+ &:hover {
+ overflow: visible;
+ z-index: 10;
+
+ .package-frame {
+ background-color: @light;
+ }
+ }
+
+ &.ui-selected:hover {
+ color: @light;
+
+ .package-frame {
+ background-color: @dark;
+ }
+
+ }
+}
+
+.package-name {
+ cursor: pointer;
+}
+
+.package-indicator {
+ position: absolute;
+ top: 0;
+ right: 0;
+ float: right;
+ color: @blue;
+ text-shadow: @yellowDark 1px 1px;
+ height: @frame-top;
+ line-height: @frame-top;
+
+ & > i:hover {
+ color: @green;
+ }
+
+ .dropdown-menu {
+ text-shadow: none;
+ }
+
+ .tooltip {
+ text-shadow: none;
+ width: 100%;
+ }
+
+ .btn-move {
+ color: @green;
+ display: none;
+ }
+
+}
+
+.ui-files-selected .btn-move {
+ display: inline;
+}
+
+// Tag area with different effect on hover
+.tag-area {
+ position: absolute;
+ top: -2px;
+ left: 0;
+
+ .badge {
+ font-size: 11px;
+ line-height: 11px;
+ }
+
+ .badge i {
+ cursor: pointer;
+ &:hover:before {
+ content: "\f024"; // show Remove icon
+ }
+ }
+
+ .badge-ghost {
+ visibility: hidden;
+ cursor: pointer;
+ opacity: 0.5;
+ }
+
+ &:hover .badge-ghost {
+ visibility: visible;
+ }
+
+}
+
+/*
+ File View
+*/
+
+.file-list {
+ list-style: none;
+ margin: 0;
+}
+
+@file-height: 22px;
+
+.file-view {
+ position: relative;
+ padding: 0 4px;
+ border-top: 1px solid #dddddd;
+ line-height: @file-height;
+
+ &:first-child {
+ border-top: none;
+ }
+
+ &:hover, &.ui-selected:hover {
+ border-radius: 5px;
+ .gradient(top, @blue, @blueLight);
+ color: @light;
+ }
+
+ &.ui-selected {
+ .gradient(top, @yellow, @yellowDark);
+ color: @dark;
+ border-color: @greenDark;
+
+ .file-row.downloading .bar {
+ .gradient(top, @green, @greenLight);
+ }
+
+ }
+
+ img { // plugin logo
+ margin-top: -2px;
+ padding: 0 2px;
+ height: @file-height;
+ width: @file-height;
+ }
+
+ .icon-chevron-down:hover {
+ cursor: pointer;
+ color: @yellow;
+ }
+
+}
+
+.file-row {
+ min-height: 0 !important;
+// padding-left: 5px;
+ padding-top: 4px;
+ padding-bottom: 4px;
+
+ // TODO: better styling for filestatus
+ &.second {
+// border-radius: 4px;
+// background: @light;
+ font-size: small;
+ font-weight: bold;
+// box-shadow: 3px 3px 6px rgba(0, 0, 0, 0.75);
+// .default-shadow;
+ }
+
+ &.third {
+ margin-left: 0;
+ position: relative;
+ font-size: small;
+ }
+
+ .dropdown-menu {
+ font-size: medium;
+ }
+}
+
+/*
+ TODO: more colorful states
+ better fileView design
+*/
+
+.file-row.finished {
+// .gradient(top, @green, @greenLight);
+// color: @light;
+ color: @green;
+}
+
+.file-row.failed {
+// .gradient(top, @red, @redLight);
+// color: @light;
+ color: @red;
+}
+
+.file-row.downloading {
+
+ .progress {
+ height: @file-height;
+ background: @light;
+ margin: 0;
+ }
+
+ .bar {
+ text-align: left;
+ .gradient(top, @yellow, @yellowDark);
+ .transition-duration(2s);
+ color: @dark;
+ }
+
+}
+
+/*
+FANCY CHECKBOXES
+*/
+.file-view .checkbox {
+ width: 20px;
+ height: 21px;
+ background: url(../../images/default/checks_sheet.png) left top no-repeat;
+ cursor: pointer;
+}
+
+.file-view.ui-selected .checkbox {
+ background: url(../../images/default/checks_sheet.png) -21px top no-repeat;
+}
+
+/*
+ Actionbar
+*/
+
+.form-search {
+ position: relative;
+
+ .dropdown-menu {
+ min-width: 100%;
+ position: absolute;
+ right: 0;
+ left: auto;
+ }
+
+}
+
+li.finished > a, li.finished:hover > a {
+ background-color: @green;
+ color: @light;
+
+ .caret, .caret:hover {
+ border-bottom-color: @light !important;
+ border-top-color: @light !important;
+ }
+}
+
+li.failed > a, li.failed:hover > a {
+ background-color: @red;
+ color: @light;
+
+ .caret, .caret:hover {
+ border-bottom-color: @light !important;
+ border-top-color: @light !important;
+ }
+} \ No newline at end of file
diff --git a/pyload/web/app/styles/default/main.less b/pyload/web/app/styles/default/main.less
new file mode 100644
index 000000000..0bfa4fe2f
--- /dev/null
+++ b/pyload/web/app/styles/default/main.less
@@ -0,0 +1,21 @@
+@import "bootstrap/less/bootstrap";
+@import "bootstrap/less/responsive";
+@import "font-awesome/less/font-awesome";
+
+@FontAwesomePath: "../../fonts";
+
+@import "pyload-common/styles/base";
+@import "pyload-common/styles/basic-layout";
+
+@import "style";
+@import "dashboard";
+@import "settings";
+@import "accounts";
+@import "admin";
+
+@ResourcePath: "../..";
+@DefaultFont: 'Abel', sans-serif;
+
+// Changed dimensions
+@header-height: 70px;
+@footer-height: 66px; \ No newline at end of file
diff --git a/pyload/web/app/styles/default/settings.less b/pyload/web/app/styles/default/settings.less
new file mode 100644
index 000000000..34bfcb92a
--- /dev/null
+++ b/pyload/web/app/styles/default/settings.less
@@ -0,0 +1,121 @@
+@import "common";
+
+/*
+ Settings
+*/
+.settings-menu {
+ background-color: #FFF;
+ box-shadow: 0 0 5px #000; // border: 10px solid #EEE;
+
+ .nav-header {
+ background: @blueDark;
+ color: @light;
+ }
+
+ li > a, .nav-header {
+ margin-left: -16px;
+ margin-right: -16px;
+ text-shadow: none;
+ }
+
+ i {
+ margin-top: 0;
+ }
+
+ .plugin, .addon {
+ a {
+ padding-left: 28px;
+ background-position: 4px 2px;
+ background-repeat: no-repeat;
+ background-size: 20px 20px;
+ }
+
+ .icon-remove {
+ display: none;
+ }
+
+ &:hover {
+ i {
+ display: block;
+ }
+ }
+
+ }
+
+ .addon {
+ div {
+ font-size: small;
+ }
+ .addon-on {
+ color: @green;
+ }
+
+ .addon-off {
+ color: @red;
+ }
+
+ }
+
+ border-top-left-radius: 0;
+ border-top-right-radius: 0;
+
+ .nav > li > a:hover {
+ color: @blueDark;
+ }
+}
+
+.setting-box {
+ border: 10px solid @blueDark;
+ box-shadow: 0 0 5px @dark; // .gradient(bottom, @yellowLightest, @light);
+ overflow: hidden;
+
+ .page-header {
+ margin: 0;
+
+ .btn {
+ float: right;
+ margin-top: 5px;
+ }
+
+ .popover {
+ font-size: medium;
+ }
+
+ }
+
+ // Bit wider control labels
+ .control-label {
+ width: 180px;
+ }
+ .controls {
+ margin-left: 200px;
+ }
+ .form-actions {
+ padding-left: 200px;
+ }
+
+}
+
+/*
+ Plugin select
+*/
+
+.plugin-select {
+ background-position: left 2px;
+ background-repeat: no-repeat;
+ background-size: 20px 20px;
+ padding-left: 24px;
+
+ font-weight: bold;
+ span {
+ line-height: 14px;
+ font-size: small;
+ font-weight: normal;
+ }
+
+}
+
+.logo-select {
+ width: 20px;
+ height: 20px;
+} \ No newline at end of file
diff --git a/pyload/web/app/styles/default/style.less b/pyload/web/app/styles/default/style.less
new file mode 100644
index 000000000..b75f45a65
--- /dev/null
+++ b/pyload/web/app/styles/default/style.less
@@ -0,0 +1,297 @@
+@import "bootstrap/less/mixins";
+@import "common";
+
+/*
+ Header
+*/
+header { // background-color: @greyDark;
+ .gradient(to bottom, #222222, #111111);
+ height: @header-height;
+ position: fixed;
+ top: 0;
+ vertical-align: top;
+ width: 100%;
+ z-index: 10;
+ color: #ffffff;
+
+ a {
+ color: #ffffff;
+ }
+ .container-fluid, .row-fluid {
+ height: @header-height;
+ }
+
+ span.title {
+ color: white;
+ float: left;
+ font-family: SansationRegular, sans-serif;
+ font-size: 40px;
+ line-height: @header-height;
+ cursor: default;
+ }
+
+ .logo {
+ margin-right: 10px;
+ margin-top: 10px;
+ width: 105px;
+ height: 107px;
+ background-size: auto;
+ cursor: pointer;
+ }
+
+}
+
+@header-inner-height: @header-height - 16px;
+
+// centered header element
+.centered {
+ height: @header-inner-height;
+ margin: 8px 0;
+}
+
+.header-block {
+ .centered;
+ float: left;
+ line-height: @header-inner-height / 3; // 3 rows
+ font-size: small;
+}
+
+.status-block {
+ min-width: 15%;
+}
+
+.header-btn {
+ float: right;
+ position: relative;
+ .centered;
+
+ .lower {
+ position: absolute;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ margin-left: 0;
+
+ button {
+ width: 100% / 3; // 3 buttons
+ }
+
+ }
+}
+
+#progress-area {
+ .centered;
+ position: relative;
+ margin-top: 8px;
+ line-height: 16px;
+
+ .sub {
+ font-size: small;
+ }
+
+ .popover { // display: block;
+ max-width: none;
+ width: 120%;
+ left: -60%; // Half of width
+ margin-left: 50%;
+ top: 100%;
+ }
+
+ .popover-title, .popover-content {
+ color: @greyDark;
+ }
+
+ .icon-list {
+ cursor: pointer;
+ margin-right: 2px; // same as globalprogress margin
+
+ &:hover {
+ color: @yellow;
+ }
+ }
+ .close {
+ line-height: 14px;
+ }
+}
+
+.progress-list {
+ list-style: none;
+ margin: 0;
+ font-size: small;
+
+ li {
+ background-repeat: no-repeat;
+ background-size: 32px 32px;
+ background-position: 0px 8px;
+ padding-left: 40px;
+
+ &:not(:last-child) {
+ margin-bottom: 5px;
+ padding-bottom: 5px;
+ border-bottom: 1px dashed @greyLight;
+ }
+
+ .progress {
+ height: 8px;
+ margin-bottom: 0;
+
+ .bar {
+ .transition-duration(2s);
+ .gradient(bottom, @blue, @blueLight);
+ }
+ }
+ }
+}
+
+#globalprogress {
+ background-color: @greyDark;
+ background-image: none;
+ height: 8px;
+ margin: 4px 0;
+ border-radius: 8px;
+ border: 2px solid @grey;
+
+ .bar {
+ color: @dark;
+ background-image: none;
+ background-color: @yellow;
+ .transition-duration(2s);
+
+ &.running {
+ width: 100%;
+ .stripes(@yellowLighter, @yellowDark);
+ }
+ }
+}
+
+.speedgraph-container {
+ // Allows speedgraph to take up remaining space
+ display: block;
+ overflow: hidden;
+ padding: 0 8px;
+
+ #speedgraph {
+ float: right;
+ width: 100%;
+ .centered;
+// height: @header-height - 16px;
+// margin: 8px 0;
+ font-family: sans-serif;
+ }
+}
+
+.header-area {
+ display: none; // hidden by default
+ position: absolute;
+ bottom: -28px;
+ line-height: 18px;
+ top: @header-height;
+ padding: 4px 10px 6px 10px;
+ text-align: center;
+ border-radius: 0 0 6px 6px;
+ color: @light;
+ background-color: @greyDark;
+ .default-shadow;
+}
+
+#notification-area {
+ .header-area;
+ left: 140px;
+
+ .badge {
+ vertical-align: top;
+ }
+
+ .btn-query, .btn-notification {
+ cursor: pointer;
+ }
+}
+
+#selection-area {
+ .header-area;
+ left: 50%;
+ min-width: 15%;
+
+ i {
+ cursor: pointer;
+
+ &:hover {
+ color: @yellow;
+ }
+ }
+
+}
+
+/*
+ Actionbar
+*/
+
+.nav > li > a:hover {
+ color: @blue;
+}
+
+.actionbar {
+ padding-bottom: 3px;
+ margin-bottom: 0;
+ border-bottom: 1px dashed @grey;
+
+ height: @actionbar-height;
+
+ padding-top: 2px;
+ margin-bottom: 5px;
+
+}
+
+.actionbar > li > a {
+ margin-top: 4px;
+}
+
+.actionbar .breadcrumb {
+ margin: 0;
+ padding-top: 10px;
+ padding-bottom: 0;
+
+ .active {
+ color: @grey;
+ }
+
+}
+
+.actionbar form {
+ margin-top: 6px;
+ margin-bottom: 0;
+}
+
+.actionbar input, .actionbar button {
+ padding-top: 2px;
+ padding-bottom: 2px;
+}
+
+.actionbar .dropdown-menu i {
+ margin-top: 4px;
+ padding-right: 5px;
+}
+
+/*
+ Login
+*/
+.login {
+ vertical-align: middle;
+ border: 2px solid @dark;
+ padding: 15px 50px;
+ font-size: 17px;
+ border-radius: 15px;
+ -moz-border-radius: 15px;
+ -webkit-border-radius: 15px;
+}
+
+/*
+ Footer
+*/
+footer .copyright {
+ background-size: 40px 40px;
+ background-position: 12px center;
+ height: 40px;
+ padding-left: 40px;
+ padding-top: 10px;
+}
diff --git a/pyload/web/app/styles/font.css b/pyload/web/app/styles/font.css
new file mode 100644
index 000000000..088b6f14c
--- /dev/null
+++ b/pyload/web/app/styles/font.css
@@ -0,0 +1,13 @@
+/**
+ * @file
+ * Font styling
+ */
+
+@font-face {
+ font-family: 'Abel';
+ font-style: normal;
+ font-weight: 400;
+ src: local('Abel'), local('Abel-Regular');
+  src: url(../fonts/Abel-Regular.woff) format('woff'),
+       url(../fonts/Abel-Regular.ttf) format('truetype');
+}
diff --git a/pyload/web/app/templates/default/accounts/account.html b/pyload/web/app/templates/default/accounts/account.html
new file mode 100644
index 000000000..90bd632c8
--- /dev/null
+++ b/pyload/web/app/templates/default/accounts/account.html
@@ -0,0 +1,10 @@
+<td>{{ plugin }}</td>
+<td>{{ loginname }}</td>
+<td>{{ valid }}</td>
+<td>{{ premium }}</td>
+<td>{{ trafficleft }}</td>
+<td>{{ shared }}</td>
+<td>{{ activated }}</td>
+<td>
+ <button type="button" class="btn btn-danger">Delete</button>
+</td> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/accounts/actionbar.html b/pyload/web/app/templates/default/accounts/actionbar.html
new file mode 100644
index 000000000..f4652ec42
--- /dev/null
+++ b/pyload/web/app/templates/default/accounts/actionbar.html
@@ -0,0 +1,5 @@
+<div class="span2 offset1">
+</div>
+<span class="span9">
+ <button class="btn btn-small btn-blue btn-add">Add Account</button>
+</span> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/accounts/layout.html b/pyload/web/app/templates/default/accounts/layout.html
new file mode 100644
index 000000000..e6627500d
--- /dev/null
+++ b/pyload/web/app/templates/default/accounts/layout.html
@@ -0,0 +1,19 @@
+<!--{# TODO: responsive layout instead of table #}-->
+<div class="span10 offset2">
+ <table class="table table-striped">
+ <thead>
+ <tr>
+ <th>Plugin</th>
+ <th>Name</th>
+ <th>Valid</th>
+ <th>Premium</th>
+ <th>Traffic</th>
+ <th>Shared</th>
+ <th>Activated</th>
+ <th>Delete</th>
+ </tr>
+ </thead>
+ <tbody class="account-list">
+ </tbody>
+ </table>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/admin.html b/pyload/web/app/templates/default/admin.html
new file mode 100644
index 000000000..2eb90d7e0
--- /dev/null
+++ b/pyload/web/app/templates/default/admin.html
@@ -0,0 +1,223 @@
+{% extends 'default/base.html' %}
+
+{% block title %}{{ _("Admin") }} - {{ super() }} {% endblock %}
+{% block subtitle %}{{ _("Admin") }}
+{% endblock %}
+
+{% block css %}
+ <link href="static/css/default/admin.less" rel="stylesheet/less" type="text/css" media="screen"/>
+ <link rel="stylesheet" type="text/css" href="static/css/fontawesome.css" />
+{% endblock %}
+
+{% block require %}
+{% endblock %}
+
+{% block content %}
+ <div class="container-fluid">
+ <div class="row-fluid">
+ <div id="userlist" class="span10">
+ <div class="page-header">
+                    <h1>Admin Area
+                        <small>User management, system info</small>
+ <a id="btn_newuser" class="btn btn-warning btn-large" type="button"><i class="iconf-plus-sign iconf-large "></i></a>
+ </h1>
+
+
+
+ </div>
+
+ <div class="dropdown">
+ <span class="label name">User</span>
+ <a class="dropdown-toggle" data-toggle="dropdown" href="#"><i class="iconf-user iconf-8x"></i></a>
+ <ul class="dropdown-menu" role="menu" aria-labelledby="dropdownMenu">
+ <li><a tabindex="-1" id="useredit" href="#" role="button" data-backdrop="true" data-controls-modal="event-modal" data-keyboard="true"><i class="icon-pencil"></i>Edit</a></li>
+ <li><a tabindex="-1" href="#"><i class="icon-tasks"></i>Statistik</a></li>
+ <li class="divider"></li>
+ <li><a tabindex="-1" href="#"><i class="icon-remove-sign"></i>Delete</a></li>
+ </ul>
+ </div>
+
+ <div id="event-modal" class="modal hide fade">
+ <div class="modal-header">
+ <a class="close" id="useredit_close" href="#">x</a>
+ <h3>User Settings</h3>
+ </div>
+ <div class="modal-body">
+ <p>Set password and permissions</p>
+ <table style="width:100%;" class="table ">
+ <td>
+ <div class="input-prepend">
+ <span class="add-on"><i class="iconf-key"></i></span>
+                                    <input class="span2" style="min-width:120px;" id="prependedInput" type="password" placeholder="New Password">
+ </div>
+ <div class="input-prepend">
+ <span class="add-on"><i class="icon-repeat"></i></span>
+                                    <input class="span2" style="min-width:120px;" id="prependedInputRepeat" type="password" placeholder="Repeat">
+ </div>
+ <br>
+ <br>
+ <br>
+ <form class="form-horizontal">
+ <div class="control-group">
+ <label class="control-label" for="onoff">Administrator</label>
+
+ <div class="controls">
+ <div class="btn-group" id="onoff" data-toggle="buttons-radio">
+ <button type="button" class="btn btn-primary" >On</button>
+ <button type="button" class="btn btn-primary active">Off</button>
+ </div>
+ </div>
+ </div>
+ </form>
+ </td>
+ <td>
+ <div id="user_permissions">
+ <h3>Permissions</h3>
+ <div class="btn-group btn-group-vertical" data-toggle="buttons-checkbox">
+ <button type="button" class="btn btn-inverse userperm">Accounts</button>
+ <button type="button" class="btn btn-inverse userperm active">Add</button>
+ <button type="button" class="btn btn-inverse userperm">Delete</button>
+ <button type="button" class="btn btn-inverse userperm active">Download</button>
+ <button type="button" class="btn btn-inverse userperm active">List</button>
+ <button type="button" class="btn btn-inverse userperm">Logs</button>
+ <button type="button" class="btn btn-inverse userperm">Modify</button>
+ <button type="button" class="btn btn-inverse userperm">Settings</button>
+ <button type="button" class="btn btn-inverse userperm active">Status</button>
+ </div>
+ </div>
+ </td>
+ </table>
+ </div>
+ <div class="modal-footer">
+                    <a class="btn btn-primary" id="useredit_save" href="#">Save</a>
+
+ </div>
+ </div>
+
+
+
+ </div>
+
+ <div class="span2">
+ <br>
+ <h2>Support</h2>
+ <table>
+ <tr>
+ <td>
+ <i class="icon-globe"></i>
+ </td>
+ <td>
+ <a href="#">Wiki |</a>
+ <a href="#">Forum |</a>
+ <a href="#">Chat</a>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <i class="icon-book"></i>
+ </td>
+ <td>
+ <a href="#">Documentation</a>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <i class="icon-fire"></i>
+ </td>
+ <td>
+ <a href="#">Development</a>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <i class="icon-bullhorn"></i>
+ </td>
+ <td>
+ <a href="#">Issue Tracker</a>
+ </td>
+ </tr>
+ </table>
+ <br>
+ <a href="#" class="btn btn-inverse" id="info" rel="popover" data-content="<table class='table table-striped'>
+ <tbody>
+ <tr>
+ <td>Python:</td>
+ <td>2.6.4 </td>
+ </tr>
+ <tr>
+            <td>Operating system:</td>
+ <td>nt win32</td>
+ </tr>
+ <tr>
+ <td>pyLoad Version:</td>
+ <td>0.4.9</td>
+ </tr>
+ <tr>
+            <td>Installation folder:</td>
+ <td>C:\pyLoad</td>
+ </tr>
+ <tr>
+            <td>Configuration folder:</td>
+ <td>C:\Users\Marvin\pyload</td>
+ </tr>
+ <tr>
+            <td>Download folder:</td>
+ <td>C:\Users\Marvin\new</td>
+ </tr>
+ <tr>
+ <td>HDD:</td>
+ <td>1.67 TiB <div class='progress progress-striped active'>
+ <div class='bar' style='width: 40%;'></div>
+</div></td>
+ </tr>
+ <tr>
+            <td>Language:</td>
+ <td>de</td>
+ </tr>
+ <tr>
+ <td>Webinterface Port:</td>
+ <td>8000</td>
+ </tr>
+ <tr>
+ <td>Remote Interface Port:</td>
+ <td>7227</td>
+ </tr>
+ </tbody>
+    </table>" title="System information">System</a>
+
+ </div>
+ </div>
+ </div>
+
+ <script src="static/js/libs/jquery-1.9.0.js"></script>
+ {##}
+ <script src="static/js/libs/bootstrap-2.2.2.js"></script>
+ <script type="text/javascript">
+ $('#info').popover({
+ placement: 'left',
+ trigger: 'click',
+            html: true
+ });
+
+ $('.dropdown-toggle').dropdown();
+
+ $("#btn_newuser").click(function() {
+
+ str = "<div class='dropdown1'><span class='label name'>User</span><a class='dropdown-toggle' data-toggle='dropdown1' href='#'><i class='iconf-user iconf-8x'></i></a><ul class='dropdown-menu' role='menu' aria-labelledby='dropdownMenu'><li><a tabindex='-1' href='#'>Action</a></li><li><a tabindex='-1' href='#'>Another action</a></li><li><a tabindex='-1' href='#'>Something else here</a></li><li class='divider'></li><li><a tabindex='-1' href='#'>Separated link</a></li></ul></div>";
+
+ $("#userlist").append(str);
+
+ });
+
+ $("#useredit").click(function() {
+ $('#event-modal').modal();
+ });
+ $("#useredit_close").click(function() {
+ $('#event-modal').modal('hide');
+ });
+ $("#useredit_save").click(function() {
+ $('#event-modal').modal('hide');
+ });
+
+ </script>
+{% endblock %} \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dashboard/actionbar.html b/pyload/web/app/templates/default/dashboard/actionbar.html
new file mode 100644
index 000000000..a8b2ebecd
--- /dev/null
+++ b/pyload/web/app/templates/default/dashboard/actionbar.html
@@ -0,0 +1,54 @@
+<div class="span2 offset1">
+</div>
+<ul class="actionbar nav nav-pills span9">
+ <li>
+ <ul class="breadcrumb">
+ <li><a href="#">Local</a> <span class="divider">/</span></li>
+ <li class="active"></li>
+ </ul>
+ </li>
+
+ <li style="float: right;">
+ <form class="form-search" action="#">
+ <div class="input-append">
+ <input type="text" class="search-query" style="width: 120px">
+ <button type="submit" class="btn">Search</button>
+ </div>
+ </form>
+ </li>
+ <li style="float: right" class="li-check">
+ <a href="#"><i class="icon-check-empty btn-check"></i></a>
+ </li>
+ <li class="dropdown" style="float: right;">
+ <a class="dropdown-toggle type"
+ data-toggle="dropdown"
+ href="#">
+ Type
+ <b class="caret"></b>
+ </a>
+ <ul class="dropdown-menu">
+ <li><a class="filter-type" data-type="2" href="#"><i class="icon-ok"></i>&nbsp;Audio</a></li>
+ <li><a class="filter-type" data-type="4" href="#"><i class="icon-ok"></i>&nbsp;Image</a></li>
+ <li><a class="filter-type" data-type="8" href="#"><i class="icon-ok"></i>&nbsp;Video</a></li>
+ <li><a class="filter-type" data-type="16" href="#"><i class="icon-ok"></i>&nbsp;Document</a></li>
+ <li><a class="filter-type" data-type="32" href="#"><i class="icon-remove"></i>&nbsp;Archive</a></li>
+ <li><a class="filter-type" data-type="1" href="#"><i class="icon-remove"></i>&nbsp;Other</a></li>
+ </ul>
+ </li>
+ <li class="dropdown" style="float: right;">
+ <a class="dropdown-toggle"
+ data-toggle="dropdown"
+ href="#">
+ <span class="state">
+ All
+ </span>
+ <b class="caret"></b>
+ </a>
+ <ul class="dropdown-menu">
+ <li><a class="filter-state" data-state="0" href="#">All</a></li>
+ <li><a class="filter-state" data-state="1" href="#">Finished</a></li>
+ <li><a class="filter-state" data-state="2" href="#">Unfinished</a></li>
+ <li><a class="filter-state" data-state="3" href="#">Failed</a></li>
+ </ul>
+ </li>
+</ul> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dashboard/file.html b/pyload/web/app/templates/default/dashboard/file.html
new file mode 100644
index 000000000..4bf3c7a97
--- /dev/null
+++ b/pyload/web/app/templates/default/dashboard/file.html
@@ -0,0 +1,34 @@
+<div class="file-row first span6">
+ <i class="checkbox"></i>&nbsp;
+ <span class="name">
+ {{ name }}
+ </span>
+</div>
+<div class="file-row second span3 {{ fileClass this }}">
+ {{ fileStatus this }}
+</div>
+
+<div class="file-row third span3 pull-right">
+ <i class="{{ fileIcon media }}"></i>&nbsp;
+ {{ formatSize size }}
+ <span class="pull-right">
+ <img src="{{ pluginIcon download.plugin }}"/>
+ {{ download.plugin }}&nbsp;
+ <i class="icon-chevron-down" data-toggle="dropdown"></i>
+ <ul class="dropdown-menu" role="menu">
+ <li><a href="#" class="btn-delete"><i class="icon-trash"></i> Delete</a></li>
+ <li><a href="#" class="btn-restart"><i class="icon-refresh"></i> Restart</a></li>
+ <!--{# TODO: only show when finished #}-->
+ <li><a href="download/{{ fid }}" target="_blank" class="btn-dowload"><i class="icon-download"></i>
+ Download</a></li>
+ <li><a href="#" class="btn-share"><i class="icon-share"></i> Share</a></li>
+ <li class="divider"></li>
+ <li class="dropdown-submenu pull-left">
+ <a>Addons</a>
+ <ul class="dropdown-menu">
+ <li><a>Test</a></li>
+ </ul>
+ </li>
+ </ul>
+ </span>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dashboard/layout.html b/pyload/web/app/templates/default/dashboard/layout.html
new file mode 100644
index 000000000..cd84d3a26
--- /dev/null
+++ b/pyload/web/app/templates/default/dashboard/layout.html
@@ -0,0 +1,32 @@
+<div class="span3">
+ <div class="sidebar-header">
+ <i class="icon-hdd"></i> Local
+ <div class="pull-right" style="font-size: medium; line-height: normal">
+ <i class="icon-chevron-down" style="font-size: 20px"></i>
+ </div>
+ <div class="clearfix"></div>
+ </div>
+ <ul class="package-list">
+
+ </ul>
+ <div class="sidebar-header">
+ <i class="icon-group"></i> Shared
+ </div>
+ <ul class="package-list">
+ <li>Shared content</li>
+ <li>from other user</li>
+ </ul>
+ <div class="sidebar-header">
+ <i class="icon-sitemap"></i> Remote
+ </div>
+ <ul>
+ <li>Content from</li>
+ <li>remote sites or</li>
+ <li>other pyload instances</li>
+ </ul>
+</div>
+<div class="span9">
+ <ul class="file-list">
+
+ </ul>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dashboard/package.html b/pyload/web/app/templates/default/dashboard/package.html
new file mode 100644
index 000000000..0f2496046
--- /dev/null
+++ b/pyload/web/app/templates/default/dashboard/package.html
@@ -0,0 +1,50 @@
+{{#if selected }}
+ <i class="icon-check select"></i>
+ {{ else }}
+ <i class="icon-check-empty select"></i>
+ {{/if}}
+ <span class="package-name">
+ {{ name }}
+ </span>
+
+ <div class="package-frame">
+ <div class="tag-area">
+ <span class="badge badge-success"><i class="icon-tag"></i>video</span>
+ <span class="badge badge-success badge-ghost"><i class="icon-tag"></i> Add Tag</span>
+ </div>
+ <div class="package-indicator">
+ <i class="icon-plus-sign btn-move" data-toggle="tooltip" title="Move files here"></i>
+ <i class="icon-pause" data-toggle="tooltip" title="Pause Package"></i>
+ <i class="icon-refresh" data-toggle="tooltip" title="Restart Package"></i>
+ {{#if shared }}
+ <i class="icon-eye-open" data-toggle="tooltip" title="Package is public"></i>
+ {{ else }}
+ <i class="icon-eye-close" data-toggle="tooltip" title="Package is private"></i>
+ {{/if}}
+ <i class="icon-chevron-down" data-toggle="dropdown">
+ </i>
+ <ul class="dropdown-menu" role="menu">
+ <li><a href="#" class="btn-open"><i class="icon-folder-open-alt"></i> Open</a></li>
+ <li><a href="#"><i class="icon-plus-sign"></i> Add links</a></li>
+ <li><a href="#"><i class="icon-edit"></i> Details</a></li>
+ <li><a href="#" class="btn-delete"><i class="icon-trash"></i> Delete</a></li>
+ <li><a href="#" class="btn-recheck"><i class="icon-refresh"></i> Recheck</a></li>
+ <li class="divider"></li>
+ <li class="dropdown-submenu">
+ <a>Addons</a>
+ <ul class="dropdown-menu">
+ <li><a>Test</a></li>
+ </ul>
+ </li>
+ </ul>
+ </div>
+ <div class="progress">
+ <span style="position: absolute; left: 5px">
+ {{ stats.linksdone }} / {{ stats.linkstotal }}
+ </span>
+ <div class="bar bar-info" style="width: {{ percent }}%"></div>
+ <span style="position: absolute; right: 5px">
+ {{ formatSize stats.sizedone }} / {{ formatSize stats.sizetotal }}
+ </span>
+ </div>
+ </div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dashboard/select.html b/pyload/web/app/templates/default/dashboard/select.html
new file mode 100644
index 000000000..f4c696d11
--- /dev/null
+++ b/pyload/web/app/templates/default/dashboard/select.html
@@ -0,0 +1,11 @@
+<i class="icon-check" data-toggle="tooltip" title="Deselect"></i>&nbsp;
+{{#if packs }}{{ ngettext "1 package" "%d packages" packs }}{{/if}}
+{{#if files}}
+{{#if packs}}, {{/if}}
+{{ ngettext "1 file" "%d files" files }}
+{{/if }}
+selected
+&nbsp;|&nbsp;
+<i class="icon-pause" data-toggle="tooltip" title="Pause"></i>&nbsp;
+<i class="icon-trash" data-toggle="tooltip" title="Delete"></i>&nbsp;
+<i class="icon-refresh" data-toggle="tooltip" title="Restart"></i> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dialogs/addAccount.html b/pyload/web/app/templates/default/dialogs/addAccount.html
new file mode 100755
index 000000000..bdc8a609a
--- /dev/null
+++ b/pyload/web/app/templates/default/dialogs/addAccount.html
@@ -0,0 +1,42 @@
+<div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+ <h3>Add an account</h3>
+</div>
+<div class="modal-body">
+ <form class="form-horizontal" autocomplete="off">
+ <legend>
+ Please enter your account data
+ </legend>
+ <div class="control-group">
+ <label class="control-label" for="pluginSelect">
+ Plugin
+ </label>
+
+ <div class="controls">
+ <input type="hidden" id="pluginSelect">
+ </div>
+ </div>
+ <div class="control-group">
+ <label class="control-label" for="login">
+ Loginname
+ </label>
+
+ <div class="controls">
+ <input type="text" id="login">
+ </div>
+ </div>
+ <div class="control-group">
+ <label class="control-label" for="password">
+ Password
+ </label>
+
+ <div class="controls">
+ <input type="password" id="password">
+ </div>
+ </div>
+ </form>
+</div>
+<div class="modal-footer">
+ <a class="btn btn-success btn-add">Add</a>
+ <a class="btn btn-close">Close</a>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dialogs/addPluginConfig.html b/pyload/web/app/templates/default/dialogs/addPluginConfig.html
new file mode 100755
index 000000000..e7a42a208
--- /dev/null
+++ b/pyload/web/app/templates/default/dialogs/addPluginConfig.html
@@ -0,0 +1,26 @@
+<div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+ <h3>
+ Choose a plugin
+ </h3>
+</div>
+<div class="modal-body">
+ <form class="form-horizontal">
+ <legend>
+ Please choose a plugin, which you want to configure
+ </legend>
+ <div class="control-group">
+ <label class="control-label" for="pluginSelect">
+ Plugin
+ </label>
+
+ <div class="controls">
+ <input type="hidden" id="pluginSelect">
+ </div>
+ </div>
+ </form>
+</div>
+<div class="modal-footer">
+ <a class="btn btn-success btn-add">Add</a>
+ <a class="btn btn-close">Close</a>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dialogs/confirmDelete.html b/pyload/web/app/templates/default/dialogs/confirmDelete.html
new file mode 100644
index 000000000..65ae1cb21
--- /dev/null
+++ b/pyload/web/app/templates/default/dialogs/confirmDelete.html
@@ -0,0 +1,11 @@
+<div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+ <h3>Please confirm</h3>
+</div>
+<div class="modal-body">
+ Do you want to delete the selected items?
+</div>
+<div class="modal-footer">
+ <a class="btn btn-danger btn-confirm"><i class="icon-trash icon-white"></i> Delete</a>
+ <a class="btn btn-close">Cancel</a>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dialogs/interactionTask.html b/pyload/web/app/templates/default/dialogs/interactionTask.html
new file mode 100755
index 000000000..a152a5046
--- /dev/null
+++ b/pyload/web/app/templates/default/dialogs/interactionTask.html
@@ -0,0 +1,37 @@
+<div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+ <h3>
+ {{ title }}
+ <small style="background: url('{{ pluginIcon plugin }}') no-repeat right 0; background-size: 20px; padding-right: 22px">
+ {{ plugin }}
+ </small>
+ </h3>
+</div>
+<div class="modal-body">
+ <form class="form-horizontal" action="#">
+ <legend>{{ description }}</legend>
+ {{#if captcha }}
+ <div class="control-group">
+ <label class="control-label" for="captchaImage">
+ Captcha Image
+ </label>
+
+ <div class="controls">
+ <img id="captchaImage" src="data:image/{{ type }};base64,{{ captcha }}">
+ </div>
+ </div>
+ <div class="control-group">
+ <label class="control-label" for="inputField">Captcha Text</label>
+
+ <div class="controls" id="inputField">
+ </div>
+ </div>
+ {{ else }}
+ {{ content }}
+ {{/if}}
+ </form>
+</div>
+<div class="modal-footer">
+ <a class="btn btn-success">Submit</a>
+ <a class="btn btn-close">Close</a>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dialogs/linkgrabber.html b/pyload/web/app/templates/default/dialogs/linkgrabber.html
new file mode 100755
index 000000000..08418cf03
--- /dev/null
+++ b/pyload/web/app/templates/default/dialogs/linkgrabber.html
@@ -0,0 +1,49 @@
+<div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+ <h3>
+ AddPackage
+        <small>paste &amp; add links to pyLoad</small>
+ </h3>
+</div>
+
+<div class="modal-body">
+ <div class="alert alert-error hidden">
+        Uploading the link container failed. Please try again.
+ </div>
+ <form class="form-horizontal">
+ <div class="control-group">
+ <label class="control-label" for="inputPackageName">Package name</label>
+
+ <div class="controls">
+ <input type="text" class="span4" id="inputPackageName" placeholder="Name of your package">
+ </div>
+ </div>
+ <div class="control-group">
+ <label class="control-label" for="inputLinks">Links</label>
+
+ <div class="controls">
+ <textarea id="inputLinks" class="span4" rows="10" placeholder="Paste your links here..."></textarea>
+ </div>
+ </div>
+ <div class="control-group">
+ <label class="control-label" for="inputPassword">Password</label>
+
+ <div class="controls">
+ <input type="text" id="inputPassword" class="span4" placeholder="Password for .rar files">
+ </div>
+ </div>
+ <div class="control-group">
+ <label class="control-label" for="inputContainer">Upload links container</label>
+
+ <div class="controls controls-row">
+ <input type="text" id="inputContainer" class="span3" placeholder="Path to your container">
+ <button id="inputContainer-btn" class="btn span1" type="button">Browse&hellip;</button>
+ </div>
+ </div>
+ </form>
+</div>
+
+<div class="modal-footer">
+ <a class="btn btn-success"><i class="icon-plus icon-white"></i> Add</a>
+ <a class="btn btn-close">Close</a>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/dialogs/modal.html b/pyload/web/app/templates/default/dialogs/modal.html
new file mode 100755
index 000000000..1e44cc99c
--- /dev/null
+++ b/pyload/web/app/templates/default/dialogs/modal.html
@@ -0,0 +1,10 @@
+<div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+ <h3>Dialog</h3>
+</div>
+<div class="modal-body">
+</div>
+<div class="modal-footer">
+ <a class="btn btn-close">Close</a>
+ <a class="btn btn-primary">Save</a>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/header/layout.html b/pyload/web/app/templates/default/header/layout.html
new file mode 100644
index 000000000..0fe61c4e3
--- /dev/null
+++ b/pyload/web/app/templates/default/header/layout.html
@@ -0,0 +1,61 @@
+<div class="span3">
+ <div class="logo"></div>
+ <span class="title visible-large-screen">pyLoad</span>
+</div>
+<div class="span4 offset1">
+ <div id="progress-area">
+ <span id="progress-info">
+ </span>
+ <div class="popover bottom">
+ <div class="arrow"></div>
+ <div class="popover-inner">
+ <h3 class="popover-title">
+ Running...
+ <button type="button" class="close" aria-hidden="true">&times;</button>
+ </h3>
+ <div class="popover-content">
+ <ul class="progress-list"></ul>
+ </div>
+ </div>
+ </div>
+ </div>
+</div>
+<div class="span4">
+ <div class="header-block">
+ <i class="icon-download-alt icon-white"></i> Max. Speed:<br>
+ <i class="icon-off icon-white"></i> Running:<br>
+ <i class="icon-refresh icon-white"></i> Reconnect:<br>
+ </div>
+
+ <div class="header-block status-block"></div>
+
+ <div class="header-btn">
+ <div class="btn-group">
+ <a class="btn btn-blue btn-small" href="#"><i class="icon-user icon-white"></i> User</a>
+ <a class="btn btn-blue btn-small dropdown-toggle" data-toggle="dropdown" href="#"><span
+ class="caret"></span></a>
+ <ul class="dropdown-menu" style="right: 0; left: -100%">
+ <li><a data-nav href="/"><i class="icon-list-alt"></i> Dashboard</a></li>
+ <li><a data-nav href="/settings"><i class="icon-wrench"></i> Settings</a></li>
+ <li><a data-nav href="/accounts"><i class="icon-key"></i> Accounts</a></li>
+ <li><a data-nav href="/admin"><i class="icon-cogs"></i> Admin</a></li>
+ <li class="divider"></li>
+ <li><a data-nav href="/logout"><i class="icon-signout"></i> Logout</a></li>
+ </ul>
+ </div>
+ <div class="btn-group lower">
+ <button class="btn btn-success btn-grabber btn-mini" href="#">
+ <i class="icon-plus icon-white"></i>
+ </button>
+ <button class="btn btn-blue btn-play btn-mini" href="#">
+ <i class="icon-play icon-white"></i>
+ </button>
+ <button class="btn btn-danger btn-delete btn-mini" href="#">
+ <i class="icon-remove icon-white"></i>
+ </button>
+ </div>
+ </div>
+<span class="visible-desktop speedgraph-container">
+ <div id="speedgraph"></div>
+</span>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/header/progress.html b/pyload/web/app/templates/default/header/progress.html
new file mode 100644
index 000000000..740e18a4c
--- /dev/null
+++ b/pyload/web/app/templates/default/header/progress.html
@@ -0,0 +1,10 @@
+{{ name }}
+<span class="pull-right">{{ plugin }}</span>
+
+<div class="progress">
+ <div class="bar" style="width: {{ percent }}%"></div>
+</div>
+
+<div class="progress-status">
+ <!-- rendered by progressInfo template -->
+</div>
diff --git a/pyload/web/app/templates/default/header/progressStatus.html b/pyload/web/app/templates/default/header/progressStatus.html
new file mode 100644
index 000000000..020ed2e96
--- /dev/null
+++ b/pyload/web/app/templates/default/header/progressStatus.html
@@ -0,0 +1,8 @@
+{{#if downloading }}
+ {{ formatSize done }} of {{ formatSize total }} ({{ formatSize download.speed }}/s)
+{{ else }}
+ {{ statusmsg }}
+{{/if}}
+<span class="pull-right">
+ {{ formatTime eta }}
+</span> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/header/progressSub.html b/pyload/web/app/templates/default/header/progressSub.html
new file mode 100644
index 000000000..3400ee011
--- /dev/null
+++ b/pyload/web/app/templates/default/header/progressSub.html
@@ -0,0 +1,6 @@
+{{#if linksqueue }}
+ {{ linksqueue }} downloads left ({{ formatSize sizequeue }})
+{{/if}}
+<span class="pull-right">
+ {{ formatTime etaqueue }}
+</span> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/header/progressSup.html b/pyload/web/app/templates/default/header/progressSup.html
new file mode 100644
index 000000000..f2c0ac734
--- /dev/null
+++ b/pyload/web/app/templates/default/header/progressSup.html
@@ -0,0 +1,10 @@
+{{#if single }}
+ {{ truncate name 32}} ({{ statusmsg }})
+{{ else }}
+ {{#if downloads }}
+ {{ downloads }} downloads running {{#if speed }}({{ formatSize speed }}/s){{/if}}
+ {{ else }}
+ No running tasks
+ {{/if}}
+{{/if}}
+<i class="icon-list pull-right"></i> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/header/progressbar.html b/pyload/web/app/templates/default/header/progressbar.html
new file mode 100644
index 000000000..2775e664b
--- /dev/null
+++ b/pyload/web/app/templates/default/header/progressbar.html
@@ -0,0 +1,16 @@
+
+<div class="sup">
+</div>
+
+<div class="progress" id="globalprogress">
+ {{#if single }}
+ <div class="bar" style="width: {{ percent }}%">
+ {{ else }}
+ <div class="bar {{#if downloads }}running{{/if}}">
+ {{/if}}
+ </div>
+ </div>
+</div>
+
+<div class="sub">
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/header/status.html b/pyload/web/app/templates/default/header/status.html
new file mode 100644
index 000000000..f840b6e33
--- /dev/null
+++ b/pyload/web/app/templates/default/header/status.html
@@ -0,0 +1,3 @@
+<span class="pull-right maxspeed">{{ formatSize maxspeed }}/s</span><br>
+<span class="pull-right running">{{ paused }}</span><br>
+<span class="pull-right reconnect">{{#if reconnect }}true{{ else }}false{{/if}}</span> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/login.html b/pyload/web/app/templates/default/login.html
new file mode 100644
index 000000000..9e8d9eeb6
--- /dev/null
+++ b/pyload/web/app/templates/default/login.html
@@ -0,0 +1,28 @@
+<br>
+<div class="login">
+ <form method="post" class="form-horizontal">
+ <legend>Login</legend>
+ <div class="control-group">
+ <label class="control-label" for="inputUser">Username</label>
+ <div class="controls">
+ <input type="text" id="inputUser" placeholder="Username" name="username">
+ </div>
+ </div>
+ <div class="control-group">
+ <label class="control-label" for="inputPassword">Password</label>
+ <div class="controls">
+ <input type="password" id="inputPassword" placeholder="Password" name="password">
+ </div>
+ </div>
+ <div class="control-group">
+ <div class="controls">
+ <label class="checkbox">
+ <input type="checkbox"> Remember me
+ </label>
+ <button type="submit" class="btn">Login</button>
+ </div>
+ </div>
+ </form>
+</div>
+<br>
+<!-- TODO: Errors -->
diff --git a/pyload/web/app/templates/default/notification.html b/pyload/web/app/templates/default/notification.html
new file mode 100644
index 000000000..1b6d21e27
--- /dev/null
+++ b/pyload/web/app/templates/default/notification.html
@@ -0,0 +1,10 @@
+{{#if queries }}
+ <span class="btn-query">
+ Queries <span class="badge badge-info">{{ queries }}</span>
+ </span>
+{{/if}}
+{{#if notifications }}
+ <span class="btn-notification">
+ Notifications <span class="badge badge-success">{{ notifications }}</span>
+ </span>
+{{/if}} \ No newline at end of file
diff --git a/pyload/web/app/templates/default/settings/actionbar.html b/pyload/web/app/templates/default/settings/actionbar.html
new file mode 100644
index 000000000..25b10d463
--- /dev/null
+++ b/pyload/web/app/templates/default/settings/actionbar.html
@@ -0,0 +1,5 @@
+<div class="span2 offset1">
+</div>
+<span class="span9">
+ <button class="btn btn-small btn-blue btn-add">Add Plugin</button>
+</span> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/settings/config.html b/pyload/web/app/templates/default/settings/config.html
new file mode 100644
index 000000000..47ff45f0b
--- /dev/null
+++ b/pyload/web/app/templates/default/settings/config.html
@@ -0,0 +1,17 @@
+<legend>
+ <div class="page-header">
+ <h1>{{ label }}
+ <small>{{ description }}</small>
+ {{#if long_description }}
+ <a class="btn btn-small" data-title="Help" data-content="{{ long_description }}"><i
+ class="icon-question-sign"></i></a>
+ {{/if}}
+ </h1>
+ </div>
+</legend>
+<div class="control-content">
+</div>
+<div class="form-actions">
+ <button type="button" class="btn btn-primary">Save changes</button>
+ <button type="button" class="btn btn-reset">Reset</button>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/settings/configItem.html b/pyload/web/app/templates/default/settings/configItem.html
new file mode 100644
index 000000000..5b583b8df
--- /dev/null
+++ b/pyload/web/app/templates/default/settings/configItem.html
@@ -0,0 +1,7 @@
+ <div class="control-group">
+ <label class="control-label">{{ label }}</label>
+
+ <div class="controls">
+ <!--{# <span class="help-inline">{{ description }}</span>#}-->
+ </div>
+ </div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/settings/layout.html b/pyload/web/app/templates/default/settings/layout.html
new file mode 100644
index 000000000..39f1a2ec9
--- /dev/null
+++ b/pyload/web/app/templates/default/settings/layout.html
@@ -0,0 +1,11 @@
+<div class="span2">
+ <ul class="nav nav-list well settings-menu">
+ </ul>
+</div>
+<div class="span10">
+ <div class="well setting-box">
+ <form class="form-horizontal" action="#">
+ <h1>Please choose a config section</h1>
+ </form>
+ </div>
+</div> \ No newline at end of file
diff --git a/pyload/web/app/templates/default/settings/menu.html b/pyload/web/app/templates/default/settings/menu.html
new file mode 100644
index 000000000..893fd7b5b
--- /dev/null
+++ b/pyload/web/app/templates/default/settings/menu.html
@@ -0,0 +1,40 @@
+{{#if core}}
+<li class="nav-header"><i class="icon-globe icon-white"></i> General</li>
+{{#each core}}
+<li data-name="{{ name }}"><a href="#">{{ label }}</a></li>
+{{/each}}
+{{/if}}
+<li class="divider"></li>
+<li class="nav-header"><i class="icon-th-large icon-white"></i> Addons</li>
+{{#each addon }}
+<li class="addon" data-name="{{ name }}">
+ <a href="#" style="background-image: url({{ pluginIcon name }});">
+ {{ label }}
+ <i class="icon-remove pull-right"></i>
+ {{#if activated }}
+ <div class="addon-on">
+ active
+ {{else}}
+ <div class="addon-off">
+ inactive
+ {{/if}}
+ {{#if user_context }}
+ <!--{# TODO: tooltip #}-->
+ <i class="icon-user pull-right"></i>
+ {{else}}
+ <i class="icon-globe pull-right"></i>
+ {{/if}}
+ </div>
+ </a>
+</li>
+{{/each}}
+<li class="divider"></li>
+<li class="nav-header"><i class="icon-th-list icon-white"></i> Plugin Configs</li>
+{{#each plugin }}
+<li class="plugin" data-name="{{ name }}">
+ <a style="background-image: url({{ pluginIcon name }});">
+ {{ label }}
+ <i class="icon-remove pull-right"></i>
+ </a>
+</li>
+{{/each}} \ No newline at end of file
diff --git a/pyload/web/app/templates/default/setup.html b/pyload/web/app/templates/default/setup.html
new file mode 100644
index 000000000..e5c9f4b8c
--- /dev/null
+++ b/pyload/web/app/templates/default/setup.html
@@ -0,0 +1,16 @@
+{% extends 'default/base.html' %}
+{% block title %}
+ {{_("Setup")}} - {{ super()}}
+{% endblock %}
+
+{% block content %}
+ <div class="hero-unit">
+ <h1>You did it!</h1>
+ <p>pyLoad is running and ready for configuration.</p>
+ <p>
+ <a class="btn btn-primary btn-large">
+ Go on
+ </a>
+ </p>
+ </div>
+{% endblock %} \ No newline at end of file
diff --git a/pyload/web/app/unavailable.html b/pyload/web/app/unavailable.html
new file mode 100644
index 000000000..6706a693c
--- /dev/null
+++ b/pyload/web/app/unavailable.html
@@ -0,0 +1,18 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>WebUI not available</title>
+</head>
+<body>
+
+<h1>WebUI not available</h1>
+You are running a pyLoad version without a prebuilt webUI. You can download a build from our website or deactivate dev mode.
+If desired you can build it yourself by running:
+<ul>
+ <li>npm install</li>
+ <li>bower install</li>
+ <li>grunt build</li>
+</ul>
+
+</body>
+</html> \ No newline at end of file
diff --git a/pyload/web/bower.json b/pyload/web/bower.json
new file mode 100644
index 000000000..dfabc05d6
--- /dev/null
+++ b/pyload/web/bower.json
@@ -0,0 +1,22 @@
+{
+ "name": "pyload",
+ "version": "0.1.0",
+ "dependencies": {
+ "pyload-common": "https://github.com/pyload/pyload-common.git",
+ "requirejs": "~2.1.6",
+ "requirejs-text": "*",
+ "require-handlebars-plugin": "*",
+ "jquery": "~1.9.1",
+ "jquery.transit": "~0.9.9",
+ "jquery.cookie": "~1.3.1",
+ "jquery.animate-enhanced": "*",
+ "flot": "~0.8.1",
+ "underscore": "~1.4.4",
+ "backbone": "~1.0.0",
+ "backbone.marionette": "~1.0.3",
+ "handlebars.js": "1.0.0-rc.3",
+ "jed": "~0.5.4",
+ "select2": "~3.4.0"
+ },
+ "devDependencies": {}
+}
diff --git a/pyload/web/cnl_app.py b/pyload/web/cnl_app.py
new file mode 100644
index 000000000..90aa76d72
--- /dev/null
+++ b/pyload/web/cnl_app.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from os.path import join
+import re
+from urllib import unquote
+from base64 import standard_b64decode
+from binascii import unhexlify
+
+from pyload.utils.fs import save_filename
+
+from bottle import route, request, HTTPError
+from webinterface import PYLOAD, DL_ROOT, JS
+
+try:
+ from Crypto.Cipher import AES
+except:
+ pass
+
+
+def local_check(function):
+    # Decorator restricting a route to requests from the local machine;
+    # the ClickAndLoad endpoints must not be reachable from outside.
+    def _view(*args, **kwargs):
+        # Accept when either the client address or the Host header points at
+        # localhost (9666 is the standard ClickAndLoad port).
+        if request.environ.get('REMOTE_ADDR', "0") in ('127.0.0.1', 'localhost') \
+            or request.environ.get('HTTP_HOST','0') in ('127.0.0.1:9666', 'localhost:9666'):
+            return function(*args, **kwargs)
+        else:
+            return HTTPError(403, "Forbidden")
+
+    return _view
+
+
+@route("/flash")
+@route("/flash/:id")
+@route("/flash", method="POST")
+@local_check
+def flash(id="0"):
+    # Probe endpoint: browser extensions detect a running ClickAndLoad
+    # service via this fixed reply (pyLoad mimics JDownloader here).
+    return "JDownloader\r\n"
+
+@route("/flash/add", method="POST")
+@local_check
+def add(request):
+ package = request.POST.get('referer', None)
+ urls = filter(lambda x: x != "", request.POST['urls'].split("\n"))
+
+ if package:
+ PYLOAD.addPackage(package, urls, 0)
+ else:
+ PYLOAD.generateAndAddPackages(urls, 0)
+
+ return ""
+
+@route("/flash/addcrypted", method="POST")
+@local_check
+def addcrypted():
+
+    # ClickAndLoad v1: the client posts a DLC container; it is written into
+    # the download folder and handed to the core as a container file.
+    package = request.forms.get('referer', 'ClickAndLoad Package')
+    # '+' characters arrive as spaces in urlencoded form data; restore them.
+    dlc = request.forms['crypted'].replace(" ", "+")
+
+    dlc_path = join(DL_ROOT, save_filename(package) + ".dlc")
+    dlc_file = open(dlc_path, "wb")
+    dlc_file.write(dlc)
+    dlc_file.close()
+
+    try:
+        PYLOAD.addPackage(package, [dlc_path], 0)
+    except:
+        # NOTE(review): bare except hides the real failure; client only
+        # learns "error" via the empty HTTPError.
+        return HTTPError()
+    else:
+        return "success\r\n"
+
+@route("/flash/addcrypted2", method="POST")
+@local_check
+def addcrypted2():
+
+    # ClickAndLoad v2: 'crypted' is an AES-CBC encrypted, base64-encoded URL
+    # list; 'jk' is a JavaScript function body whose return value is the hex
+    # AES key. 'source' optionally names the package.
+    package = request.forms.get("source", None)
+    crypted = request.forms["crypted"]
+    jk = request.forms["jk"]
+
+    crypted = standard_b64decode(unquote(crypted.replace(" ", "+")))
+    if JS:
+        # Evaluate the key function with a real JS engine when available.
+        jk = "%s f()" % jk
+        jk = JS.eval(jk)
+
+    else:
+        # No JS engine: try to pull the literal out of "return '...'".
+        try:
+            jk = re.findall(r"return ('|\")(.+)('|\")", jk)[0][1]
+        except:
+            ## Test for some known js functions to decode
+            if jk.find("dec") > -1 and jk.find("org") > -1:
+                # Known obfuscation: the key is the reversed 'org' string.
+                org = re.findall(r"var org = ('|\")([^\"']+)", jk)[0][1]
+                jk = list(org)
+                jk.reverse()
+                jk = "".join(jk)
+            else:
+                print "Could not decrypt key, please install py-spidermonkey or ossp-js"
+
+    try:
+        Key = unhexlify(jk)
+    except:
+        print "Could not decrypt key, please install py-spidermonkey or ossp-js"
+        return "failed"
+
+    # The CNL2 protocol uses the key itself as the IV.
+    IV = Key
+
+    obj = AES.new(Key, AES.MODE_CBC, IV)
+    # Strip zero padding and carriage returns, then split into URLs.
+    result = obj.decrypt(crypted).replace("\x00", "").replace("\r","").split("\n")
+
+    result = filter(lambda x: x != "", result)
+
+    try:
+        if package:
+            PYLOAD.addPackage(package, result, 0)
+        else:
+            PYLOAD.generateAndAddPackages(result, 0)
+    except:
+        return "failed can't add"
+    else:
+        return "success\r\n"
+
+@route("/flashgot_pyload")
+@route("/flashgot_pyload", method="POST")
+@route("/flashgot")
+@route("/flashgot", method="POST")
+@local_check
+def flashgot():
+    # Entry point for the FlashGot browser extension.
+    # NOTE(review): direct environ indexing raises KeyError when no Referer
+    # header is sent - confirm whether FlashGot always sends one.
+    if request.environ['HTTP_REFERER'] != "http://localhost:9666/flashgot" and request.environ['HTTP_REFERER'] != "http://127.0.0.1:9666/flashgot":
+        return HTTPError()
+
+    autostart = int(request.forms.get('autostart', 0))
+    package = request.forms.get('package', None)
+    # Drop empty lines from the submitted URL list.
+    urls = filter(lambda x: x != "", request.forms['urls'].split("\n"))
+    # NOTE(review): 'dir' is read but never used.
+    folder = request.forms.get('dir', None)
+
+    if package:
+        PYLOAD.addPackage(package, urls, autostart)
+    else:
+        PYLOAD.generateAndAddPackages(urls, autostart)
+
+    return ""
+
+@route("/crossdomain.xml")
+@local_check
+def crossdomain():
+    # Serves an allow-all Flash cross-domain policy file.
+    rep = "<?xml version=\"1.0\"?>\n"
+    rep += "<!DOCTYPE cross-domain-policy SYSTEM \"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd\">\n"
+    rep += "<cross-domain-policy>\n"
+    rep += "<allow-access-from domain=\"*\" />\n"
+    rep += "</cross-domain-policy>"
+    return rep
+
+
+@route("/flash/checkSupportForUrl")
+@local_check
+def checksupport():
+
+    # Returns "true"/"false" depending on whether any plugin handles the URL.
+    url = request.GET.get("url")
+    res = PYLOAD.checkURLs([url])
+    # presumably checkURLs yields (url, plugin) pairs with plugin None when
+    # unsupported - verify against the API implementation
+    supported = (not res[0][1] is None)
+
+    return str(supported).lower()
+
+@route("/jdcheck.js")
+@local_check
+def jdcheck():
+ rep = "jdownloader=true;\n"
+ rep += "var version='9.581;'"
+ return rep
diff --git a/pyload/web/middlewares.py b/pyload/web/middlewares.py
new file mode 100644
index 000000000..ae0911cc3
--- /dev/null
+++ b/pyload/web/middlewares.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# gzip is optional on some platform
+try:
+ import gzip
+except ImportError:
+ gzip = None
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+class StripPathMiddleware(object):
+    # WSGI middleware that removes trailing slashes from the request path,
+    # so "/foo/" and "/foo" resolve to the same route.
+    def __init__(self, app):
+        self.app = app
+
+    def __call__(self, e, h):
+        e['PATH_INFO'] = e['PATH_INFO'].rstrip('/')
+        return self.app(e, h)
+
+
+class PrefixMiddleware(object):
+    # WSGI middleware that strips a configured URL prefix from PATH_INFO so
+    # the app can be mounted under a sub-path (e.g. behind a reverse proxy).
+    def __init__(self, app, prefix="/pyload"):
+        self.app = app
+        self.prefix = prefix
+
+    def __call__(self, e, h):
+        path = e["PATH_INFO"]
+        if path.startswith(self.prefix):
+            # Remove only the first (leading) occurrence of the prefix.
+            e['PATH_INFO'] = path.replace(self.prefix, "", 1)
+        return self.app(e, h)
+
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+# WSGI middleware
+# Gzip-encodes the response.
+
+class GZipMiddleWare(object):
+    # WSGI middleware that gzip-encodes the response when the client sends
+    # an Accept-Encoding header containing "gzip" (adapted from Paste).
+
+    def __init__(self, application, compress_level=6):
+        self.application = application
+        self.compress_level = int(compress_level)
+
+    def __call__(self, environ, start_response):
+        if 'gzip' not in environ.get('HTTP_ACCEPT_ENCODING', ''):
+            # nothing for us to do, so this middleware will
+            # be a no-op:
+            return self.application(environ, start_response)
+        # GzipResponse buffers (and possibly compresses) the app's output and
+        # fixes up the headers before the real start_response is called.
+        response = GzipResponse(start_response, self.compress_level)
+        app_iter = self.application(environ,
+            response.gzip_start_response)
+        if app_iter is not None:
+            response.finish_response(app_iter)
+
+        return response.write()
+
+def header_value(headers, key):
+    # Case-insensitive lookup in a WSGI header list; returns None if absent.
+    for header, value in headers:
+        if key.lower() == header.lower():
+            return value
+
+def update_header(headers, key, value):
+    # Replace (or add) a header in the list, matching case-insensitively.
+    remove_header(headers, key)
+    headers.append((key, value))
+
+def remove_header(headers, key):
+    # Remove the first case-insensitive match from the header list, if any.
+    for header, value in headers:
+        if key.lower() == header.lower():
+            headers.remove((header, value))
+            break
+
+class GzipResponse(object):
+    # Helper for GZipMiddleWare: buffers the app's response body, gzips it
+    # when eligible, then replays status/headers with a fixed Content-Length.
+
+    def __init__(self, start_response, compress_level):
+        self.start_response = start_response
+        self.compress_level = compress_level
+        # Whole response body is collected here before delivery.
+        self.buffer = StringIO()
+        self.compressible = False
+        self.content_length = None
+        # Placeholder; replaced by the real header list in gzip_start_response.
+        self.headers = ()
+
+    def gzip_start_response(self, status, headers, exc_info=None):
+        # Intercepts the app's start_response: decides compressibility from
+        # the headers and returns the buffer's write as the WSGI write() callable.
+        self.headers = headers
+        ct = header_value(headers,'content-type')
+        ce = header_value(headers,'content-encoding')
+        cl = header_value(headers, 'content-length')
+
+        # don't compress on unknown size, it may be too huge
+        cl = int(cl) if cl else 0
+
+        if ce:
+            # Already encoded upstream - leave untouched.
+            self.compressible = False
+        elif gzip is not None and ct and (ct.startswith('text/') or ct.startswith('application/')) \
+            and 'zip' not in ct and 200 < cl < 1024*1024:
+            # Only text-like payloads between 200 B and 1 MiB are compressed.
+            self.compressible = True
+            headers.append(('content-encoding', 'gzip'))
+            headers.append(('vary', 'Accept-Encoding'))
+
+        # Length changes after compression; re-added in finish_response.
+        remove_header(headers, 'content-length')
+        self.headers = headers
+        self.status = status
+        return self.buffer.write
+
+    def write(self):
+        # Returns the buffered (possibly gzipped) body as a one-element list,
+        # the shape WSGI expects from the application.
+        out = self.buffer
+        out.seek(0)
+        s = out.getvalue()
+        out.close()
+        return [s]
+
+    def finish_response(self, app_iter):
+        # Drains the app's iterable into the buffer, through a GzipFile when
+        # compression was enabled, then emits the final status and headers.
+        if self.compressible:
+            output = gzip.GzipFile(mode='wb', compresslevel=self.compress_level,
+                fileobj=self.buffer)
+        else:
+            output = self.buffer
+        try:
+            for s in app_iter:
+                output.write(s)
+            if self.compressible:
+                # Closing the GzipFile flushes the gzip trailer into buffer.
+                output.close()
+        finally:
+            if hasattr(app_iter, 'close'):
+                try:
+                    app_iter.close()
+                except :
+                    pass
+
+        content_length = self.buffer.tell()
+        update_header(self.headers, "Content-Length" , str(content_length))
+        self.start_response(self.status, self.headers) \ No newline at end of file
diff --git a/pyload/web/package.json b/pyload/web/package.json
new file mode 100644
index 000000000..fdd7b62c4
--- /dev/null
+++ b/pyload/web/package.json
@@ -0,0 +1,36 @@
+{
+ "name": "pyload",
+ "version": "0.1.0",
+ "repository": {
+ "type": "git",
+ "url": "git://github.com/pyload/pyload.git"
+ },
+ "dependencies": {},
+ "devDependencies": {
+ "grunt": "~0.4.1",
+ "grunt-contrib-copy": "~0.4.1",
+ "grunt-contrib-concat": "~0.1.3",
+ "grunt-contrib-uglify": "~0.2.2",
+ "grunt-contrib-jshint": "~0.4.1",
+ "grunt-contrib-less": "~0.5.2",
+ "grunt-contrib-cssmin": "~0.6.0",
+ "grunt-contrib-connect": "~0.2.0",
+ "grunt-contrib-clean": "~0.4.0",
+ "grunt-contrib-htmlmin": "~0.1.3",
+ "grunt-contrib-requirejs": "~0.4.0",
+ "grunt-contrib-imagemin": "~0.1.3",
+ "grunt-contrib-watch": "~0.4.0",
+ "grunt-rev": "~0.1.0",
+ "grunt-usemin": "~0.1.10",
+ "grunt-mocha": "~0.3.0",
+ "grunt-open": "~0.2.0",
+ "grunt-svgmin": "~0.1.0",
+ "grunt-concurrent": "~0.1.0",
+ "matchdep": "~0.1.1",
+ "rjs-build-analysis": "0.0.3",
+ "connect-livereload": "~0.2.0"
+ },
+ "engines": {
+ "node": ">=0.8.0"
+ }
+}
diff --git a/pyload/web/pyload_app.py b/pyload/web/pyload_app.py
new file mode 100644
index 000000000..7202c319b
--- /dev/null
+++ b/pyload/web/pyload_app.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+import time
+from os.path import join
+
+from bottle import route, static_file, response, redirect, template
+
+from webinterface import PYLOAD, PROJECT_DIR, SETUP, APP_PATH, UNAVAILALBE
+
+from utils import login_required
+
+
+@route('/icons/<path:path>')
+def serve_icon(path):
+    # TODO
+    # Plugin icons are not served yet; every request gets the generic icon.
+    return redirect('/images/icon.png')
+    # return static_file(path, root=join("tmp", "icons"))
+
+
+@route("/download/:fid")
+@login_required('Download')
+def download(fid, api):
+    # Sends a finished file to the browser; `api` is the user-scoped API
+    # handle injected by the login_required decorator.
+    path, name = api.getFilePath(fid)
+    return static_file(name, path, download=True)
+
+
+@route('/')
+def index():
+    # Landing page: serves the prebuilt webUI index, or a notice when no
+    # build is available.
+    if UNAVAILALBE:
+        return server_static("unavailable.html")
+
+    if SETUP:
+        # TODO show different page
+        pass
+
+    resp = server_static('index.html')
+
+    # Render variables into the html page
+    if resp.status_code == 200:
+        content = resp.body.read()
+        # Inject the websocket address and webinterface port for the client.
+        resp.body = template(content, ws=PYLOAD.getWSAddress(), web=PYLOAD.getConfigValue('webinterface', 'port'))
+
+    return resp
+
+# Very last route that is registered, could match all uris
+@route('/<path:path>')
+def server_static(path):
+    # Static assets may be cached by the client for one week.
+    response.headers['Expires'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
+                                                time.gmtime(time.time() + 60 * 60 * 24 * 7))
+    response.headers['Cache-control'] = "public"
+    resp = static_file(path, root=join(PROJECT_DIR, APP_PATH))
+    # Also serve from .tmp folder in dev mode
+    if resp.status_code == 404 and APP_PATH == "app":
+        return static_file(path, root=join(PROJECT_DIR, '.tmp'))
+
+    return resp \ No newline at end of file
diff --git a/pyload/web/servers.py b/pyload/web/servers.py
new file mode 100644
index 000000000..a3c51e36b
--- /dev/null
+++ b/pyload/web/servers.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from bottle import ServerAdapter as BaseAdapter
+
+class ServerAdapter(BaseAdapter):
+    # Base class for pyLoad's server backends; extends bottle's adapter with
+    # SSL material, a connection limit and a debug flag.
+    SSL = False  # whether this backend supports SSL
+    NAME = ""    # module name probed by find()
+
+    def __init__(self, host, port, key, cert, connections, debug, **kwargs):
+        BaseAdapter.__init__(self, host, port, **kwargs)
+        self.key = key
+        self.cert = cert
+        self.connection = connections
+        self.debug = debug
+
+    @classmethod
+    def find(cls):
+        """ Check if the server is available by trying to import it.
+
+        :raises Exception: importing a C-dependent library could also fail for other reasons
+        :return: True on success
+        """
+        try:
+            __import__(cls.NAME)
+            return True
+        except ImportError:
+            return False
+
+    def run(self, handler):
+        # Subclasses must start their server and serve `handler`.
+        raise NotImplementedError
+
+
+class CherryPyWSGI(ServerAdapter):
+    # Threaded WSGI server; supports SSL via class-level certificate config.
+    SSL = True
+    NAME = "threaded"
+
+    @classmethod
+    def find(cls):
+        # wsgiserver is bundled, so no import probe is needed.
+        return True
+
+    def run(self, handler):
+        from wsgiserver import CherryPyWSGIServer
+
+        if self.cert and self.key:
+            CherryPyWSGIServer.ssl_certificate = self.cert
+            CherryPyWSGIServer.ssl_private_key = self.key
+
+        server = CherryPyWSGIServer((self.host, self.port), handler, numthreads=self.connection)
+        server.start()
+
+
+class FapwsServer(ServerAdapter):
+    """ Does not work very well currently """
+
+    NAME = "fapws"
+
+    def run(self, handler): # pragma: no cover
+        import fapws._evwsgi as evwsgi
+        from fapws import base, config
+
+        port = self.port
+        if float(config.SERVER_IDENT[-2:]) > 0.4:
+            # fapws3 silently changed its API in 0.5
+            port = str(port)
+        evwsgi.start(self.host, port)
+        evwsgi.set_base_module(base)
+
+        def app(environ, start_response):
+            # fapws runs single-process; advertise that to the WSGI app.
+            environ['wsgi.multiprocess'] = False
+            return handler(environ, start_response)
+
+        evwsgi.wsgi_cb(('', app))
+        evwsgi.run()
+
+
+# TODO: ssl
+class MeinheldServer(ServerAdapter):
+    # TODO: ssl support is declared but not wired up in run().
+    SSL = True
+    NAME = "meinheld"
+
+    def run(self, handler):
+        from meinheld import server
+
+        if self.quiet:
+            # Suppress meinheld's own access/error logging in quiet mode.
+            server.set_access_logger(None)
+            server.set_error_logger(None)
+
+        server.listen((self.host, self.port))
+        server.run(handler)
+
+# todo:ssl
+class TornadoServer(ServerAdapter):
+    """ The super hyped asynchronous server by facebook. Untested. """
+
+    # TODO: ssl support is declared but not wired up in run().
+    SSL = True
+    NAME = "tornado"
+
+    def run(self, handler): # pragma: no cover
+        import tornado.wsgi, tornado.httpserver, tornado.ioloop
+
+        # Wrap the WSGI handler so tornado's async HTTP server can drive it.
+        container = tornado.wsgi.WSGIContainer(handler)
+        server = tornado.httpserver.HTTPServer(container)
+        server.listen(port=self.port)
+        tornado.ioloop.IOLoop.instance().start()
+
+
+class BjoernServer(ServerAdapter):
+    """ Fast server written in C: https://github.com/jonashaag/bjoern """
+
+    NAME = "bjoern"
+
+    def run(self, handler):
+        from bjoern import run
+
+        run(handler, self.host, self.port)
+
+
+# todo: ssl
+class EventletServer(ServerAdapter):
+    # TODO: ssl support is declared but not wired up in run().
+    SSL = True
+    NAME = "eventlet"
+
+    def run(self, handler):
+        from eventlet import wsgi, listen
+
+        try:
+            wsgi.server(listen((self.host, self.port)), handler,
+                log_output=(not self.quiet))
+        except TypeError:
+            # Needed to ignore the log
+            class NoopLog:
+                def write(self, *args):
+                    pass
+
+            # Fallback, if we have old version of eventlet
+            wsgi.server(listen((self.host, self.port)), handler, log=NoopLog())
+
+
+class FlupFCGIServer(ServerAdapter):
+    # FastCGI backend for deployments behind a web server like nginx/Apache.
+    SSL = False
+    NAME = "flup"
+
+    def run(self, handler): # pragma: no cover
+        import flup.server.fcgi
+        from flup.server.threadedserver import ThreadedServer
+
+        def noop(*args, **kwargs):
+            pass
+
+        # Monkey patch signal handler, it does not work from threads
+        ThreadedServer._installSignalHandlers = noop
+
+        self.options.setdefault('bindAddress', (self.host, self.port))
+        flup.server.fcgi.WSGIServer(handler, **self.options).run()
+
+# Order is important and gives every server precedence over others!
+# The first entry in this list whose find() succeeds is used.
+all_server = [BjoernServer, TornadoServer, EventletServer, CherryPyWSGI]
+# Some are deactivated because they have some flaws
+##all_server = [FapwsServer, MeinheldServer, BjoernServer, TornadoServer, EventletServer, CherryPyWSGI] \ No newline at end of file
diff --git a/pyload/web/setup_app.py b/pyload/web/setup_app.py
new file mode 100644
index 000000000..cd44ad08e
--- /dev/null
+++ b/pyload/web/setup_app.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from bottle import route, request, response, HTTPError, redirect
+
+from webinterface import PROJECT_DIR, SETUP
+
+def setup_required(func):
+    # Decorator: only allow access while the first-run setup is active.
+    def _view(*args, **kwargs):
+        # setup needs to be running
+        if SETUP is None:
+            # NOTE(review): bottle's redirect() raises, so execution is
+            # presumably aborted here - confirm against the bottle version used
+            redirect("/nopermission")
+
+        return func(*args, **kwargs)
+    return _view
+
+
+@route("/setup")
+@setup_required
+def setup():
+    # Setup wizard page; not implemented yet.
+    pass # TODO
diff --git a/pyload/web/utils.py b/pyload/web/utils.py
new file mode 100644
index 000000000..dae987f84
--- /dev/null
+++ b/pyload/web/utils.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from bottle import request, HTTPError, redirect
+
+from webinterface import PYLOAD, SETUP
+
+
+def set_session(request, user):
+    # Persist the authenticated user's uid in the beaker session and return
+    # the session object.
+    s = request.environ.get('beaker.session')
+    s["uid"] = user.uid
+    s.save()
+    return s
+
+
+def get_user_api(s):
+    # Resolve a beaker session to a user-scoped API handle; returns None for
+    # missing/anonymous sessions or when the core is not running.
+    if s:
+        uid = s.get("uid", None)
+        if (uid is not None) and (PYLOAD is not None):
+            return PYLOAD.withUserContext(uid)
+    return None
+
+
+def is_mobile():
+    # Heuristic mobile-browser detection for the current request.
+    # An explicit "mobile" cookie overrides all User-Agent sniffing.
+    if request.get_cookie("mobile"):
+        if request.get_cookie("mobile") == "True":
+            return True
+        else:
+            return False
+    mobile_ua = request.headers.get('User-Agent', '').lower()
+    if mobile_ua.find('opera mini') > 0:
+        return True
+    if mobile_ua.find('windows') > 0:
+        return False
+    if request.headers.get('Accept', '').lower().find('application/vnd.wap.xhtml+xml') > 0:
+        return True
+    if re.search('(up.browser|up.link|mmp|symbian|smartphone|midp|wap|phone|android)', mobile_ua) is not None:
+        return True
+    # Fall back to matching the first four UA characters against a list of
+    # known mobile-vendor prefixes.
+    mobile_ua = mobile_ua[:4]
+    mobile_agents = ['w3c ', 'acs-', 'alav', 'alca', 'amoi', 'audi', 'avan', 'benq', 'bird', 'blac', 'blaz', 'brew',
+                     'cell', 'cldc', 'cmd-',
+                     'dang', 'doco', 'eric', 'hipt', 'inno', 'ipaq', 'java', 'jigs', 'kddi', 'keji', 'leno', 'lg-c',
+                     'lg-d', 'lg-g', 'lge-',
+                     'maui', 'maxo', 'midp', 'mits', 'mmef', 'mobi', 'mot-', 'moto', 'mwbp', 'nec-', 'newt', 'noki',
+                     'palm', 'pana', 'pant',
+                     'phil', 'play', 'port', 'prox', 'qwap', 'sage', 'sams', 'sany', 'sch-', 'sec-', 'send', 'seri',
+                     'sgh-', 'shar', 'sie-',
+                     'siem', 'smal', 'smar', 'sony', 'sph-', 'symb', 't-mo', 'teli', 'tim-', 'tosh', 'tsm-', 'upg1',
+                     'upsi', 'vk-v', 'voda',
+                     'wap-', 'wapa', 'wapi', 'wapp', 'wapr', 'webc', 'winw', 'winw', 'xda ', 'xda-']
+    if mobile_ua in mobile_agents:
+        return True
+    return False
+
+
+def login_required(perm=None):
+ def _dec(func):
+ def _view(*args, **kwargs):
+
+ # In case of setup, no login methods can be accessed
+ if SETUP is not None:
+ redirect("/setup")
+
+ s = request.environ.get('beaker.session')
+ api = get_user_api(s)
+ if api is not None:
+ if perm:
+ if api.user.hasPermission(perm):
+ if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
+ return HTTPError(403, "Forbidden")
+ else:
+ return redirect("/nopermission")
+
+ kwargs["api"] = api
+ return func(*args, **kwargs)
+ else:
+ if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
+ return HTTPError(403, "Forbidden")
+ else:
+ return redirect("/login")
+
+ return _view
+
+ return _dec
diff --git a/pyload/web/webinterface.py b/pyload/web/webinterface.py
new file mode 100644
index 000000000..206603f27
--- /dev/null
+++ b/pyload/web/webinterface.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###############################################################################
+# Copyright(c) 2008-2013 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+import sys
+
+from os.path import join, abspath, dirname, exists
+
+PROJECT_DIR = abspath(dirname(__file__))
+PYLOAD_DIR = abspath(join(PROJECT_DIR, "..", ".."))
+
+import bottle
+from bottle import run, app
+
+from middlewares import StripPathMiddleware, GZipMiddleWare, PrefixMiddleware
+
+# Exactly one of SETUP / PYLOAD is populated below: SETUP while the first-run
+# wizard owns the process, PYLOAD once the core is running.
+SETUP = None
+PYLOAD = None
+
+import ServerThread
+
+if not ServerThread.core:
+    if ServerThread.setup:
+        SETUP = ServerThread.setup
+        config = SETUP.config
+    else:
+        raise Exception("Could not access pyLoad Core")
+else:
+    PYLOAD = ServerThread.core.api
+    config = ServerThread.core.config
+
+from pyload.utils.JsEngine import JsEngine
+JS = JsEngine()
+
+TEMPLATE = config.get('webinterface', 'template')
+DL_ROOT = config.get('general', 'download_folder')
+PREFIX = config.get('webinterface', 'prefix')
+
+# Normalize the mount prefix to the form "/name" (no trailing slash).
+if PREFIX:
+    PREFIX = PREFIX.rstrip("/")
+    if PREFIX and not PREFIX.startswith("/"):
+        PREFIX = "/" + PREFIX
+
+# NOTE(review): "UNAVAILALBE" is a typo, but the name is imported by
+# pyload_app.py - renaming must be coordinated across files.
+APP_PATH = "dist"
+UNAVAILALBE = False
+
+# webUI build is available
+if exists(join(PROJECT_DIR, "app", "components")) and exists(join(PROJECT_DIR, ".tmp")) and config.get('webinterface', 'develop'):
+    APP_PATH = "app"
+elif not exists(join(PROJECT_DIR, "dist", "index.html")):
+    UNAVAILALBE = True
+
+DEBUG = config.get("general", "debug_mode") or "-d" in sys.argv or "--debug" in sys.argv
+bottle.debug(DEBUG)
+
+
+# Middlewares
+from beaker.middleware import SessionMiddleware
+
+session_opts = {
+    'session.type': 'file',
+    'session.cookie_expires': False,
+    'session.data_dir': './tmp',
+    'session.auto': False
+}
+
+# WSGI stack, innermost first: bottle app -> sessions -> slash stripping ->
+# gzip -> (optional) prefix stripping.
+session = SessionMiddleware(app(), session_opts)
+web = StripPathMiddleware(session)
+web = GZipMiddleWare(web)
+
+if PREFIX:
+    web = PrefixMiddleware(web, prefix=PREFIX)
+
+# Importing the app modules registers their routes on the default bottle app.
+import api_app
+import cnl_app
+import setup_app
+# Last routes to register,
+import pyload_app
+
+# Server Adapter
+def run_server(host, port, server):
+    # Entry point used by ServerThread: serve the fully wrapped WSGI stack
+    # with the chosen server backend.
+    run(app=web, host=host, port=port, quiet=True, server=server)
+
+
+if __name__ == "__main__":
+    # Standalone development run on bottle's default server.
+    run(app=web, port=8001)