summary refs log tree commit diff stats
path: root/module
diff options
context:
space:
mode:
Diffstat (limited to 'module')
-rw-r--r--module/plugins/accounts/ShareonlineBiz.py5
-rw-r--r--module/plugins/crypter/LinkCryptWs.py36
-rw-r--r--module/plugins/crypter/ShareLinksBiz.py9
-rw-r--r--module/plugins/hooks/WarezWorld.py277
-rw-r--r--module/plugins/internal/Account.py2
5 files changed, 298 insertions, 31 deletions
diff --git a/module/plugins/accounts/ShareonlineBiz.py b/module/plugins/accounts/ShareonlineBiz.py
index 24b7e98ca..47f6043dc 100644
--- a/module/plugins/accounts/ShareonlineBiz.py
+++ b/module/plugins/accounts/ShareonlineBiz.py
@@ -9,7 +9,7 @@ from module.plugins.internal.utils import set_cookie
class ShareonlineBiz(Account):
__name__ = "ShareonlineBiz"
__type__ = "account"
- __version__ = "0.42"
+ __version__ = "0.43"
__status__ = "testing"
__description__ = """Share-online.biz account plugin"""
@@ -32,9 +32,6 @@ class ShareonlineBiz(Account):
if not 'a' in api:
self.fail_login(res.strip('*'))
- if api['a'].lower() == "not_available":
- self.fail_login(_("No info available"))
-
return api
diff --git a/module/plugins/crypter/LinkCryptWs.py b/module/plugins/crypter/LinkCryptWs.py
index d849f08c2..9d421ad03 100644
--- a/module/plugins/crypter/LinkCryptWs.py
+++ b/module/plugins/crypter/LinkCryptWs.py
@@ -14,7 +14,7 @@ from module.plugins.internal.utils import html_unescape
class LinkCryptWs(Crypter):
__name__ = "LinkCryptWs"
__type__ = "crypter"
- __version__ = "0.14"
+ __version__ = "0.15"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?linkcrypt\.ws/(dir|container)/(?P<ID>\w+)'
@@ -24,7 +24,8 @@ class LinkCryptWs(Crypter):
__license__ = "GPLv3"
__authors__ = [("kagenoshin", "kagenoshin[AT]gmx[DOT]ch"),
("glukgluk", None),
- ("Gummibaer", None)]
+ ("Gummibaer", None),
+ ("Arno-Nymous", None)]
CRYPTED_KEY = "crypted"
@@ -107,7 +108,7 @@ class LinkCryptWs(Crypter):
def is_captcha_protected(self):
- if 'id="captcha">' in self.data:
+ if ('Linkcrypt.ws // Captx' in self.data) or ('Linkcrypt.ws // TextX' in self.data):
self.log_debug("Links are captcha protected")
return True
else:
@@ -132,7 +133,7 @@ class LinkCryptWs(Crypter):
def unlock_captcha_protection(self):
- captcha_url = re.search(r'<form.*?id\s*?=\s*?"captcha"[^>]*?>.*?<\s*?input.*?src="(.+?)"', self.data, re.I | re.S).group(1)
+ captcha_url = 'http://linkcrypt.ws/captx.html?secid=id=&id='
captcha_code = self.captcha.decrypt(captcha_url, input_type="gif", output_type='positional')
self.data = self.load(self.pyfile.url, post={'x': captcha_code[0], 'y': captcha_code[1]})
@@ -148,15 +149,9 @@ class LinkCryptWs(Crypter):
def getunrarpw(self):
- sitein = self.data
- indexi = sitein.find("|source|") + 8
- indexe = sitein.find("|", indexi)
-
- unrarpw = sitein[indexi:indexe]
-
- if unrarpw not in ("Password", "Dateipasswort"):
- self.log_debug("File password set to: [%s]"% unrarpw)
- self.pyfile.package().password = unrarpw
+ # Skip password parsing: the method was unreliable because the form data is scrambled,
+ # so the exact position of the password within it cannot be predicted.
+ return
def handle_errors(self):
@@ -198,8 +193,8 @@ class LinkCryptWs(Crypter):
try:
res = self.load("http://linkcrypt.ws/out.html", post = {'file':weblink_id})
- indexs = res.find("window.location =") + 19
- indexe = res.find('"', indexs)
+ indexs = res.find("var url = ") + 11
+ indexe = res.find("'", indexs)
link2 = res[indexs:indexe]
@@ -215,7 +210,7 @@ class LinkCryptWs(Crypter):
def get_container_html(self):
self.container_html = []
- script = re.search(r'<div.*?id="ad_cont".*?<script.*?javascrip[^>]*?>(.*?)</script', self.data, re.I | re.S)
+ script = re.search(r'<script.*?javascript[^>]*?>.*(eval.*?)\s*eval.*</script>.*<div class="clearfix', self.data, re.I | re.S)
if script:
container_html_text = script.group(1)
@@ -261,7 +256,7 @@ class LinkCryptWs(Crypter):
cnl_line = None
for line in self.container_html:
- if "cnl" in line:
+ if "addcrypted2" in line:
cnl_line = line
break
@@ -283,7 +278,7 @@ class LinkCryptWs(Crypter):
def _get_cipher_params(self, cnl_section):
#: Get jk
- jk_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkCryptWs.JK_KEY
+ jk_re = r'<INPUT.*?NAME="%s".*?VALUE="\D*(\d*)\D*"' % LinkCryptWs.JK_KEY
vjk = re.findall(jk_re, cnl_section)
#: Get crypted
@@ -297,10 +292,9 @@ class LinkCryptWs(Crypter):
def _get_links(self, crypted, jk):
#: Get key
- jreturn = self.js.eval("%s f()" % jk)
- key = binascii.unhexlify(jreturn)
+ key = binascii.unhexlify(jk)
- self.log_debug("JsEngine returns value [%s]" % jreturn)
+ self.log_debug("JsEngine returns value [%s]" % key)
#: Decrypt
Key = key
diff --git a/module/plugins/crypter/ShareLinksBiz.py b/module/plugins/crypter/ShareLinksBiz.py
index 80aeb430a..e2ddfd926 100644
--- a/module/plugins/crypter/ShareLinksBiz.py
+++ b/module/plugins/crypter/ShareLinksBiz.py
@@ -11,7 +11,7 @@ from module.plugins.internal.Crypter import Crypter, create_getInfo
class ShareLinksBiz(Crypter):
__name__ = "ShareLinksBiz"
__type__ = "crypter"
- __version__ = "1.21"
+ __version__ = "1.22"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?(share-links|s2l)\.biz/(?P<ID>_?\w+)'
@@ -22,14 +22,14 @@ class ShareLinksBiz(Crypter):
__description__ = """Share-Links.biz decrypter plugin"""
__license__ = "GPLv3"
- __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es")]
+ __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"),
+ ("Arno-Nymous", None)]
def setup(self):
self.base_url = None
self.file_id = None
self.package = None
- self.captcha = False
def decrypt(self, pyfile):
@@ -49,7 +49,6 @@ class ShareLinksBiz(Crypter):
self.handle_errors()
if self.is_captcha_protected():
- self.captcha = True
self.unlock_captcha_protection()
self.handle_errors()
@@ -139,7 +138,7 @@ class ShareLinksBiz(Crypter):
captchaUrl = self.base_url + '/captcha.gif?d=%s&PHPSESSID=%s' % (m.group(1), m.group(2))
self.log_debug("Waiting user for correct position")
coords = self.captcha.decrypt(captchaUrl, input_type="gif", output_type='positional')
- self.log_debug("Captcha resolved, coords %s" % coords)
+ self.log_debug("Captcha resolved! Coords: {}, {}".format(*coords))
#: Resolve captcha
href = self._resolve_coords(coords, captchaMap)
diff --git a/module/plugins/hooks/WarezWorld.py b/module/plugins/hooks/WarezWorld.py
new file mode 100644
index 000000000..b097af8b2
--- /dev/null
+++ b/module/plugins/hooks/WarezWorld.py
@@ -0,0 +1,277 @@
+import httplib
+import re
+import StringIO
+import sys
+import traceback
+import urllib
+import urllib2
+from bs4 import BeautifulSoup as Soup
+from datetime import datetime
+from module.plugins.internal.Addon import Addon
+from pytz import timezone
+
+
+UNIX_EPOCH = timezone('UTC').localize(datetime(1970, 1, 1))
+
+
+def notifyPushover(**kwargs):
+ Data = kwargs
+ Connection = httplib.HTTPSConnection('api.pushover.net:443')
+ Connection.request('POST', '/1/messages.json', urllib.urlencode(Data),
+ {'Content-type': 'application/x-www-form-urlencoded'})
+ Response = Connection.getresponse()
+
+def replaceUmlauts(title):
+ title = title.replace(unichr(228), 'ae').replace(unichr(196), 'Ae')
+ title = title.replace(unichr(252), 'ue').replace(unichr(220), 'Ue')
+ title = title.replace(unichr(246), 'oe').replace(unichr(214), 'Oe')
+ title = title.replace(unichr(223), 'ss')
+ title = title.replace('&amp;', '&')
+ return title
+
+def getUnixTimestamp(String):
+ String = re.search(r'^.*(\d{2}.\d{2}.\d{4})(\d{1,2}):(\d{2}).*$', String)
+ if String:
+ String = String.group(1) + \
+ ('0' + String.group(2) if String.group(2) < '10' else String.group(2)) + \
+ String.group(3)
+ String = String.replace('.', '')
+
+ UnixTimestamp = (
+ timezone('Europe/Berlin').localize(datetime.strptime(String, '%d%m%Y%H%M')).astimezone(timezone('UTC'))
+ - UNIX_EPOCH
+ ).total_seconds()
+ return UnixTimestamp
+
+
+class WarezWorld(Addon):
+ __name__ = 'WarezWorld'
+ __type__ = 'hook'
+ __status__ = 'testing'
+ __author_name__ = ('Arno-Nymous')
+ __author_mail__ = ('Arno-Nymous@users.noreply.github.com')
+ __version__ = '1.2'
+ __description__ = 'Get new movies from Warez-World.org'
+ __config__ = [
+ ('activated', 'bool', 'Active', 'False'),
+ ('interval', 'int', 'Waiting time until next run in minutes', '60'),
+ ('minYear', 'long', 'No movies older than year', '1970'),
+ ('pushoverAppToken', 'str', 'Pushover app token', ''),
+ ('pushoverUserToken', 'str', 'Pushover user token', ''),
+ ('preferredHosters', 'str', 'Preferred hosters (seperated by;)','Share-online.biz'),
+ ('quality', '720p;1080p', 'Video quality', '720p'),
+ ('ratingCollector', 'float', 'Send releases to link collector with an IMDb rating of (or higher)', '6.5'),
+ ('ratingQueue', 'float', 'Send releases to queue with an IMDb rating of (or higher)', '8.0'),
+ ('rejectGenres', 'str', 'Reject movies of an of the following genres (seperated by ;)', 'Anime;Documentary;Family'),
+ ('rejectReleaseTokens', 'str', 'Reject releases containing any of the following tokens (seperated by ;)', '.ts.;.hdts.'),
+ ('soundError', ';none;alien;bike;bugle;cashregister;classical;climb;cosmic;echo;falling;gamelan;incoming;intermission;magic;mechanical;persistent;pianobar;pushover;siren;spacealarm;tugboat;updown', 'Use this sound for errors pushed via Pushover (empty for default)', ''),
+ ('soundNotification', ';none;alien;bike;bugle;cashregister;classical;climb;cosmic;echo;falling;gamelan;incoming;intermission;magic;mechanical;persistent;pianobar;pushover;siren;spacealarm;tugboat;updown', 'Use this sound for notifications pushed via Pushover (empty for default)', '')
+ ]
+
+ UrlOpener = urllib2.build_opener()
+ RejectGenres = []
+ RejectReleaseTokens = []
+ LastReleaseTimestamp = None
+ # Initialize the dictionary keys here so that later code can update them via augmented
+ # assignment operators without any further key-existence checks
+ Statistics = {'Total': 0, 'Added': 0, 'Skipped': 0, 'AlreadyProcessed': 0}
+
+ def __init__(self, *args, **kwargs):
+ super(WarezWorld, self).__init__(*args, **kwargs)
+ self.start_periodical(self.get_config('interval'))
+
+ def periodical(self):
+ self.log_info(u'Start periodical run...')
+
+ self.interval = self.get_config('interval') * 60
+ self.RejectGenres = self.get_config('rejectGenres').split(';')
+ self.PreferredHosters = self.get_config('preferredHosters').lower().split(';')
+ self.RejectReleaseTokens = self.get_config('rejectReleaseTokens').lower().split(';')
+ self.LastReleaseTimestamp = float(self.retrieve('LastReleaseTimestamp', 0))
+ # Setting statistics to 0 by iterating over dictionary items
+ # instead of recreating dictionary over and over
+ for Key in self.Statistics:
+ self.Statistics[Key] = 0
+
+ try:
+ Request = urllib2.Request('http://warez-world.org/kategorie/filme', 'html5lib')
+ Request.add_header('User-Agent', 'Mozilla/5.0')
+ Page = Soup(self.UrlOpener.open(Request).read())
+ Items = Page.findAll('li', class_='main-single')
+ Releases = []
+
+ for Item in Items:
+ Releases.append({
+ 'MovieName': Item.find('span', class_='main-rls').text,
+ 'ReleaseName': re.search(r'<br/>(.*)</span>', unicode(Item.find('span', class_='main-rls'))).group(1),
+ 'ReleaseLink': unicode(Item.find('span', class_='main-rls').a['href']),
+ 'ReleaseDate': getUnixTimestamp(unicode(Item.find(class_='main-date').text))
+ })
+ self.log_info(u'{0} releases found'.format(len(Releases)))
+
+ for Release in Releases[::-1]:
+ if (Release['ReleaseDate'] < self.LastReleaseTimestamp):
+ self.log_debug(u'Release already processed \"{0}\"'.format (Release['ReleaseName']))
+ self.Statistics['AlreadyProcessed'] += 1
+ continue
+ self.log_debug(u'Processing release \"{0}\"'.format(Release['ReleaseName']))
+ Release['MovieYear'] = 1900
+ Release['MovieRating'] = 0
+ Release['MovieGenres'] = []
+ if self.parseRelease(Release):
+ self.downloadRelease(Release)
+
+ self.store('LastReleaseTimestamp', Releases[0]['ReleaseDate'])
+ self.log_debug(u'Last parsed release timestamp is {0}'.format(Releases[0]['ReleaseDate']))
+
+ self.Statistics['Total'] = sum(self.Statistics.itervalues())
+ self.log_info(u'Periodical run finished. Statistics: {0} total, {1} added, {2} skipped, {3} already processed'.format(
+ self.Statistics['Total'],
+ self.Statistics['Added'],
+ self.Statistics['Skipped'],
+ self.Statistics['AlreadyProcessed']
+ ))
+ except:
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ output = StringIO.StringIO()
+ traceback.print_exception(exc_type, exc_value, exc_traceback, file=output)
+ if 'Release' in locals():
+ msg = '<b>Stacktrace</b>\n{0}\n<b>Release</b>\n{1}\n\n<b>Date</b>\n{2}'.format(
+ output.getvalue(), Release['ReleaseName'].encode('utf-8'), Release['ReleaseDate']
+ )
+ else:
+ msg = '<b>Stacktrace</b>\n{0}'.format(output.getvalue())
+ notifyPushover(
+ token=self.get_config('pushoverAppToken'),
+ user=self.get_config('pushoverUserToken'),
+ title='Error in script \"WarezWorld.py\"',
+ message=msg,
+ sound=self.get_config('soundError'),
+ html=1
+ )
+ raise
+
+ def parseRelease(self, Release):
+ if any([
+ set(re.split(r'[\. ]', Release['ReleaseName'].lower())) & set(self.RejectReleaseTokens),
+ not(self.get_config('quality').lower() in Release['ReleaseName'].lower())
+ ]):
+ self.log_debug(u'...Skip release ({0})'.format("Release name contains unwanted tokens or quality mismatch"))
+ self.Statistics['Skipped'] += 1
+ return False
+
+ Request = urllib2.Request(Release['ReleaseLink'], 'html5lib')
+ Request.add_header('User-Agent', 'Mozilla/5.0')
+ ReleasePage = Soup(self.UrlOpener.open(Request).read())
+
+ DownloadLinks = ReleasePage.findAll('div', id='download-links')
+ if DownloadLinks:
+ for DownloadLink in DownloadLinks:
+ if DownloadLink.a.string and DownloadLink.a.string.lower() in self.PreferredHosters:
+ Release['DownloadLink'] = DownloadLink.a['href']
+ break
+ if 'DownloadLink' not in Release:
+ self.log_debug('...No download link of preferred hoster found')
+ return False
+
+ ReleaseNfo = ReleasePage.find('div', class_='spoiler')
+ ImdbUrl = re.search(r'(http://)?.*(imdb\.com/title/tt\d+)\D', unicode(ReleaseNfo))
+ if ImdbUrl:
+ Release['ImdbUrl'] = 'http://www.' + ImdbUrl.group(2)
+ self.addImdbData(Release)
+ else:
+ for Div in ReleasePage.findAll('div', class_='ui2'):
+ if Div.a and Div.a.string == 'IMDb-Seite':
+ Request = urllib2.Request(urllib.quote_plus(Div.a['href'].encode('utf-8'), '/:?='))
+ ImdbPage = Soup(self.UrlOpener.open(Request).read())
+ if ImdbPage.find('table', class_='findList'):
+ Release['ImdbUrl'] = 'http://www.imdb.com' + \
+ ImdbPage.find('td', class_='result_text').a['href']
+ self.addImdbData(Release)
+ else:
+ self.log_debug(u'...Could not obtain IMDb data for release...Send to link collector')
+ self.Statistics['Added'] += 1
+ break
+
+ if all([Release['MovieYear'] >= self.get_config('minYear'),
+ Release['MovieRating'] >= self.get_config('ratingCollector'),
+ not(set(Release['MovieGenres']) & set(self.RejectGenres))]):
+ return True
+ else:
+ self.log_debug(u'...Skip release ({0})'.format('Movie too old, poor IMDb rating or unwanted genres'))
+ self.Statistics['Skipped'] += 1
+ return False
+
+ def addImdbData(self, Release):
+ self.log_debug(u'...Fetching IMDb data for release ({0})'.format(Release['ImdbUrl']))
+
+ Request = urllib2.Request(Release['ImdbUrl'])
+ Request.add_header('User-Agent', 'Mozilla/5.0')
+ ImdbPage = Soup(self.UrlOpener.open(Request).read())
+
+ MovieName = ImdbPage.find('span', {'itemprop': 'name'}).string
+ # For the year, a tiny bit of BeautifulSoup magic is needed, as on IMDb it can
+ # sometimes be formatted as a link and sometimes not
+ try:
+ MovieYear = ImdbPage.find('h1', class_='header').find('span', class_='nobr').find(
+ text=re.compile(r'\d{4}')
+ ).strip(u' ()\u2013')
+ except:
+ MovieYear = 0
+ self.log_debug('...Could not parse movie year ({0})'.format(Release['ImdbUrl']))
+ try:
+ MovieRating = ImdbPage.find('span', {'itemprop': 'ratingValue'}).string.replace(',', '.')
+ except:
+ MovieRating = 0
+ self.log_debug(u'...Could not parse movie rating ({0})'.format(MovieName, Release['ImdbUrl']))
+ MovieGenres = []
+ try:
+ for Genre in ImdbPage.find('div', {'itemprop': 'genre'}).findAll('a'):
+ MovieGenres.append(Genre.string.strip())
+ except:
+ self.log_debug(u'...Could not parse movie genres ({0})'.format(Release['ImdbUrl']))
+
+ Release['MovieName'] = MovieName
+ Release['MovieYear'] = MovieYear
+ Release['MovieRating'] = MovieRating
+ Release['MovieGenres'] = MovieGenres
+
+ def downloadRelease(self, Release):
+ Storage = self.retrieve(u'{0} ({1})'.format(Release['MovieName'], Release['MovieYear']))
+
+ if Storage == '1':
+ self.log_debug(u'Skip release ({0})'.format('already downloaded'))
+ self.Statistics['Skipped'] += 1
+ else:
+ Storage = u'{0} ({1})'.format(Release['MovieName'], Release['MovieYear'])
+ if Release['MovieRating'] >= self.get_config('ratingQueue'):
+ self.pyload.api.addPackage(Storage + ' IMDb: ' + Release['MovieRating'],
+ [Release['DownloadLink']], 1)
+ PushoverTitle = 'New movie added to queue'
+ self.log_info(u'New movie added to queue ({0})'.format(Storage))
+ else:
+ self.pyload.api.addPackage(Storage + ' IMDb: ' + Release['MovieRating'],
+ [Release['DownloadLink']], 0)
+ PushoverTitle = 'New movie added to link collector'
+ self.log_info(u'New movie added to link collector ({0})'.format(Storage))
+
+ self.Statistics['Added'] += 1
+
+ notifyPushover(
+ token=self.get_config('pushoverAppToken'),
+ user=self.get_config('pushoverUserToken'),
+ title=PushoverTitle,
+ message='<b>{0} ({1})</b>\n<i>Rating:</i> {2}\n<i>Genres:</i> {3}\n\n<i>{4}</i>'.format(
+ Release['MovieName'].encode('utf-8'),
+ Release['MovieYear'].encode('utf-8'),
+ Release['MovieRating'].encode('utf-8'),
+ ', '.join(Release['MovieGenres']).encode('utf-8'),
+ Release['ReleaseName'].encode('utf-8')
+ ),
+ sound=self.get_config('soundNotification'),
+ url=(Release['ImdbUrl'].encode('utf-8') if 'ImdbUrl' in Release else ''),
+ url_title='View on IMDb',
+ html=1
+ )
+
+ self.store(Storage, '1')
diff --git a/module/plugins/internal/Account.py b/module/plugins/internal/Account.py
index b02538c68..ba8db0a6d 100644
--- a/module/plugins/internal/Account.py
+++ b/module/plugins/internal/Account.py
@@ -209,7 +209,7 @@ class Account(Plugin):
self.sync()
clear = lambda x: {} if isinstance(x, dict) else [] if isiterable(x) else None
- self.info['data'] = dict((k, clear(v)) for k, v in self.info['data'])
+ self.info['data'] = dict((k, clear(v)) for k, v in self.info['data'].iteritems())
self.info['data']['options'] = {'limitdl': ['0']}
self.syncback()