path: root/module/plugins/hoster/FourSharedCom.py
author     zoidberg10 <zoidberg@mujmail.cz>    2011-11-13 00:47:02 +0100
committer  zoidberg10 <zoidberg@mujmail.cz>    2011-11-13 00:47:02 +0100
commit     e3a62f8a2e9dcb4212f6fa8e1a94f832ad66d35a (patch)
tree       121b107759d11e11890267355119ecf47e3e7fba /module/plugins/hoster/FourSharedCom.py
parent     Merged in nick_de/pyload (pull request #5) (diff)
download   pyload-e3a62f8a2e9dcb4212f6fa8e1a94f832ad66d35a.tar.xz
Move getFileInfo to SimpleHoster.py; update Mediafire
Diffstat (limited to 'module/plugins/hoster/FourSharedCom.py')
-rw-r--r--    module/plugins/hoster/FourSharedCom.py    99
1 file changed, 32 insertions, 67 deletions
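The commit message above summarizes the change: the per-plugin file-info parsing is moved into module/plugins/internal/SimpleHoster.py, and this plugin now calls the shared parseFileInfo helper. SimpleHoster.py itself is not part of this diff, so the following is only a sketch of what that helper plausibly looks like, reconstructed from the logic deleted below; the call signature (plugin class, url, html), the patterns it reads, and the status codes 1 (offline) and 2 (online) appear in the diff, while everything else is an assumption.

# Sketch only: a plausible parseFileInfo(), reconstructed from the code this
# commit removes from FourSharedCom.py. The real helper lives in
# module/plugins/internal/SimpleHoster.py and is not shown in this diff.
import re

def parseFileInfo(plugin, url, html):
    # Defaults: name falls back to the URL, size unknown, status 3 (assumed
    # "unknown" code; 1 = offline and 2 = online are the codes used below).
    name, size, status = url, 0, 3

    if re.search(plugin.FILE_OFFLINE_PATTERN, html):
        status = 1  # offline
    else:
        found = re.search(plugin.FILE_SIZE_PATTERN, html)
        if found:
            size, units = float(found.group(1).replace(',', '')), found.group(2)
            # Normalize kB/KB/KiB/MB/GB... by their first letter.
            size *= 1024 ** {'K': 1, 'M': 2, 'G': 3}.get(units[0].upper(), 0)

        found = re.search(plugin.FILE_NAME_PATTERN, html)
        if found:
            name = found.group(1)

        if found or size > 0:
            status = 2  # online

    return name, size, status, url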
diff --git a/module/plugins/hoster/FourSharedCom.py b/module/plugins/hoster/FourSharedCom.py
index 03da15fb0..1c12a2d34 100644
--- a/module/plugins/hoster/FourSharedCom.py
+++ b/module/plugins/hoster/FourSharedCom.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-from module.plugins.Hoster import Hoster
+from module.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo, PluginParseError
from module.network.RequestFactory import getURL
import re
@@ -9,82 +9,47 @@ def getInfo(urls):
result = []
for url in urls:
- html = getURL(url, decode=True)
- if re.search(FourSharedCom.FILE_OFFLINE_PATTERN, html):
- # File offline
- result.append((url, 0, 1, url))
- else:
- # Get file info
- name, size = url, 0
-
- found = re.search(FourSharedCom.FILE_SIZE_PATTERN, html)
- if found is not None:
- size, units = float(found.group(1).replace(',','')), found.group(2)
- size = size * 1024 ** {'KB': 1, 'MB': 2, 'GB': 3}[units]
-
- found = re.search(FourSharedCom.FILE_NAME_PATTERN, html)
- if found is not None:
- name = re.sub(r"&#(\d+).", lambda m: unichr(int(m.group(1))), found.group(1))
-
- if found or size > 0:
- result.append((name, size, 2, url))
+ name, size, status, url = parseFileInfo(FourSharedCom, url, getURL(url, decode=True))
+ if status == 2:
+ name = re.sub(r"&#(\d+).", lambda m: unichr(int(m.group(1))), name)
+        result.append((name, size, status, url))
+
yield result
-
-class FourSharedCom(Hoster):
+class FourSharedCom(SimpleHoster):
__name__ = "FourSharedCom"
__type__ = "hoster"
__pattern__ = r"http://[\w\.]*?4shared(-china)?\.com/(account/)?(download|get|file|document|photo|video|audio)/.+?/.*"
- __version__ = "0.2"
+ __version__ = "0.21"
__description__ = """4Shared Download Hoster"""
__author_name__ = ("jeix", "zoidberg")
__author_mail__ = ("jeix@hasnomail.de", "zoidberg@mujmail.cz")
FILE_NAME_PATTERN = '<meta name="title" content="([^"]+)" />'
- FILE_SIZE_PATTERN = '<span title="Size: ([0-9,.]+) (KB|MB|GB)">'
- FILE_OFFLINE_PATTERN = 'The file link that you requested is not valid\.'
-
- def setup(self):
- self.multiDL = False
+ FILE_SIZE_PATTERN = '<span title="Size: ([0-9,.]+) ([kKMG]i?B)">'
+ FILE_OFFLINE_PATTERN = 'The file link that you requested is not valid\.|This file was deleted.'
+ FREE_LINK_PATTERN = '<a href="([^"]+)" class="dbtn"'
+ DOWNLOAD_URL_PATTERN = "<div class=\"(?:dl|xxlarge bold)\">\s*<a href='([^']+)'"
def process(self, pyfile):
-
self.html = self.load(pyfile.url, decode=True)
- self.getFileInfo(pyfile)
- self.handleFree(pyfile)
-
- def getFileInfo(self, pyfile):
- if re.search(self.FILE_OFFLINE_PATTERN, self.html): self.offline()
-
- found = re.search(self.FILE_NAME_PATTERN, self.html)
- if not found: self.fail("Parse error (file name)")
- pyfile.name = re.sub(r"&#(\d+).", lambda m: unichr(int(m.group(1))), found.group(1))
-
- found = re.search(self.FILE_SIZE_PATTERN, self.html)
- if found is None: self.fail("Parse error (file size)")
- size, units = float(found.group(1).replace(',','')), found.group(2)
- pyfile.size = size * 1024 ** {'KB': 1, 'MB': 2, 'GB': 3}[units]
-
- def handleFree(self, pyfile):
- tmp_link = link = ""
- wait = 20
-
- for line in self.html.splitlines():
- if "dbtn" in line:
- tmp_link = line.split('href="')[1].split('"')[0]
-
- if tmp_link:
- self.html = self.load(tmp_link).splitlines()
- for i, line in enumerate(self.html):
- if "id='divDLStart'" in line:
- link = self.html[i+2].split("<a href='")[1].split("'")[0]
- elif '<div class="sec">' in line:
- wait = int(line.split(">")[1].split("<")[0])
-
- self.setWait(wait)
- self.wait()
-
- if link:
- self.download(link)
- else:
-        self.offline()
\ No newline at end of file
+ self.getFileInfo()
+ pyfile.name = re.sub(r"&#(\d+).", lambda m: unichr(int(m.group(1))), pyfile.name)
+ self.handleFree()
+
+ def handleFree(self):
+ found = re.search(self.FREE_LINK_PATTERN, self.html)
+ if not found: raise PluginParseError('Free download button')
+ link = found.group(1)
+
+ self.html = self.load(link)
+
+ found = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
+ if not found: raise PluginParseError('Download link')
+ link = found.group(1)
+
+ self.setWait(20)
+ self.wait()
+ self.download(link)
+
+    
\ No newline at end of file
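For completeness, the refactored getInfo() keeps the generator interface of the old version: it takes a batch of URLs and yields a list of (name, size, status, url) tuples. Below is a minimal, hypothetical driver, assuming a pyLoad source tree on the Python path and network access; the 4shared link is a placeholder.

# Hypothetical usage sketch, not part of the commit. Requires a pyLoad
# checkout on sys.path and a network connection; the URL is a placeholder.
from module.plugins.hoster.FourSharedCom import getInfo

urls = ["http://www.4shared.com/file/XXXXXXXX/example.html"]

for batch in getInfo(urls):
    for name, size, status, url in batch:
        # status codes as used by this plugin: 1 = offline, 2 = online
        print "%s | %s bytes | status %s" % (name, size, status)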