summaryrefslogtreecommitdiffstats
path: root/module/FileManager.py
diff options
context:
space:
mode:
authorGravatar RaNaN <Mast3rRaNaN@hotmail.de> 2013-06-09 18:10:22 +0200
committerGravatar RaNaN <Mast3rRaNaN@hotmail.de> 2013-06-09 18:10:23 +0200
commit16af85004c84d0d6c626b4f8424ce9647669a0c1 (patch)
tree025d479862d376dbc17e934f4ed20031c8cd97d1 /module/FileManager.py
parentadapted to jshint config (diff)
downloadpyload-16af85004c84d0d6c626b4f8424ce9647669a0c1.tar.xz
moved everything from module to pyload
Diffstat (limited to 'module/FileManager.py')
-rw-r--r--module/FileManager.py582
1 files changed, 0 insertions, 582 deletions
diff --git a/module/FileManager.py b/module/FileManager.py
deleted file mode 100644
index 7b14613f7..000000000
--- a/module/FileManager.py
+++ /dev/null
@@ -1,582 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-###############################################################################
-# Copyright(c) 2008-2012 pyLoad Team
-# http://www.pyload.org
-#
-# This file is part of pyLoad.
-# pyLoad is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# Subjected to the terms and conditions in LICENSE
-#
-# @author: RaNaN
-###############################################################################
-
-from time import time
-from ReadWriteLock import ReadWriteLock
-
-from module.utils import lock, read_lock
-
-from Api import PackageStatus, DownloadStatus as DS, TreeCollection, PackageDoesNotExists
-from datatypes.PyFile import PyFile
-from datatypes.PyPackage import PyPackage, RootPackage
-
# Decorator: wipes the manager's cached statistics and the job cache before
# running the wrapped method.  Applied to every method that mutates files or
# packages, so stale stats/jobs are never served afterwards.
#
# Fix: the original wrapper accepted only *args, so any decorated method
# invoked with keyword arguments raised TypeError; it also hid the wrapped
# function's name from introspection.
def invalidate(func):
    def new(*args, **kwargs):
        # args[0] is the FileManager instance ("self")
        args[0].downloadstats = {}
        args[0].queuestats = {}
        args[0].jobCache = {}
        return func(*args, **kwargs)

    # preserve the wrapped method's identity for debugging/introspection
    new.__name__ = func.__name__
    new.__doc__ = func.__doc__
    return new
-
# TODO: needs to be replaced later
# Placeholder owner/user id passed to every db call until real multi-user
# support exists.
OWNER = 0
-
class FileManager:
    """Handles all request made to obtain information,
    modify status or other request for links or packages"""

    # pseudo-pid of the virtual root package; it does not exist in the db
    ROOT_PACKAGE = -1

    def __init__(self, core):
        """Constructor.

        :param core: the pyLoad core object; provides eventManager and db.
        """
        self.core = core
        self.evm = core.eventManager

        # translations
        # NOTE(review): list order presumably mirrors the DownloadStatus
        # enum so it can be indexed by status code - confirm against Api
        self.statusMsg = [_("none"), _("offline"), _("online"), _("queued"), _("paused"),
                          _("finished"), _("skipped"), _("failed"), _("starting"),
                          _("waiting"), _("downloading"), _("temp. offline"), _("aborted"),
                          _("decrypting"), _("processing"), _("custom"), _("unknown")]

        self.files = {} # holds instances for files
        self.packages = {}  # same for packages

        # per-plugin job lists, filled lazily by getJob()
        self.jobCache = {}

        # locking the caches, db is already locked implicit
        self.lock = ReadWriteLock()
        #self.lock._Verbose__verbose = True

        self.downloadstats = {} # cached dl stats
        self.queuestats = {} # cached queue stats

        self.db = self.core.db
-
    def save(self):
        """saves all data to backend (asynchronous commit; see syncSave
        for the blocking variant)"""
        self.db.commit()
-
- @read_lock
- def syncSave(self):
- """saves all data to backend and waits until all data are written"""
- for pyfile in self.files.values():
- pyfile.sync()
-
- for pypack in self.packages.values():
- pypack.sync()
-
- self.db.syncSave()
-
    def cachedFiles(self):
        """Return the PyFile instances currently held in the cache."""
        return self.files.values()

    def cachedPackages(self):
        """Return the PyPackage instances currently held in the cache."""
        return self.packages.values()

    def getCollector(self):
        # stub: collector retrieval not implemented yet
        pass
-
    @invalidate
    def addLinks(self, data, package):
        """Add links, data = (plugin, url) tuple. Internal method should use API."""
        # persist under the given package; OWNER is the placeholder user id
        self.db.addLinks(data, package, OWNER)
        # notify listeners that the package content changed
        self.evm.dispatchEvent("package:updated", package)
-
-
- @invalidate
- def addPackage(self, name, folder, root, password, site, comment, paused):
- """Adds a package to database"""
- pid = self.db.addPackage(name, folder, root, password, site, comment,
- PackageStatus.Paused if paused else PackageStatus.Ok, OWNER)
- p = self.db.getPackageInfo(pid)
-
- self.evm.dispatchEvent("package:inserted", pid, p.root, p.packageorder)
- return pid
-
-
- @lock
- def getPackage(self, pid):
- """return package instance"""
- if pid == self.ROOT_PACKAGE:
- return RootPackage(self, OWNER)
- elif pid in self.packages:
- pack = self.packages[pid]
- pack.timestamp = time()
- return pack
- else:
- info = self.db.getPackageInfo(pid, False)
- if not info: return None
-
- pack = PyPackage.fromInfoData(self, info)
- self.packages[pid] = pack
-
- return pack
-
- @read_lock
- def getPackageInfo(self, pid):
- """returns dict with package information"""
- if pid == self.ROOT_PACKAGE:
- pack = RootPackage(self, OWNER).toInfoData()
- elif pid in self.packages:
- pack = self.packages[pid].toInfoData()
- pack.stats = self.db.getStatsForPackage(pid)
- else:
- pack = self.db.getPackageInfo(pid)
-
- if not pack: return None
-
- # todo: what does this todo mean?!
- #todo: fill child packs and files
- packs = self.db.getAllPackages(root=pid)
- if pid in packs: del packs[pid]
- pack.pids = packs.keys()
-
- files = self.db.getAllFiles(package=pid)
- pack.fids = files.keys()
-
- return pack
-
- @lock
- def getFile(self, fid):
- """returns pyfile instance"""
- if fid in self.files:
- return self.files[fid]
- else:
- info = self.db.getFileInfo(fid)
- if not info: return None
-
- f = PyFile.fromInfoData(self, info)
- self.files[fid] = f
- return f
-
- @read_lock
- def getFileInfo(self, fid):
- """returns dict with file information"""
- if fid in self.files:
- return self.files[fid].toInfoData()
-
- return self.db.getFileInfo(fid)
-
    @read_lock
    def getTree(self, pid, full, state, search=None):
        """ return a TreeCollection and fill the info data of containing packages.
         optional filter only unfinished files

        :param pid: root of the requested tree; ROOT_PACKAGE for everything
        :param full: when True fetch all descendants, otherwise only depth 1
        :param state: file-state filter passed through to the db
        :param search: optional search string passed through to the db
        """
        view = TreeCollection(pid)

        # for depth=1, we don't need to retrieve all files/packages
        root = pid if not full else None

        packs = self.db.getAllPackages(root)
        files = self.db.getAllFiles(package=root, state=state, search=search)

        # updating from cache: cached instances are fresher than db rows
        for fid, f in self.files.iteritems():
            if fid in files:
                files[fid] = f.toInfoData()

        # foreign pid, don't overwrite local pid !
        for fpid, p in self.packages.iteritems():
            if fpid in packs:
                # copy the stats data - toInfoData() does not carry them
                stats = packs[fpid].stats
                packs[fpid] = p.toInfoData()
                packs[fpid].stats = stats

        # root package is not in database, create an instance
        if pid == self.ROOT_PACKAGE:
            view.root = RootPackage(self, OWNER).toInfoData()
            packs[self.ROOT_PACKAGE] = view.root
        elif pid in packs:
            view.root = packs[pid]
        else: # package does not exists
            return view

        # linear traversal over all data
        # NOTE: `root` is rebound here to the parent info object - it no
        # longer holds the pid/None value from above
        for fpid, p in packs.iteritems():
            if p.fids is None: p.fids = []
            if p.pids is None: p.pids = []

            root = packs.get(p.root, None)
            if root:
                if root.pids is None: root.pids = []
                root.pids.append(fpid)

        for fid, f in files.iteritems():
            p = packs.get(f.package, None)
            if p: p.fids.append(fid)


        # cutting of tree is not good in runtime, only saves bandwidth
        # need to remove some entries
        if full and pid > -1:
            # breadth-first collect the pids reachable from the requested root
            keep = []
            queue = [pid]
            while queue:
                fpid = queue.pop()
                keep.append(fpid)
                queue.extend(packs[fpid].pids)

            # now remove unneeded data
            for fpid in packs.keys():
                if fpid not in keep:
                    del packs[fpid]

            for fid, f in files.items():
                if f.package not in keep:
                    del files[fid]

        #remove root - it is exposed separately as view.root
        del packs[pid]
        view.files = files
        view.packages = packs

        return view
-
-
    @lock
    def getJob(self, occ):
        """get suitable job

        :param occ: plugin/occupation key the job must match
        :return: a PyFile ready to download, or None when nothing is queued.

        Jobs are cached per `occ`; the string sentinel "empty" marks a key
        for which the db reported no jobs, so repeated calls do not hit the
        database again until the cache is invalidated.
        """

        #TODO only accessed by one thread, should not need a lock
        #TODO needs to be approved for new database
        #TODO clean mess
        #TODO improve selection of valid jobs

        if occ in self.jobCache:
            if self.jobCache[occ]:
                # NOTE: `id` shadows the builtin - kept as-is here
                id = self.jobCache[occ].pop()
                if id == "empty":
                    # re-append the sentinel so the next call also short-circuits
                    pyfile = None
                    self.jobCache[occ].append("empty")
                else:
                    pyfile = self.getFile(id)
            else:
                # cache list exhausted - refill from the database
                jobs = self.db.getJob(occ)
                jobs.reverse()
                if not jobs:
                    self.jobCache[occ].append("empty")
                    pyfile = None
                else:
                    self.jobCache[occ].extend(jobs)
                    pyfile = self.getFile(self.jobCache[occ].pop())

        else:
            # unknown key: drop ALL cached keys, not just this one
            self.jobCache = {} #better not caching to much
            jobs = self.db.getJob(occ)
            jobs.reverse()
            self.jobCache[occ] = jobs

            if not jobs:
                self.jobCache[occ].append("empty")
                pyfile = None
            else:
                pyfile = self.getFile(self.jobCache[occ].pop())


        return pyfile
-
- def getDownloadStats(self, user=None):
- """ return number of downloads """
- if user not in self.downloadstats:
- self.downloadstats[user] = self.db.downloadstats(user)
-
- return self.downloadstats[user]
-
- def getQueueStats(self, user=None, force=False):
- """number of files that have to be processed, failed files will not be included"""
- if user not in self.queuestats or force:
- self.queuestats[user] = self.db.queuestats(user)
-
- return self.queuestats[user]
-
    def scanDownloadFolder(self):
        # stub: scanning the download folder is not implemented yet
        pass
-
- @lock
- @invalidate
- def deletePackage(self, pid):
- """delete package and all contained links"""
-
- p = self.getPackage(pid)
- if not p: return
-
- oldorder = p.packageorder
- root = p.root
-
- for pyfile in self.cachedFiles():
- if pyfile.packageid == pid:
- pyfile.abortDownload()
-
- # TODO: delete child packages
- # TODO: delete folder
-
- self.db.deletePackage(pid)
- self.releasePackage(pid)
-
- for pack in self.cachedPackages():
- if pack.root == root and pack.packageorder > oldorder:
- pack.packageorder -= 1
-
- self.evm.dispatchEvent("package:deleted", pid)
-
- @lock
- @invalidate
- def deleteFile(self, fid):
- """deletes links"""
-
- f = self.getFile(fid)
- if not f: return
-
- pid = f.packageid
- order = f.fileorder
-
- if fid in self.core.threadManager.processingIds():
- f.abortDownload()
-
- # TODO: delete real file
-
- self.db.deleteFile(fid, f.fileorder, f.packageid)
- self.releaseFile(fid)
-
- for pyfile in self.files.itervalues():
- if pyfile.packageid == pid and pyfile.fileorder > order:
- pyfile.fileorder -= 1
-
- self.evm.dispatchEvent("file:deleted", fid, pid)
-
- @lock
- def releaseFile(self, fid):
- """removes pyfile from cache"""
- if fid in self.files:
- del self.files[fid]
-
- @lock
- def releasePackage(self, pid):
- """removes package from cache"""
- if pid in self.packages:
- del self.packages[pid]
-
    def updateFile(self, pyfile):
        """updates file - persists the pyfile and notifies listeners"""
        self.db.updateFile(pyfile)

        # This event is thrown with pyfile or only fid
        self.evm.dispatchEvent("file:updated", pyfile)

    def updatePackage(self, pypack):
        """updates a package - persists it and notifies listeners (pid only)"""
        self.db.updatePackage(pypack)
        self.evm.dispatchEvent("package:updated", pypack.pid)
-
    @invalidate
    def updateFileInfo(self, data, pid):
        """ updates file info (name, size, status,[ hash,] url)

        :param data: link-info payload handed to the db layer
        :param pid: package whose listeners should be notified
        """
        self.db.updateLinkInfo(data)
        self.evm.dispatchEvent("package:updated", pid)
-
- def checkAllLinksFinished(self):
- """checks if all files are finished and dispatch event"""
-
- # TODO: user context?
- if not self.db.queuestats()[0]:
- self.core.addonManager.dispatchEvent("download:allFinished")
- self.core.log.debug("All downloads finished")
- return True
-
- return False
-
- def checkAllLinksProcessed(self, fid=-1):
- """checks if all files was processed and pyload would idle now, needs fid which will be ignored when counting"""
-
- # reset count so statistic will update (this is called when dl was processed)
- self.resetCount()
-
- # TODO: user context?
- if not self.db.processcount(fid):
- self.core.addonManager.dispatchEvent("download:allProcessed")
- self.core.log.debug("All downloads processed")
- return True
-
- return False
-
- def checkPackageFinished(self, pyfile):
- """ checks if package is finished and calls addonmanager """
-
- ids = self.db.getUnfinished(pyfile.packageid)
- if not ids or (pyfile.id in ids and len(ids) == 1):
- if not pyfile.package().setFinished:
- self.core.log.info(_("Package finished: %s") % pyfile.package().name)
- self.core.addonManager.packageFinished(pyfile.package())
- pyfile.package().setFinished = True
-
    def resetCount(self):
        # -1 marks the cached queue count as stale so the next statistics
        # query recomputes it
        self.queuecount = -1
-
- @read_lock
- @invalidate
- def restartPackage(self, pid):
- """restart package"""
- for pyfile in self.cachedFiles():
- if pyfile.packageid == pid:
- self.restartFile(pyfile.id)
-
- self.db.restartPackage(pid)
-
- if pid in self.packages:
- self.packages[pid].setFinished = False
-
- self.evm.dispatchEvent("package:updated", pid)
-
- @read_lock
- @invalidate
- def restartFile(self, fid):
- """ restart file"""
- if fid in self.files:
- f = self.files[fid]
- f.status = DS.Queued
- f.name = f.url
- f.error = ""
- f.abortDownload()
-
- self.db.restartFile(fid)
- self.evm.dispatchEvent("file:updated", fid)
-
-
- @lock
- @invalidate
- def orderPackage(self, pid, position):
-
- p = self.getPackageInfo(pid)
- self.db.orderPackage(pid, p.root, p.packageorder, position)
-
- for pack in self.packages.itervalues():
- if pack.root != p.root or pack.packageorder < 0: continue
- if pack.pid == pid:
- pack.packageorder = position
- if p.packageorder > position:
- if position <= pack.packageorder < p.packageorder:
- pack.packageorder += 1
- elif p.order < position:
- if position >= pack.packageorder > p.packageorder:
- pack.packageorder -= 1
-
- self.db.commit()
-
- self.evm.dispatchEvent("package:reordered", pid, position, p.root)
-
    @lock
    @invalidate
    def orderFiles(self, fids, pid, position):
        """Move the contiguous block of files `fids` (within package pid)
        so it starts at `position`, shifting the cached neighbours'
        fileorder accordingly.

        :raises Exception: when the given fids are not one contiguous
            block of fileorders.
        """

        files = [self.getFileInfo(fid) for fid in fids]
        orders = [f.fileorder for f in files]
        # contiguity check: min..max must cover exactly len(files) slots
        if min(orders) + len(files) != max(orders) + 1:
            raise Exception("Tried to reorder non continous block of files")

        # minimum fileorder (reduce is a builtin here - Python 2 code)
        f = reduce(lambda x,y: x if x.fileorder < y.fileorder else y, files)
        order = f.fileorder

        self.db.orderFiles(pid, fids, order, position)
        diff = len(fids)

        if f.fileorder > position:
            # block moved towards the front: files in [position, old start)
            # are pushed back by the block size
            for pyfile in self.files.itervalues():
                if pyfile.packageid != f.package or pyfile.fileorder < 0: continue
                if position <= pyfile.fileorder < f.fileorder:
                    pyfile.fileorder += diff

            for i, fid in enumerate(fids):
                if fid in self.files:
                    self.files[fid].fileorder = position + i

        elif f.fileorder < position:
            # block moved towards the back: files in (old end, position]
            # are pulled forward by the block size
            for pyfile in self.files.itervalues():
                if pyfile.packageid != f.package or pyfile.fileorder < 0: continue
                if position >= pyfile.fileorder >= f.fileorder+diff:
                    pyfile.fileorder -= diff

            for i, fid in enumerate(fids):
                if fid in self.files:
                    self.files[fid].fileorder = position -diff + i + 1

        self.db.commit()

        self.evm.dispatchEvent("file:reordered", pid)
-
- @read_lock
- @invalidate
- def movePackage(self, pid, root):
- """ move pid - root """
-
- p = self.getPackageInfo(pid)
- dest = self.getPackageInfo(root)
- if not p: raise PackageDoesNotExists(pid)
- if not dest: raise PackageDoesNotExists(root)
-
- # cantor won't be happy if we put the package in itself
- if pid == root or p.root == root: return False
-
- # TODO move real folders
-
- # we assume pack is not in use anyway, so we can release it
- self.releasePackage(pid)
- self.db.movePackage(p.root, p.packageorder, pid, root)
-
- return True
-
- @read_lock
- @invalidate
- def moveFiles(self, fids, pid):
- """ move all fids to pid """
-
- f = self.getFileInfo(fids[0])
- if not f or f.package == pid:
- return False
- if not self.getPackageInfo(pid):
- raise PackageDoesNotExists(pid)
-
- # TODO move real files
-
- self.db.moveFiles(f.package, fids, pid)
-
- return True
-
-
- @invalidate
- def reCheckPackage(self, pid):
- """ recheck links in package """
- data = self.db.getPackageData(pid)
-
- urls = []
-
- for pyfile in data.itervalues():
- if pyfile.status not in (DS.NA, DS.Finished, DS.Skipped):
- urls.append((pyfile.url, pyfile.pluginname))
-
- self.core.threadManager.createInfoThread(urls, pid)
-
-
    @invalidate
    def restartFailed(self):
        """ restart all failed links """
        # failed should not be in cache anymore, so working on db is sufficient
        self.db.restartFailed()