diff --git a/addons/document/README.txt b/addons/document/README.txt
new file mode 100644
index 00000000000..34887026aeb
--- /dev/null
+++ b/addons/document/README.txt
@@ -0,0 +1,8 @@
+To be done:
+-----------
+
+* Make sure the same file / directory cannot be created several times
+  -> maybe add a unique SQL constraint across those columns
+  -> test through remove and put
+
+* Retest everything
diff --git a/addons/document/__init__.py b/addons/document/__init__.py
new file mode 100644
index 00000000000..dc51bcd02e7
--- /dev/null
+++ b/addons/document/__init__.py
@@ -0,0 +1,30 @@
+##############################################################################
+#
+# Copyright (c) 2004 TINY SPRL. (http://tiny.be) All Rights Reserved.
+#                    Fabien Pinckaers
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs.
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly advised to contract a Free Software
+# Service Company.
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+##############################################################################
+
+import document
+import ftpserver
diff --git a/addons/document/__terp__.py b/addons/document/__terp__.py
new file mode 100644
index 00000000000..d31dcc19665
--- /dev/null
+++ b/addons/document/__terp__.py
@@ -0,0 +1,21 @@
+#
+# Use the custom module to put your specific code in a separate module.
+#
+{
+    "name" : "Integrated Document Management System",
+    "version" : "1.0",
+    "author" : "Tiny",
+    "category" : "Generic Modules/Others",
+    "website": "http://www.tinyerp.com",
+    "description": """This is a complete document management system:
+    * FTP Interface
+    * User Authentication
+    * Document Indexing
+""",
+    "depends" : ["base"],
+    "init_xml" : ["document_data.xml"],
+    "update_xml" : ["document_view.xml"],
+    "demo_xml" : ["document_demo.xml"],
+    "active": False,
+    "installable": True
+}
diff --git a/addons/document/content_index.py b/addons/document/content_index.py
new file mode 100644
index 00000000000..c4ec7c2c795
--- /dev/null
+++ b/addons/document/content_index.py
@@ -0,0 +1,34 @@
+import time
+import os
+import StringIO
+import odt2txt
+
+#
+# This should be the indexer
+#
+def content_index(content, filename=None, content_type=None):
+    fname,ext = os.path.splitext(filename)
+    result = ''
+    if ext == '.doc': # or check content_type?
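+        # antiword reads the binary .doc on stdin ('-') and prints plain
+        # text; its Latin-1 output is re-encoded as UTF-8 just below.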
+        (stdin,stdout) = os.popen2('antiword -', 'b')
+        stdin.write(content)
+        stdin.close()
+        result = stdout.read().decode('latin1','replace').encode('utf-8','replace')
+    elif ext == '.pdf':
+        fname = os.tempnam(filename)+'.pdf'
+        fp = file(fname,'wb')
+        fp.write(content)
+        fp.close()
+        fp = os.popen('pdftotext -enc UTF-8 -nopgbrk '+fname+' -', 'r')
+        result = fp.read()
+        fp.close()
+    elif ext in ('.xls','.ods','.odt','.odp'):
+        s = StringIO.StringIO(content)
+        o = odt2txt.OpenDocumentTextFile(s)
+        result = o.toString().encode('ascii','replace')
+        s.close()
+    elif ext in ('.txt','.py','.patch','.html','.csv','.xml'):
+        result = content
+    else:
+        result = content
+    return result
\ No newline at end of file
diff --git a/addons/document/custom_report.xml b/addons/document/custom_report.xml
new file mode 100644
index 00000000000..02cf10e6a4a
--- /dev/null
+++ b/addons/document/custom_report.xml
@@ -0,0 +1,6 @@
+[XML markup lost in extraction: a 6-line report definition with no recoverable payload.]
diff --git a/addons/document/custom_view.xml b/addons/document/custom_view.xml
new file mode 100644
index 00000000000..4909336c498
--- /dev/null
+++ b/addons/document/custom_view.xml
@@ -0,0 +1,12 @@
+[XML markup lost in extraction. Recoverable text: group names "grcompta" and
+ "grcomptaadmin".]
diff --git a/addons/document/document.py b/addons/document/document.py
new file mode 100644
index 00000000000..101874305da
--- /dev/null
+++ b/addons/document/document.py
@@ -0,0 +1,568 @@
+#############################################################################
+#
+# Copyright (c) 2004 TINY SPRL. (http://tiny.be) All Rights Reserved.
+#                    Fabien Pinckaers
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs.
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly advised to contract a Free Software
+# Service Company.
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# +############################################################################## + +import base64 + +from osv import osv, fields +from osv.orm import except_orm +import urlparse + +import os + +import pooler +from content_index import content_index + +# Unsupported WebDAV Commands: +# label +# search +# checkin +# checkout +# propget +# propset + +# +# An object that represent an uri +# path: the uri of the object +# content: the Content it belongs to (_print.pdf) +# type: content or collection +# content: objct = res.partner +# collection: object = directory, object2 = res.partner +# file: objct = ir.attachement +# root: if we are at the first directory of a ressource +# +INVALID_CHARS={'*':str(hash('*')), '|':str(hash('|')) , "\\":str(hash("\\")), '/':'__', ':':str(hash(':')), '"':str(hash('"')), '<':str(hash('<')) , '>':str(hash('>')) , '?':str(hash('?'))} +class node_class(object): + def __init__(self, cr, uid, path,object,object2=False, context={}, content=False, type='collection', root=False): + self.cr = cr + self.uid = uid + self.path = path + self.object = object + self.object2 = object2 + self.context = context + self.content = content + self.type=type + self.root=root + + def _file_get(self, nodename=False): + if not self.object: + return [] + pool = pooler.get_pool(self.cr.dbname) + fobj = pool.get('ir.attachment') + res2 = [] + where = [] + if self.object2: + where.append( ('res_model','=',self.object2._name) ) + where.append( ('res_id','=',self.object2.id) ) + for content in self.object.content_ids: + test_nodename = self.object2.name + (content.suffix or '') + (content.extension or '') + if test_nodename.find('/'): + test_nodename=test_nodename.replace('/', '_') + path = self.path+'/'+test_nodename + #path = self.path+'/'+self.object2.name + (content.suffix or '') + (content.extension or '') + if not nodename: + n = node_class(self.cr, self.uid,path, self.object2, False, content=content, type='content', root=False) + res2.append( n) + else: + if nodename == test_nodename: + n = node_class(self.cr, self.uid, path, self.object2, False, content=content, type='content', root=False) + res2.append(n) + else: + where.append( ('parent_id','=',self.object.id) ) + where.append( ('res_id','=',False) ) + if nodename: + where.append( (fobj._rec_name,'=',nodename) ) + ids = fobj.search(self.cr, self.uid, where+[ ('parent_id','=',self.object and self.object.id or False) ], context=self.context) + if self.object and self.root and (self.object.type=='ressource'): + ids += fobj.search(self.cr, self.uid, where+[ ('parent_id','=',False) ], context=self.context) + res = fobj.browse(self.cr, self.uid, ids, context=self.context) + return map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, False, type='file', root=False), res) + res2 + + def directory_list_for_child(self,nodename,parent=False): + pool = pooler.get_pool(self.cr.dbname) + where = [] + if nodename: + where.append(('name','=',nodename)) + if (self.object and self.object.type=='directory') or not self.object2: + where.append(('parent_id','=',self.object and self.object.id or False)) + else: + where.append(('parent_id','=',False)) + if self.object: + where.append(('ressource_parent_type_id','=',self.object.ressource_type_id.id)) + else: + where.append(('ressource_parent_type_id','=',False)) + + ids = pool.get('document.directory').search(self.cr, self.uid, where+[('ressource_id','=',0)], self.context) + if self.object2: + ids += pool.get('document.directory').search(self.cr, self.uid, 
where+[('ressource_id','=',self.object2.id)], self.context) + res = pool.get('document.directory').browse(self.cr, self.uid, ids,self.context) + return res + + def _child_get(self, nodename=False): + if self.type not in ('collection','database'): + return [] + res = self.directory_list_for_child(nodename) + result= map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, x.type=='directory' and self.object2 or False, root=self.root), res) + if self.type=='database': + pool = pooler.get_pool(self.cr.dbname) + fobj = pool.get('ir.attachment') + vargs = [('parent_id','=',False),('res_id','=',False)] + if nodename: + vargs.append(('name','=',nodename)) + file_ids=fobj.search(self.cr,self.uid,vargs) + + res = fobj.browse(self.cr, self.uid, file_ids, context=self.context) + result +=map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, False, type='file', root=self.root), res) + if self.type=='collection' and self.object.type=="ressource": + where = self.object.domain and eval(self.object.domain, {'active_id':self.root}) or [] + pool = pooler.get_pool(self.cr.dbname) + obj = pool.get(self.object.ressource_type_id.model) + + if self.object.ressource_tree: + if obj._parent_name in obj.fields_get(self.cr,self.uid): + where.append((obj._parent_name,'=',self.object2 and self.object2.id or False)) + else : + if self.object2: + return result + else: + if self.object2: + return result + + name_for = obj._name.split('.')[-1] + if nodename and nodename.find(name_for) == 0 : + id = int(nodename.replace(name_for,'')) + where.append(('id','=',id)) + elif nodename: + if nodename.find('__') : + nodename=nodename.replace('__','/') + for invalid in INVALID_CHARS: + if nodename.find(INVALID_CHARS[invalid]) : + nodename=nodename.replace(INVALID_CHARS[invalid],invalid) + where.append(('name','=',nodename)) + ids = obj.search(self.cr, self.uid, where, self.context) + res = obj.browse(self.cr, self.uid, ids,self.context) + for r in res: + if not r.name: + r.name = name_for+'%d'%r.id + for invalid in INVALID_CHARS: + if r.name.find(invalid) : + r.name=r.name.replace(invalid,INVALID_CHARS[invalid]) + result2 = map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name.replace('/','__'), self.object, x, root=r.id), res) + if result2: + result = result2 + return result + + def children(self): + return self._child_get() + self._file_get() + + def child(self, name): + res = self._child_get(name) + if res: + return res[0] + res = self._file_get(name) + if res: + return res[0] + return None + + def path_get(self): + path = self.path + if self.path[0]=='/': + path = self.path[1:] + return path + +class document_directory(osv.osv): + _name = 'document.directory' + _description = 'Document directory' + _columns = { + 'name': fields.char('Name', size=64, required=True, select=1), + 'write_date': fields.datetime('Date Modified', readonly=True), + 'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True), + 'create_date': fields.datetime('Date Created', readonly=True), + 'create_uid': fields.many2one('res.users', 'Creator', readonly=True), + 'file_type': fields.char('Content Type', size=32), + 'domain': fields.char('Domain', size=128), + 'user_id': fields.many2one('res.users', 'Owner'), + 'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'), + 'parent_id': fields.many2one('document.directory', 'Parent Item'), + 'child_ids': fields.one2many('document.directory', 'parent_id', 'Childs'), + 'file_ids': 
fields.one2many('ir.attachment', 'parent_id', 'Files'), + 'content_ids': fields.one2many('document.directory.content', 'directory_id', 'Virtual Files'), + 'type': fields.selection([('directory','Static Directory'),('ressource','Other Ressources')], 'Type', required=True), + 'ressource_type_id': fields.many2one('ir.model', 'Childs Model'), + 'ressource_parent_type_id': fields.many2one('ir.model', 'Linked Model'), + 'ressource_id': fields.integer('Ressource ID'), + 'ressource_tree': fields.boolean('Tree Structure'), + } + _defaults = { + 'user_id': lambda self,cr,uid,ctx: uid, + 'domain': lambda self,cr,uid,ctx: '[]', + 'type': lambda *args: 'directory', + } + _sql_constraints = [ + ('dirname_uniq', 'unique (name,parent_id,ressource_id,ressource_parent_type_id)', 'The directory name must be unique !') + ] + + def _check_duplication(self, cr, uid,vals): + if 'name' in vals: + where=" name='%s'"% (vals['name']) + if not 'parent_id' in vals or not vals['parent_id']: + where+=' and parent_id is null' + else: + where+=' and parent_id=%d'%(vals['parent_id']) + if not 'ressource_parent_type_id' in vals or not vals['ressource_parent_type_id']: + where+= ' and ressource_parent_type_id is null' + else: + where+=" and ressource_parent_type_id='%s'"%(vals['ressource_parent_type_id']) +# if not 'ressource_id' in vals or not vals['ressource_id']: +# where+= ' and ressource_id is null' +# else: +# where+=" and ressource_id=%d"%(vals['ressource_id']) + cr.execute("select id from document_directory where" + where) + res = cr.fetchall() + if len(res): + return False + return True + def _check_recursion(self, cr, uid, ids): + level = 100 + while len(ids): + cr.execute('select distinct parent_id from document_directory where id in ('+','.join(map(str,ids))+')') + ids = filter(None, map(lambda x:x[0], cr.fetchall())) + if not level: + return False + level -= 1 + return True + + _constraints = [ + (_check_recursion, 'Error! 
You can not create recursive Directories.', ['parent_id']) + ] + def __init__(self, *args, **kwargs): + res = super(document_directory, self).__init__(*args, **kwargs) + self._cache = {} + return res + + def onchange_content_id(self, cr, uid, ids, ressource_type_id): + return {} + + def _get_childs(self, cr, uid, node, nodename=False, context={}): + where = [] + if nodename: + where.append(('name','=',nodename)) + if object: + where.append(('parent_id','=',object.id)) + ids = self.search(cr, uid, where, context) + return self.browse(cr, uid, ids, context), False + + """ + PRE: + uri: of the form "Sales Order/SO001" + PORT: + uri + object: the object.directory or object.directory.content + object2: the other object linked (if object.directory.content) + """ + def get_object(self, cr, uid, uri, context={}): + if not uri: + return node_class(cr, uid, '', False, type='database') + turi = tuple(uri) + if False and (turi in self._cache): + (path, oo, oo2, content,type,root) = self._cache[turi] + if oo: + object = self.pool.get(oo[0]).browse(cr, uid, oo[1], context) + else: + object = False + if oo2: + object2 = self.pool.get(oo2[0]).browse(cr, uid, oo2[1], context) + else: + object2 = False + node = node_class(cr, uid, path, object,object2, context, content, type, root) + return node + + node = node_class(cr, uid, '/', False, type='database') + for path in uri[:]: + if path: + node = node.child(path) + if not node: + return False + oo = node.object and (node.object._name, node.object.id) or False + oo2 = node.object2 and (node.object2._name, node.object2.id) or False + self._cache[turi] = (node.path, oo, oo2, node.content,node.type,node.root) + return node + + def get_childs(self, cr, uid, uri, context={}): + node = self.get_object(cr, uid, uri, context) + if uri: + children = node.children() + else: + children= [node] + result = map(lambda node: node.path_get(), children) + #childs,object2 = self._get_childs(cr, uid, object, False, context) + #result = map(lambda x: urlparse.urljoin(path+'/',x.name), childs) + return result + + def write(self, cr, uid, ids, vals, context=None): + if not self._check_duplication(cr,uid,vals): + raise except_orm('ValidateError', 'Directory name must be unique!') + return super(document_directory,self).write(cr,uid,ids,vals,context=context) + + def copy(self, cr, uid, id, default=None, context=None): + if not default: + default ={} + name = self.read(cr, uid, [id])[0]['name'] + default.update({'name': name+ " (copy)"}) + return super(document_directory,self).copy(cr,uid,id,default,context) + + def create(self, cr, uid, vals, context=None): + if not self._check_duplication(cr,uid,vals): + raise except_orm('ValidateError', 'Directory name must be unique!') + if vals.get('name',False) and (vals.get('name').find('/')+1 or vals.get('name').find('@')+1 or vals.get('name').find('$')+1 or vals.get('name').find('#')+1) : + raise 'Error' + return super(document_directory,self).create(cr, uid, vals, context) + +document_directory() + +class document_directory_content(osv.osv): + _name = 'document.directory.content' + _description = 'Directory Content' + _order = "sequence" + _columns = { + 'name': fields.char('Content Name', size=64, required=True), + 'sequence': fields.integer('Sequence', size=16), + 'suffix': fields.char('Suffix', size=16), + 'versioning': fields.boolean('Versioning'), + 'report_id': fields.many2one('ir.actions.report.xml', 'Report', required=True), + 'extension': fields.selection([('.pdf','.pdf'),('','None')], 'Extension', required=True), + 'directory_id': 
fields.many2one('document.directory', 'Directory') + } + _defaults = { + 'extension': lambda *args: '', + 'sequence': lambda *args: 1 + } +document_directory_content() + +class ir_action_report_xml(osv.osv): + _name="ir.actions.report.xml" + _inherit ="ir.actions.report.xml" + + def _model_get(self, cr, uid, ids, name, arg, context): + res = {} + model_pool = self.pool.get('ir.model') + for data in self.read(cr,uid,ids,['model']): + model = data.get('model',False) + if model: + model_id =model_pool.search(cr,uid,[('model','=',model)]) + if model_id: + res[data.get('id')] = model_id[0] + else: + res[data.get('id')] = False + return res + + def _model_search(self, cr, uid, obj, name, args): + if not len(args): + return [] + model_id= args[0][2] + if not model_id: + return [] + model = self.pool.get('ir.model').read(cr,uid,[model_id])[0]['model'] + report_id = self.search(cr,uid,[('model','=',model)]) + if not report_id: + return [('id','=','0')] + return [('id','in',report_id)] + + _columns={ + 'model_id' : fields.function(_model_get,fnct_search=_model_search,method=True,string='Model Id'), + } + +ir_action_report_xml() + + +import random +import string + + +def random_name(): + random.seed() + d = [random.choice(string.letters) for x in xrange(10) ] + name = "".join(d) + return name + + +def create_directory(path): + dir_name = random_name() + path = os.path.join(path,dir_name) + os.mkdir(path) + return dir_name + +class document_file(osv.osv): + _inherit = 'ir.attachment' + def _data_get(self, cr, uid, ids, name, arg, context): + result = {} + cr.execute('select id,store_method,datas,store_fname,link from ir_attachment where id in ('+','.join(map(str,ids))+')') + for id,m,d,r,l in cr.fetchall(): + if m=='db': + result[id] = d + elif m=='fs': + try: + path = os.path.join(os.getcwd(),'filestore') + value = file(os.path.join(path,r), 'rb').read() + result[id] = base64.encodestring(value) + except: + result[id]='' + else: + result[id] = '' + return result + + # + # This code can be improved + # + def _data_set(self, cr, obj, id, name, value, uid=None, context={}): + if not value: + return True + if (not context) or context.get('store_method','fs')=='fs': + path = os.path.join(os.getcwd(), "filestore") + if not os.path.isdir(path): + os.mkdir(path) + flag = None + # This can be improved + for dirs in os.listdir(path): + if os.path.isdir(os.path.join(path,dirs)) and len(os.listdir(os.path.join(path,dirs)))<4000: + flag = dirs + break + flag = flag or create_directory(path) + filename = random_name() + fname = os.path.join(path, flag, filename) + fp = file(fname,'wb') + v = base64.decodestring(value) + fp.write(v) + filesize = os.stat(fname).st_size + cr.execute('update ir_attachment set store_fname=%s,store_method=%s,file_size=%d where id=%d', (os.path.join(flag,filename),'fs',len(v),id)) + else: + cr.execute('update ir_attachment set datas=%s,store_method=%s where id=%d', (psycopg.Binary(value),'db',id)) + return True + + _columns = { + 'user_id': fields.many2one('res.users', 'Owner', select=1), + 'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'), + 'parent_id': fields.many2one('document.directory', 'Directory', select=1), + 'file_size': fields.integer('File Size', required=True), + 'file_type': fields.char('Content Type', size=32), + 'index_content': fields.text('Indexed Content'), + 'write_date': fields.datetime('Date Modified', readonly=True), + 'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True), + 
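+        # the write_*/create_* audit columns double as the modification
+        # times reported over FTP (see abstracted_fs.getmtime)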
'create_date': fields.datetime('Date Created', readonly=True), + 'create_uid': fields.many2one('res.users', 'Creator', readonly=True), + 'store_method': fields.selection([('db','Database'),('fs','Filesystem'),('link','Link')], "Storing Method"), + 'datas': fields.function(_data_get,method=True,store=True,fnct_inv=_data_set,string='File Content',type="binary"), + 'store_fname': fields.char('Stored Filename', size=200), + 'res_model': fields.char('Attached Model', size=64), #res_model + 'res_id': fields.integer('Attached ID'), #res_id + 'partner_id':fields.many2one('res.partner', 'Partner', select=1), + 'title': fields.char('Resource Title',size=64), + } + + _defaults = { + 'user_id': lambda self,cr,uid,ctx:uid, + 'file_size': lambda self,cr,uid,ctx:0, + 'store_method': lambda *args: 'db' + } + _sql_constraints = [ + ('filename_uniq', 'unique (name,parent_id,res_id,res_model)', 'The file name must be unique !') + ] + + def _check_duplication(self, cr, uid,vals): + if 'name' in vals: + res=self.search(cr,uid,[('name','=',vals['name']),('parent_id','=','parent_id' in vals and vals['parent_id'] or False),('res_id','=','res_id' in vals and vals['res_id'] or False),('res_model','=','res_model' in vals and vals['res_model']) or False]) + if len(res): + return False + return True + + def write(self, cr, uid, ids, vals, context=None): + if not self._check_duplication(cr,uid,vals): + raise except_orm('ValidateError', 'File name must be unique!') + result = super(document_file,self).write(cr,uid,ids,vals,context=context) + try: + for f in self.browse(cr, uid, ids, context=context): + if 'datas' not in vals: + vals['datas']=f.datas + res = content_index(base64.decodestring(vals['datas']), f.datas_fname, f.file_type or None) + super(document_file,self).write(cr, uid, ids, { + 'index_content': res + }) + except: + pass + return result + + def create(self, cr, uid, vals, context={}): + vals['title']=vals['name'] + if vals.get('res_id', False) and vals.get('res_model',False): + obj_model=self.pool.get(vals['res_model']) + result = obj_model.read(cr, uid, [vals['res_id']], context=context) + if len(result): + obj=result[0] + vals['title'] = (obj['name'] or '')[:60] + if obj_model._name=='res.partner': + vals['partner_id']=obj['id'] + elif 'address_id' in obj: + address=self.pool.get('res.partner.address').read(cr,uid,[obj['address_id']],context=context) + if len(address): + vals['partner_id']=address[0]['partner_id'] or False + elif 'partner_id' in obj: + if isinstance(obj['partner_id'],tuple) or isinstance(obj['partner_id'],list): + vals['partner_id']=obj['partner_id'][0] + else: + vals['partner_id']=obj['partner_id'] + + datas=None + if 'datas' not in vals: + import urllib + datas=base64.encodestring(urllib.urlopen(vals['link']).read()) + else: + datas=vals['datas'] + vals['file_size']= len(datas) + if not self._check_duplication(cr,uid,vals): + raise except_orm('ValidateError', 'File name must be unique!') + result = super(document_file,self).create(cr, uid, vals, context) + cr.commit() + try: + res = content_index(base64.decodestring(datas), vals['datas_fname'], vals.get('content_type', None)) + super(document_file,self).write(cr, uid, [result], { + 'index_content': res, + }) + cr.commit() + except: + pass + return result + + def unlink(self,cr, uid, ids, context={}): + for f in self.browse(cr, uid, ids, context): + if f.store_method=='fs': + try: + path = os.path.join(os.getcwd(),'filestore',f.store_fname) + os.unlink(path) + except: + pass + return super(document_file, self).unlink(cr, uid, ids, 
context)
+document_file()
diff --git a/addons/document/document_data.xml b/addons/document/document_data.xml
new file mode 100644
index 00000000000..98793851804
--- /dev/null
+++ b/addons/document/document_data.xml
@@ -0,0 +1,6 @@
+[XML markup lost in extraction: a 6-line data file with no recoverable payload.]
diff --git a/addons/document/document_demo.xml b/addons/document/document_demo.xml
new file mode 100644
index 00000000000..3de248ffa28
--- /dev/null
+++ b/addons/document/document_demo.xml
@@ -0,0 +1,93 @@
+[XML markup lost in extraction. Recoverable document.directory demo records:
+ "Main Repository"; "My Folder"; "Partner Category" (type ressource,
+ ressource_tree=1); "Partner" (type ressource,
+ domain [('category_id','in',[active_id])]); "Contacts" (type ressource,
+ domain [('partner_id','=',active_id)]); "My Folder"; "Personal Folders";
+ "Sales Order"; "All Sales Order"; "Sales by Salesman"; "Projects".]
diff --git a/addons/document/document_view.xml b/addons/document/document_view.xml
new file mode 100644
index 00000000000..fb87bb56d21
--- /dev/null
+++ b/addons/document/document_view.xml
@@ -0,0 +1,175 @@
+[XML markup lost in extraction. Recoverable structure: a form view and a tree
+ view (expanding child_ids) for document.directory; two document.directory
+ window actions, the tree action filtered on
+ [('ressource_parent_type_id','=',False),('parent_id','=',False)]; a form view
+ and a tree view for ir.attachment; two ir.attachment window actions, one
+ filtered on [('parent_id','=',active_id)]; a "Browse Files" entry; and an
+ "ir.attachment.view.inherit" view on ir.attachment.]
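The ftpserver package introduced below serves the document repository over FTP,
one top-level virtual directory per database, on port 8021 by default (PORT in
ftpserver/__init__.py). A minimal smoke test with Python's standard ftplib
might look like this; the host and login credentials are illustrative
assumptions, any valid OpenERP login should do:

    import ftplib

    ftp = ftplib.FTP()
    ftp.connect('localhost', 8021)  # PORT from ftpserver/__init__.py
    ftp.login('admin', 'admin')     # assumed OpenERP credentials
    ftp.retrlines('LIST')           # one line per database with the module installed
    ftp.quit()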
diff --git a/addons/document/ftpserver/__init__.py b/addons/document/ftpserver/__init__.py
new file mode 100644
index 00000000000..62c9191497d
--- /dev/null
+++ b/addons/document/ftpserver/__init__.py
@@ -0,0 +1,22 @@
+import threading
+import ftpserver
+import authorizer
+import abstracted_fs
+
+PORT = 8021
+HOST = ''
+
+class ftp_server(threading.Thread):
+    def run(self):
+        autho = authorizer.authorizer()
+        ftpserver.FTPHandler.authorizer = autho
+        ftpserver.max_cons = 300
+        ftpserver.max_cons_per_ip = 50
+        ftpserver.FTPHandler.abstracted_fs = abstracted_fs.abstracted_fs
+        address = (HOST, PORT)
+        ftpd = ftpserver.FTPServer(address, ftpserver.FTPHandler)
+        ftpd.serve_forever()
+
+ds = ftp_server()
+ds.start()
+
diff --git a/addons/document/ftpserver/abstracted_fs.py b/addons/document/ftpserver/abstracted_fs.py
new file mode 100644
index 00000000000..2ff2bdbb41c
--- /dev/null
+++ b/addons/document/ftpserver/abstracted_fs.py
@@ -0,0 +1,751 @@
+import os
+import time
+from tarfile import filemode
+import StringIO
+import base64
+
+import glob
+import fnmatch
+
+import pooler
+import netsvc
+import posix
+from service import security
+
+class abstracted_fs:
+    """A class used to interact with the file system, providing a high
+    level, cross-platform interface compatible with both Windows and
+    UNIX style filesystems.
+
+    It provides some utility methods and some wrappers around operations
+    involved in file creation and file system operations like moving
+    files or removing directories.
+
+    Instance attributes:
+    - (str) root: the user home directory.
+    - (str) cwd: the current working directory.
+    - (str) rnfr: source file to be renamed.
+    """
+
+    # Ok
+    def db_list(self):
+        s = netsvc.LocalService('db')
+        result = s.list()
+        self.db_name_list = []
+        for db_name in result:
+            db = pooler.get_db_only(db_name)
+            cr = db.cursor()
+            cr.execute("select id from ir_module_module where name like 'document%' and state='installed' ")
+            res = cr.fetchone()
+            if res and len(res):
+                self.db_name_list.append(db_name)
+            cr.close()
+        return self.db_name_list
+
+    # Ok
+    def __init__(self):
+        self.root = None
+        self.cwd = '/'
+        self.rnfr = None
+
+    # --- Pathname / conversion utilities
+
+    # Ok
+    def ftpnorm(self, ftppath):
+        """Normalize a "virtual" ftp pathname (typically the raw string
+        coming from the client) depending on the current working directory.
+
+        Example (having "/foo" as current working directory):
+        'x' -> '/foo/x'
+
+        Note: directory separators are system independent ("/").
+        The pathname returned is always absolutized.
+        """
+        if os.path.isabs(ftppath):
+            p = os.path.normpath(ftppath)
+        else:
+            p = os.path.normpath(os.path.join(self.cwd, ftppath))
+        # normalize string in a standard web-path notation having '/'
+        # as separator.
+        p = p.replace("\\", "/")
+        # os.path.normpath supports UNC paths (e.g. "//a/b/c") but we
+        # don't need them.  In case we get a UNC path we collapse
+        # redundant separators appearing at the beginning of the string
+        while p[:2] == '//':
+            p = p[1:]
+        # Anti path traversal: don't trust user input; in the event
+        # that self.cwd is not absolute, return "/" as a safety measure.
+        # This is for extra protection, maybe not really necessary.
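+        # e.g. with cwd '/db', ftpnorm('../../x') -> '/x': normpath() cannot
+        # climb above '/', so this only matters if self.cwd were relative.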
+ if not os.path.isabs(p): + p = "/" + return p + + # Ok + def ftp2fs(self, path_orig, data): + path = self.ftpnorm(path_orig) + if path and path=='/': + return None + path2 = filter(None,path.split('/'))[1:] + (cr, uid, pool) = data + res = pool.get('document.directory').get_object(cr, uid, path2[:]) + if not res: + raise OSError(2, 'Not such file or directory.') + return res + + # Ok + def fs2ftp(self, node): + res = node and ('/' + node.cr.dbname + '/' + node.path) or '/' + return res + + # Ok + def validpath(self, path): + """Check whether the path belongs to user's home directory. + Expected argument is a "real" filesystem pathname. + + If path is a symbolic link it is resolved to check its real + destination. + + Pathnames escaping from user's root directory are considered + not valid. + """ + return path and True or False + + # --- Wrapper methods around open() and tempfile.mkstemp + + # Ok + def create(self, node, objname, mode): + try: + class file_wrapper(StringIO.StringIO): + def __init__(self, sstr='', ressource_id=False, dbname=None, uid=1, name=''): + StringIO.StringIO.__init__(self, sstr) + self.ressource_id = ressource_id + self.name = name + self.dbname = dbname + self.uid = uid + def close(self, *args, **kwargs): + db,pool = pooler.get_db_and_pool(self.dbname) + cr = db.cursor() + uid =self.uid + val = self.getvalue() + val2 = { + 'datas': base64.encodestring(val), + 'file_size': len(val), + } + pool.get('ir.attachment').write(cr, uid, [self.ressource_id], val2) + cr.commit() + StringIO.StringIO.close(self, *args, **kwargs) + + cr = node.cr + uid = node.uid + pool = pooler.get_pool(cr.dbname) + + fobj = pool.get('ir.attachment') + ext = objname.find('.') >0 and objname.split('.')[1] or False + + # TODO: test if already exist and modify in this case if node.type=file + ### checked already exits + object2=node and node.object2 or False + object=node and node.object or False + cid=False + + where=[('name','=',objname)] + if object and (object.type in ('directory','ressource')) or object2: + where.append(('parent_id','=',object.id)) + else: + where.append(('parent_id','=',False)) + + if object2: + where +=[('res_id','=',object2.id),('res_model','=',object2._name)] + cids = fobj.search(cr, uid,where) + if len(cids): + cid=cids[0] + + if not cid: + val = { + 'name': objname, + 'datas_fname': objname, + 'datas': '', + 'file_size': 0L, + 'file_type': ext, + } + if object and (object.type in ('directory','ressource')) or not object2: + val['parent_id']= object and object.id or False + partner = False + if object2: + if 'partner_id' in object2 and object2.partner_id.id: + partner = object2.partner_id.id + if object2._name == 'res.partner': + partner = object2.id + val.update( { + 'res_model': object2._name, + 'partner_id': partner, + 'res_id': object2.id + }) + cid = fobj.create(cr, uid, val, context={}) + cr.commit() + + s = file_wrapper('', cid, cr.dbname, uid, ) + return s + except Exception,e: + print e + raise OSError(1, 'Operation not permited.') + + # Ok + def open(self, node, mode): + if not node: + raise OSError(1, 'Operation not permited.') + # Reading operation + if node.type=='file': + if not self.isfile(node): + raise OSError(1, 'Operation not permited.') + s = StringIO.StringIO(base64.decodestring(node.object.datas or '')) + s.name = node + return s + elif node.type=='content': + cr = node.cr + uid = node.uid + pool = pooler.get_pool(cr.dbname) + report = pool.get('ir.actions.report.xml').browse(cr, uid, node.content['report_id']['id']) + srv = 
netsvc.LocalService('report.'+report.report_name)
+            pdf,pdftype = srv.create(cr, uid, [node.object.id], {}, {})
+            s = StringIO.StringIO(pdf)
+            s.name = node
+            return s
+        else:
+            raise OSError(1, 'Operation not permitted.')
+
+    # OK, but needs more testing
+
+    def mkstemp(self, suffix='', prefix='', dir=None, mode='wb'):
+        """A wrapper around tempfile.mkstemp creating a file with a unique
+        name.  Unlike mkstemp it returns an object with a file-like
+        interface.
+        """
+        raise NotImplementedError('mkstemp is not implemented yet')
+#        class FileWrapper:
+#            def __init__(self, fd, name):
+#                self.file = fd
+#                self.name = name
+#            def __getattr__(self, attr):
+#                return getattr(self.file, attr)
+#
+#        text = not 'b' in mode
+#        # max number of tries to find out a unique file name
+#        tempfile.TMP_MAX = 50
+#        fd, name = tempfile.mkstemp(suffix, prefix, dir, text=text)
+#        file = os.fdopen(fd, mode)
+#        return FileWrapper(file, name)
+
+        # NOTE: everything below is unreachable while the raise above stands.
+        text = not 'b' in mode
+        # for unique files: append a version suffix when the name is taken
+        if dir:
+            cr = dir.cr
+            uid = dir.uid
+            pool = pooler.get_pool(cr.dbname)
+            object = dir and dir.object or False
+            object2 = dir and dir.object2 or False
+            res = pool.get('ir.attachment').search(cr, uid, [('name','like',prefix),('parent_id','=',object and object.type in ('directory','ressource') and object.id or False),('res_id','=',object2 and object2.id or False),('res_model','=',object2 and object2._name or False)])
+            if len(res):
+                pre = prefix.split('.')
+                prefix = pre[0] + '.v'+str(len(res))+'.'+pre[1]
+                #prefix = prefix + '.'
+        return self.create(dir, suffix+prefix, text)
+
+    # Ok
+    def chdir(self, path):
+        if not path:
+            self.cwd = '/'
+            return None
+        if path.type in ('collection','database'):
+            self.cwd = self.fs2ftp(path)
+        else:
+            raise OSError(1, 'Operation not permitted.')
+
+    # Ok
+    def mkdir(self, node, basename):
+        """Create the specified directory."""
+        if not node:
+            raise OSError(1, 'Operation not permitted.')
+        try:
+            object2 = node and node.object2 or False
+            object = node and node.object or False
+            cr = node.cr
+            uid = node.uid
+            pool = pooler.get_pool(cr.dbname)
+            if node.object and (node.object.type=='ressource') and not node.object2:
+                raise OSError(1, 'Operation not permitted.')
+            val = {
+                'name': basename,
+                'ressource_parent_type_id': object and object.ressource_type_id.id or False,
+                'ressource_id': object2 and object2.id or False
+            }
+            if (object and (object.type in ('directory'))) or not object2:
+                val['parent_id'] = object and object.id or False
+            # Check if it already exists!
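+            # (e.g. search document_directory for the same (name, parent_id)
+            # first and raise OSError(17, 'File exists.') on a duplicate,
+            # instead of letting the dirname_uniq constraint abort.)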
+ pool.get('document.directory').create(cr, uid, val) + cr.commit() + except Exception,e: + print e + raise OSError(1, 'Operation not permited.') + + + # Ok + def close_cr(self, data): + if data: + data[0].close() + return True + + def get_cr(self, path): + path = self.ftpnorm(path) + if path=='/': + return None + dbname = path.split('/')[1] + try: + db,pool = pooler.get_db_and_pool(dbname) + except: + raise OSError(1, 'Operation not permited.') + cr = db.cursor() + uid = security.login(dbname, self.username, self.password) + if not uid: + raise OSError(1, 'Operation not permited.') + return cr, uid, pool + + # Ok + def listdir(self, path): + """List the content of a directory.""" + class false_node: + object = None + type = 'database' + def __init__(self, db): + self.path = '/'+db + + if path is None: + result = [] + for db in self.db_list(): + result.append(false_node(db)) + return result + return path.children() + + # Ok + def rmdir(self, node): + """Remove the specified directory.""" + cr = node.cr + uid = node.uid + pool = pooler.get_pool(cr.dbname) + object2=node and node.object2 or False + object=node and node.object or False + if object._table_name=='document.directory': + if node.children(): + raise OSError(39, 'Directory not empty.') + res = pool.get('document.directory').unlink(cr, uid, [object.id]) + else: + raise OSError(39, 'Directory not empty.') + + cr.commit() + + # Ok + def remove(self, node): + """Remove the specified file.""" + cr = node.cr + uid = node.uid + pool = pooler.get_pool(cr.dbname) + object2=node and node.object2 or False + object=node and node.object or False + if not object: + raise OSError(2, 'Not such file or directory.') + if object._table_name=='ir.attachment': + res = pool.get('ir.attachment').unlink(cr, uid, [object.id]) + else: + raise OSError(1, 'Operation not permited.') + cr.commit() + + # Ok + def rename(self, src, dst_basedir,dst_basename): + """ + Renaming operation, the effect depends on the src: + * A file: read, create and remove + * A directory: change the parent and reassign childs to ressource + """ + try: + if src.type=='collection': + if src.object._table_name <> 'document.directory': + raise OSError(1, 'Operation not permited.') + result = { + 'directory': [], + 'attachment': [] + } + # Compute all childs to set the new ressource ID + child_ids = [src] + while len(child_ids): + node = child_ids.pop(0) + child_ids += node.children() + if node.type =='collection': + result['directory'].append(node.object.id) + if (not node.object.ressource_id) and node.object2: + raise OSError(1, 'Operation not permited.') + elif node.type =='file': + result['attachment'].append(node.object.id) + + cr = src.cr + uid = src.uid + pool = pooler.get_pool(cr.dbname) + object2=src and src.object2 or False + object=src and src.object or False + if object2 and not object.ressource_id: + raise OSError(1, 'Operation not permited.') + val = { + 'name':dst_basename, + } + if (dst_basedir.object and (dst_basedir.object.type in ('directory'))) or not dst_basedir.object2: + val['parent_id'] = dst_basedir.object and dst_basedir.object.id or False + else: + val['parent_id'] = False + res = pool.get('document.directory').write(cr, uid, [object.id],val) + + if dst_basedir.object2: + ressource_type_id = pool.get('ir.model').search(cr,uid,[('model','=',dst_basedir.object2._name)])[0] + ressource_id = dst_basedir.object2.id + title = dst_basedir.object2.name + ressource_model = dst_basedir.object2._name + if dst_basedir.object2._name=='res.partner': + 
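+                    # the destination is itself a partner: link the files
+                    # directly to it; other models fall back to their
+                    # partner_id field in the else branch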
partner_id=dst_basedir.object2.id + else: + partner_id= dst_basedir.object2.partner_id and dst_basedir.object2.partner_id.id or False + else: + ressource_type_id = False + ressource_id=False + ressource_model = False + partner_id = False + title = False + + pool.get('document.directory').write(cr, uid, result['directory'], { + 'ressource_id': ressource_id, + 'ressource_parent_type_id': ressource_type_id + }) + val = { + 'res_id': ressource_id, + 'res_model': ressource_model, + 'title': title, + 'partner_id': partner_id + } + pool.get('ir.attachment').write(cr, uid, result['attachment'], val) + if (not val['res_id']) and result['attachment']: + dst_basedir.cr.execute('update ir_attachment set res_id=NULL where id in ('+','.join(map(str,result['attachment']))+')') + + cr.commit() + elif src.type=='file': + pool = pooler.get_pool(src.cr.dbname) + val = { + 'partner_id':False, + #'res_id': False, + 'res_model': False, + 'name': dst_basename, + 'datas_fname': dst_basename, + 'title': dst_basename, + } + + if (dst_basedir.object and (dst_basedir.object.type in ('directory','ressource'))) or not dst_basedir.object2: + val['parent_id'] = dst_basedir.object and dst_basedir.object.id or False + else: + val['parent_id'] = False + + if dst_basedir.object2: + val['res_model'] = dst_basedir.object2._name + val['res_id'] = dst_basedir.object2.id + val['title'] = dst_basedir.object2.name + if dst_basedir.object2._name=='res.partner': + val['partner_id']=dst_basedir.object2.id + else: + val['partner_id']= dst_basedir.object2.partner_id and dst_basedir.object2.partner_id.id or False + elif src.object.res_id: + # I had to do that because writing False to an integer writes 0 instead of NULL + # change if one day we decide to improve osv/fields.py + dst_basedir.cr.execute('update ir_attachment set res_id=NULL where id=%d', (src.object.id,)) + + pool.get('ir.attachment').write(src.cr, src.uid, [src.object.id], val) + src.cr.commit() + elif src.type=='content': + src_file=self.open(src,'r') + dst_file=self.create(dst_basedir,dst_basename,'w') + dst_file.write(src_file.getvalue()) + dst_file.close() + src_file.close() + src.cr.commit() + else: + raise OSError(1, 'Operation not permited.') + except Exception,err: + print err + raise OSError(1,'Operation not permited.') + + + + + # Nearly Ok + def stat(self, node): + r = list(os.stat('/')) + if self.isfile(node): + r[0] = 33188 + r[6] = self.getsize(node) + r[7] = self.getmtime(node) + r[8] = self.getmtime(node) + r[9] = self.getmtime(node) + return posix.stat_result(r) + lstat = stat + + # --- Wrapper methods around os.path.* + + # Ok + def isfile(self, node): + if node and (node.type not in ('collection','database')): + return True + return False + + # Ok + def islink(self, path): + """Return True if path is a symbolic link.""" + return False + + # Ok + def isdir(self, node): + """Return True if path is a directory.""" + if node is None: + return True + if node and (node.type in ('collection','database')): + return True + return False + + # Ok + def getsize(self, node): + """Return the size of the specified file in bytes.""" + result = 0L + if node.type=='file': + result = node.object.file_size or 0L + return result + + # Ok + def getmtime(self, node): + """Return the last modified time as a number of seconds since + the epoch.""" + if node.object and node.type<>'content': + dt = (node.object.write_date or node.object.create_date)[:19] + result = time.mktime(time.strptime(dt, '%Y-%m-%d %H:%M:%S')) + else: + result = time.mktime(time.localtime()) + return result + 
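+    # NOTE: getmtime() above parses the ORM's 'YYYY-MM-DD HH:MM:SS' audit
+    # timestamps; virtual 'content' nodes have no stored date, so they
+    # always report the current time.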
+ # Ok + def realpath(self, path): + """Return the canonical version of path eliminating any + symbolic links encountered in the path (if they are + supported by the operating system). + """ + return path + + # Ok + def lexists(self, path): + """Return True if path refers to an existing path, including + a broken or circular symbolic link. + """ + return path and True or False + exists = lexists + + # Ok, can be improved + def glob1(self, dirname, pattern): + """Return a list of files matching a dirname pattern + non-recursively. + + Unlike glob.glob1 raises exception if os.listdir() fails. + """ + names = self.listdir(dirname) + if pattern[0] != '.': + names = filter(lambda x: x.path[0] != '.', names) + return fnmatch.filter(names, pattern) + + # --- Listing utilities + + # note: the following operations are no more blocking + + # Ok + def get_list_dir(self, path): + """"Return an iterator object that yields a directory listing + in a form suitable for LIST command. + """ + if self.isdir(path): + listing = self.listdir(path) + #listing.sort() + return self.format_list(path and path.path or '/', listing) + # if path is a file or a symlink we return information about it + elif self.isfile(path): + basedir, filename = os.path.split(path.path) + self.lstat(path) # raise exc in case of problems + return self.format_list(basedir, [filename]) + + + # Ok + def get_stat_dir(self, rawline, datacr): + """Return an iterator object that yields a list of files + matching a dirname pattern non-recursively in a form + suitable for STAT command. + + - (str) rawline: the raw string passed by client as command + argument. + """ + ftppath = self.ftpnorm(rawline) + if not glob.has_magic(ftppath): + return self.get_list_dir(self.ftp2fs(rawline, datacr)) + else: + basedir, basename = os.path.split(ftppath) + if glob.has_magic(basedir): + return iter(['Directory recursion not supported.\r\n']) + else: + basedir = self.ftp2fs(basedir, datacr) + listing = self.glob1(basedir, basename) + if listing: + listing.sort() + return self.format_list(basedir, listing) + + # Ok + def format_list(self, basedir, listing, ignore_err=True): + """Return an iterator object that yields the entries of given + directory emulating the "/bin/ls -lA" UNIX command output. + + - (str) basedir: the absolute dirname. + - (list) listing: the names of the entries in basedir + - (bool) ignore_err: when False raise exception if os.lstat() + call fails. + + On platforms which do not support the pwd and grp modules (such + as Windows), ownership is printed as "owner" and "group" as a + default, and number of hard links is always "1". On UNIX + systems, the actual owner, group, and number of links are + printed. 
+ + This is how output appears to client: + + -rw-rw-rw- 1 owner group 7045120 Sep 02 3:47 music.mp3 + drwxrwxrwx 1 owner group 0 Aug 31 18:50 e-books + -rw-rw-rw- 1 owner group 380 Sep 02 3:40 module.py + """ + for file in listing: + try: + st = self.lstat(file) + except os.error: + if ignore_err: + continue + raise + perms = filemode(st.st_mode) # permissions + nlinks = st.st_nlink # number of links to inode + if not nlinks: # non-posix system, let's use a bogus value + nlinks = 1 + size = st.st_size # file size + uname = "owner" + gname = "group" + # stat.st_mtime could fail (-1) if last mtime is too old + # in which case we return the local time as last mtime + try: + mtime = time.strftime("%b %d %H:%M", time.localtime(st.st_mtime)) + except ValueError: + mtime = time.strftime("%b %d %H:%M") + + # formatting is matched with proftpd ls output + yield "%s %3s %-8s %-8s %8s %s %s\r\n" %(perms, nlinks, uname, gname, + size, mtime, file.path.split('/')[-1]) + + # Ok + def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): + """Return an iterator object that yields the entries of a given + directory or of a single file in a form suitable with MLSD and + MLST commands. + + Every entry includes a list of "facts" referring the listed + element. See RFC-3659, chapter 7, to see what every single + fact stands for. + + - (str) basedir: the absolute dirname. + - (list) listing: the names of the entries in basedir + - (str) perms: the string referencing the user permissions. + - (str) facts: the list of "facts" to be returned. + - (bool) ignore_err: when False raise exception if os.stat() + call fails. + + Note that "facts" returned may change depending on the platform + and on what user specified by using the OPTS command. + + This is how output could appear to the client issuing + a MLSD request: + + type=file;size=156;perm=r;modify=20071029155301;unique=801cd2; music.mp3 + type=dir;size=0;perm=el;modify=20071127230206;unique=801e33; ebooks + type=file;size=211;perm=r;modify=20071103093626;unique=801e32; module.py + """ + permdir = ''.join([x for x in perms if x not in 'arw']) + permfile = ''.join([x for x in perms if x not in 'celmp']) + if ('w' in perms) or ('a' in perms) or ('f' in perms): + permdir += 'c' + if 'd' in perms: + permdir += 'p' + type = size = perm = modify = create = unique = mode = uid = gid = "" + for basename in listing: + file = os.path.join(basedir, basename) + try: + st = self.stat(file) + except OSError: + if ignore_err: + continue + raise + # type + perm + if stat.S_ISDIR(st.st_mode): + if 'type' in facts: + if basename == '.': + type = 'type=cdir;' + elif basename == '..': + type = 'type=pdir;' + else: + type = 'type=dir;' + if 'perm' in facts: + perm = 'perm=%s;' %permdir + else: + if 'type' in facts: + type = 'type=file;' + if 'perm' in facts: + perm = 'perm=%s;' %permfile + if 'size' in facts: + size = 'size=%s;' %st.st_size # file size + # last modification time + if 'modify' in facts: + try: + modify = 'modify=%s;' %time.strftime("%Y%m%d%H%M%S", + time.localtime(st.st_mtime)) + except ValueError: + # stat.st_mtime could fail (-1) if last mtime is too old + modify = "" + if 'create' in facts: + # on Windows we can provide also the creation time + try: + create = 'create=%s;' %time.strftime("%Y%m%d%H%M%S", + time.localtime(st.st_ctime)) + except ValueError: + create = "" + # UNIX only + if 'unix.mode' in facts: + mode = 'unix.mode=%s;' %oct(st.st_mode & 0777) + if 'unix.uid' in facts: + uid = 'unix.uid=%s;' %st.st_uid + if 'unix.gid' in facts: + gid = 
'unix.gid=%s;' %st.st_gid + # We provide unique fact (see RFC-3659, chapter 7.5.2) on + # posix platforms only; we get it by mixing st_dev and + # st_ino values which should be enough for granting an + # uniqueness for the file listed. + # The same approach is used by pure-ftpd. + # Implementors who want to provide unique fact on other + # platforms should use some platform-specific method (e.g. + # on Windows NTFS filesystems MTF records could be used). + if 'unique' in facts: + unique = "unique=%x%x;" %(st.st_dev, st.st_ino) + + yield "%s%s%s%s%s%s%s%s%s %s\r\n" %(type, size, perm, modify, create, + mode, uid, gid, unique, basename) + diff --git a/addons/document/ftpserver/authorizer.py b/addons/document/ftpserver/authorizer.py new file mode 100644 index 00000000000..d33dc5d7234 --- /dev/null +++ b/addons/document/ftpserver/authorizer.py @@ -0,0 +1,70 @@ +#import pooler + +class authorizer: + read_perms = "elr" + write_perms = "adfmw" + + def __init__(self): + self.password = '' + + def validate_authentication(self, username, password): + """Return True if the supplied username and password match the + stored credentials.""" + self.password = password + return True + + def impersonate_user(self, username, password): + """Impersonate another user (noop). + + It is always called before accessing the filesystem. + By default it does nothing. The subclass overriding this + method is expected to provide a mechanism to change the + current user. + """ + + def terminate_impersonation(self): + """Terminate impersonation (noop). + + It is always called after having accessed the filesystem. + By default it does nothing. The subclass overriding this + method is expected to provide a mechanism to switch back + to the original user. + """ + + def has_user(self, username): + """Whether the username exists in the virtual users table.""" + return True + + def has_perm(self, username, perm, path=None): + """Whether the user has permission over path (an absolute + pathname of a file or a directory). + + Expected perm argument is one of the following letters: + "elradfmw". + """ + return True + paths = path.split('/') + if not len(paths)>2: + return True + db_name = paths[1] + db,pool = pooler.get_db_and_pool(db_name) + res = security.login(db_name, username, self.password) + return bool(res) + + def get_perms(self, username): + """Return current user permissions.""" + return 'elr' + + def get_home_dir(self, username): + """Return the user's home directory.""" + return '/' + + def get_msg_login(self, username): + """Return the user's login message.""" + return 'Welcome on OpenERP document management system.' + + def get_msg_quit(self, username): + """Return the user's quitting message.""" + return 'Bye.' 
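In has_perm above, the early 'return True' makes the database check after it
unreachable (and its pooler import is commented out at the top of the file).
A sketch of what that dead code was presumably meant to do, with the imports
restored; this is an inference from the unreachable lines, not part of the
diff:

    import pooler
    from service import security

    def has_perm(self, username, perm, path=None):
        parts = (path or '/').split('/')
        if len(parts) <= 2:
            # not inside a database subtree: nothing to check yet
            return True
        db_name = parts[1]
        db, pool = pooler.get_db_and_pool(db_name)  # make sure the db is loaded
        # a valid OpenERP login for that database grants the permission
        return bool(security.login(db_name, username, self.password))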
+ + diff --git a/addons/document/ftpserver/ftpserver.py b/addons/document/ftpserver/ftpserver.py new file mode 100644 index 00000000000..4ca5b06bb39 --- /dev/null +++ b/addons/document/ftpserver/ftpserver.py @@ -0,0 +1,3092 @@ +#!/usr/bin/env python +# ftpserver.py +# +# pyftpdlib is released under the MIT license, reproduced below: +# ====================================================================== +# Copyright (C) 2007 Giampaolo Rodola' +# Hacked by Fabien Pinckaers (C) 2008 +# +# All Rights Reserved +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose and without fee is hereby +# granted, provided that the above copyright notice appear in all +# copies and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Giampaolo Rodola' not be used in advertising or publicity pertaining to +# distribution of the software without specific, written prior +# permission. +# +# Giampaolo Rodola' DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN +# NO EVENT Giampaolo Rodola' BE LIABLE FOR ANY SPECIAL, INDIRECT OR +# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# ====================================================================== + + +"""pyftpdlib: RFC-959 asynchronous FTP server. + +pyftpdlib implements a fully functioning asynchronous FTP server as +defined in RFC-959. A hierarchy of classes outlined below implement +the backend functionality for the FTPd: + + [FTPServer] - the base class for the backend. + + [FTPHandler] - a class representing the server-protocol-interpreter + (server-PI, see RFC-959). Each time a new connection occurs + FTPServer will create a new FTPHandler instance to handle the + current PI session. + + [ActiveDTP], [PassiveDTP] - base classes for active/passive-DTP + backends. + + [DTPHandler] - this class handles processing of data transfer + operations (server-DTP, see RFC-959). + + [DummyAuthorizer] - an "authorizer" is a class handling FTPd + authentications and permissions. It is used inside FTPHandler class + to verify user passwords, to get user's home directory and to get + permissions when a filesystem read/write occurs. "DummyAuthorizer" + is the base authorizer class providing a platform independent + interface for managing virtual users. + + [AbstractedFS] - class used to interact with the file system, + providing a high level, cross-platform interface compatible + with both Windows and UNIX style filesystems. + + [AuthorizerError] - base class for authorizers exceptions. + + +pyftpdlib also provides 3 different logging streams through 3 functions +which can be overridden to allow for custom logging. + + [log] - the main logger that logs the most important messages for + the end user regarding the FTPd. + + [logline] - this function is used to log commands and responses + passing through the control FTP channel. + + [logerror] - log traceback outputs occurring in case of errors. 
+ + +Usage example: + +>>> from pyftpdlib import ftpserver +>>> authorizer = ftpserver.DummyAuthorizer() +>>> authorizer.add_user('user', 'password', '/home/user', perm='elradfmw') +>>> authorizer.add_anonymous('/home/nobody') +>>> ftp_handler = ftpserver.FTPHandler +>>> ftp_handler.authorizer = authorizer +>>> address = ("127.0.0.1", 21) +>>> ftpd = ftpserver.FTPServer(address, ftp_handler) +>>> ftpd.serve_forever() +Serving FTP on 127.0.0.1:21 +[]127.0.0.1:2503 connected. +127.0.0.1:2503 ==> 220 Ready. +127.0.0.1:2503 <== USER anonymous +127.0.0.1:2503 ==> 331 Username ok, send password. +127.0.0.1:2503 <== PASS ****** +127.0.0.1:2503 ==> 230 Login successful. +[anonymous]@127.0.0.1:2503 User anonymous logged in. +127.0.0.1:2503 <== TYPE A +127.0.0.1:2503 ==> 200 Type set to: ASCII. +127.0.0.1:2503 <== PASV +127.0.0.1:2503 ==> 227 Entering passive mode (127,0,0,1,9,201). +127.0.0.1:2503 <== LIST +127.0.0.1:2503 ==> 150 File status okay. About to open data connection. +[anonymous]@127.0.0.1:2503 OK LIST "/". Transfer starting. +127.0.0.1:2503 ==> 226 Transfer complete. +[anonymous]@127.0.0.1:2503 Transfer complete. 706 bytes transmitted. +127.0.0.1:2503 <== QUIT +127.0.0.1:2503 ==> 221 Goodbye. +[anonymous]@127.0.0.1:2503 Disconnected. +""" + + +import asyncore +import asynchat +import socket +import os +import sys +import traceback +import errno +import time +import glob +import fnmatch +import tempfile +import warnings +import random +import stat +from tarfile import filemode + +try: + import pwd + import grp +except ImportError: + pwd = grp = None + + +LOG_ACTIVE = True + +__all__ = ['proto_cmds', 'Error', 'log', 'logline', 'logerror', 'DummyAuthorizer', + 'FTPHandler', 'FTPServer', 'PassiveDTP', 'ActiveDTP', 'DTPHandler', + 'FileProducer', 'IteratorProducer', 'BufferedIteratorProducer', + 'AbstractedFS',] + + +__pname__ = 'Python FTP server library (pyftpdlib)' +__ver__ = '0.4.0' +__date__ = '2008-05-16' +__author__ = "Giampaolo Rodola' " +__web__ = 'http://code.google.com/p/pyftpdlib/' + + +proto_cmds = { + 'ABOR': 'Syntax: ABOR (abort transfer).', + 'ALLO': 'Syntax: ALLO bytes (obsolete; allocate storage).', + 'APPE': 'Syntax: APPE file-name (append data to an existent file).', + 'CDUP': 'Syntax: CDUP (go to parent directory).', + 'CWD' : 'Syntax: CWD dir-name (change current working directory).', + 'DELE': 'Syntax: DELE file-name (delete file).', + 'EPRT': 'Syntax: EPRT |proto|ip|port| (set server in extended active mode).', + 'EPSV': 'Syntax: EPSV [ proto/"ALL"] (set server in extended passive mode).', + 'FEAT': 'Syntax: FEAT (list all new features supported).', + 'HELP': 'Syntax: HELP [ cmd] (show help).', + 'LIST': 'Syntax: LIST [ path-name] (list files).', + 'MDTM': 'Syntax: MDTM file-name (get last modification time).', + 'MLSD': 'Syntax: MLSD [ dir-name] (list files in a machine-processable form)', + 'MLST': 'Syntax: MLST [ path-name] (show a path in a machine-processable form)', + 'MODE': 'Syntax: MODE mode (obsolete; set data transfer mode).', + 'MKD' : 'Syntax: MDK dir-name (create directory).', + 'NLST': 'Syntax: NLST [ path-name] (list files in a compact form).', + 'NOOP': 'Syntax: NOOP (just do nothing).', + 'OPTS': 'Syntax: OPTS ftp-command [ option] (specify options for FTP commands)', + 'PASS': 'Syntax: PASS user-name (set user password).', + 'PASV': 'Syntax: PASV (set server in passive mode).', + 'PORT': 'Syntax: PORT h1,h2,h3,h4,p1,p2 (set server in active mode).', + 'PWD' : 'Syntax: PWD (get current working directory).', + 'QUIT': 'Syntax: QUIT (quit current 
session).', + 'REIN': 'Syntax: REIN (reinitialize / flush account).', + 'REST': 'Syntax: REST marker (restart file position).', + 'RETR': 'Syntax: RETR file-name (retrieve a file).', + 'RMD' : 'Syntax: RMD dir-name (remove directory).', + 'RNFR': 'Syntax: RNFR file-name (file renaming (source name)).', + 'RNTO': 'Syntax: RNTO file-name (file renaming (destination name)).', + 'SIZE': 'Syntax: HELP file-name (get file size).', + 'STAT': 'Syntax: STAT [ path name] (status information [list files]).', + 'STOR': 'Syntax: STOR file-name (store a file).', + 'STOU': 'Syntax: STOU [ file-name] (store a file with a unique name).', + 'STRU': 'Syntax: STRU type (obsolete; set file structure).', + 'SYST': 'Syntax: SYST (get operating system type).', + 'TYPE': 'Syntax: TYPE [A | I] (set transfer type).', + 'USER': 'Syntax: USER user-name (set username).', + 'XCUP': 'Syntax: XCUP (obsolete; go to parent directory).', + 'XCWD': 'Syntax: XCWD dir-name (obsolete; change current directory).', + 'XMKD': 'Syntax: XMDK dir-name (obsolete; create directory).', + 'XPWD': 'Syntax: XPWD (obsolete; get current dir).', + 'XRMD': 'Syntax: XRMD dir-name (obsolete; remove directory).', + } + + +# hack around format_exc function of traceback module to grant +# backward compatibility with python < 2.4 +if not hasattr(traceback, 'format_exc'): + try: + import cStringIO as StringIO + except ImportError: + import StringIO + + def _format_exc(): + f = StringIO.StringIO() + traceback.print_exc(file=f) + data = f.getvalue() + f.close() + return data + + traceback.format_exc = _format_exc + + +def _strerror(err): + """A wrap around os.strerror() which may be not available on all + platforms (e.g. pythonCE). + + - (instance) err: an EnvironmentError or derived class instance. + """ + if hasattr(os, 'strerror'): + return os.strerror(err.errno) + else: + return err.strerror + + +# --- library defined exceptions + +class Error(Exception): + """Base class for module exceptions.""" + +class AuthorizerError(Error): + """Base class for authorizer exceptions.""" + + +# --- loggers + +def log(msg): + """Log messages intended for the end user.""" + if LOG_ACTIVE: + print msg + +def logline(msg): + """Log commands and responses passing through the command channel.""" + if LOG_ACTIVE: + print msg + +def logerror(msg): + """Log traceback outputs occurring in case of errors.""" + sys.stderr.write(str(msg) + '\n') + sys.stderr.flush() + + +# --- authorizers + +class DummyAuthorizer: + """Basic "dummy" authorizer class, suitable for subclassing to + create your own custom authorizers. + + An "authorizer" is a class handling authentications and permissions + of the FTP server. It is used inside FTPHandler class for verifying + user's password, getting users home directory, checking user + permissions when a file read/write event occurs and changing user + before accessing the filesystem. + + DummyAuthorizer is the base authorizer, providing a platform + independent interface for managing "virtual" FTP users. System + dependent authorizers can by written by subclassing this base + class and overriding appropriate methods as necessary. + """ + + read_perms = "elr" + write_perms = "adfmw" + + def __init__(self): + self.user_table = {} + + def add_user(self, username, password, homedir, perm='elr', + msg_login="Login successful.", msg_quit="Goodbye."): + """Add a user to the virtual users table. + + AuthorizerError exceptions raised on error conditions such as + invalid permissions, missing home directory or duplicate usernames. 
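+
+ For instance, re-adding an existing user fails (a sketch, assuming
+ /home/user exists on the host):
+
+ >>> authorizer = DummyAuthorizer()
+ >>> authorizer.add_user('user', '12345', '/home/user', perm='elradfmw')
+ >>> authorizer.add_user('user', '12345', '/home/user')
+ Traceback (most recent call last):
+ ...
+ AuthorizerError: User "user" already exists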
+ + Optional perm argument is a string referencing the user's + permissions explained below: + + Read permissions: + - "e" = change directory (CWD command) + - "l" = list files (LIST, NLST, MLSD commands) + - "r" = retrieve file from the server (RETR command) + + Write permissions: + - "a" = append data to an existing file (APPE command) + - "d" = delete file or directory (DELE, RMD commands) + - "f" = rename file or directory (RNFR, RNTO commands) + - "m" = create directory (MKD command) + - "w" = store a file to the server (STOR, STOU commands) + + Optional msg_login and msg_quit arguments can be specified to + provide customized response strings when user log-in and quit. + """ + if self.has_user(username): + raise AuthorizerError('User "%s" already exists' %username) + homedir = os.path.realpath(homedir) + if not os.path.isdir(homedir): + raise AuthorizerError('No such directory: "%s"' %homedir) + for p in perm: + if p not in 'elradfmw': + raise AuthorizerError('No such permission "%s"' %p) + for p in perm: + if (p in self.write_perms) and (username == 'anonymous'): + warnings.warn("write permissions assigned to anonymous user.", + RuntimeWarning) + break + dic = {'pwd': str(password), + 'home': homedir, + 'perm': perm, + 'msg_login': str(msg_login), + 'msg_quit': str(msg_quit) + } + self.user_table[username] = dic + + def add_anonymous(self, homedir, **kwargs): + """Add an anonymous user to the virtual users table. + + AuthorizerError exception raised on error conditions such as + invalid permissions, missing home directory, or duplicate + anonymous users. + + The keyword arguments in kwargs are the same expected by + add_user method: "perm", "msg_login" and "msg_quit". + + The optional "perm" keyword argument is a string defaulting to + "elr" referencing "read-only" anonymous user's permissions. + + Using write permission values ("adfmw") results in a + RuntimeWarning. + """ + DummyAuthorizer.add_user(self, 'anonymous', '', homedir, **kwargs) + + def remove_user(self, username): + """Remove a user from the virtual users table.""" + del self.user_table[username] + + def validate_authentication(self, username, password): + """Return True if the supplied username and password match the + stored credentials.""" + return self.user_table[username]['pwd'] == password + + def impersonate_user(self, username, password): + """Impersonate another user (noop). + + It is always called before accessing the filesystem. + By default it does nothing. The subclass overriding this + method is expected to provide a mechanism to change the + current user. + """ + + def terminate_impersonation(self): + """Terminate impersonation (noop). + + It is always called after having accessed the filesystem. + By default it does nothing. The subclass overriding this + method is expected to provide a mechanism to switch back + to the original user. + """ + + def has_user(self, username): + """Whether the username exists in the virtual users table.""" + return username in self.user_table + + def has_perm(self, username, perm, path=None): + """Whether the user has permission over path (an absolute + pathname of a file or a directory). + + Expected perm argument is one of the following letters: + "elradfmw". 
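+
+ For example, with the user added above (note that this base class
+ ignores the path argument entirely):
+
+ >>> authorizer.has_perm('user', 'r')
+ True
+ >>> authorizer.has_perm('user', 'r', '/not/under/his/home')
+ True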
+ """ + return perm in self.user_table[username]['perm'] + + def get_perms(self, username): + """Return current user permissions.""" + return self.user_table[username]['perm'] + + def get_home_dir(self, username): + """Return the user's home directory.""" + return self.user_table[username]['home'] + + def get_msg_login(self, username): + """Return the user's login message.""" + return self.user_table[username]['msg_login'] + + def get_msg_quit(self, username): + """Return the user's quitting message.""" + return self.user_table[username]['msg_quit'] + + +# --- DTP classes + +class PassiveDTP(asyncore.dispatcher): + """This class is an asyncore.disptacher subclass. It creates a + socket listening on a local port, dispatching the resultant + connection to DTPHandler. + """ + + def __init__(self, cmd_channel, extmode=False): + """Initialize the passive data server. + + - (instance) cmd_channel: the command channel class instance. + - (bool) extmode: wheter use extended passive mode response type. + """ + asyncore.dispatcher.__init__(self) + self.cmd_channel = cmd_channel + + ip = self.cmd_channel.getsockname()[0] + self.create_socket(self.cmd_channel.af, socket.SOCK_STREAM) + + if not self.cmd_channel.passive_ports: + # By using 0 as port number value we let kernel choose a free + # unprivileged random port. + self.bind((ip, 0)) + else: + ports = list(self.cmd_channel.passive_ports) + while ports: + port = ports.pop(random.randint(0, len(ports) -1)) + try: + self.bind((ip, port)) + except socket.error, why: + if why[0] == errno.EADDRINUSE: # port already in use + if ports: + continue + # If cannot use one of the ports in the configured + # range we'll use a kernel-assigned port, and log + # a message reporting the issue. + # By using 0 as port number value we let kernel + # choose a free unprivileged random port. + else: + self.bind((ip, 0)) + self.cmd_channel.log( + "Can't find a valid passive port in the " + "configured range. A random kernel-assigned " + "port will be used." + ) + else: + raise + else: + break + self.listen(5) + port = self.socket.getsockname()[1] + if not extmode: + if self.cmd_channel.masquerade_address: + ip = self.cmd_channel.masquerade_address + # The format of 227 response in not standardized. + # This is the most expected: + self.cmd_channel.respond('227 Entering passive mode (%s,%d,%d).' %( + ip.replace('.', ','), port / 256, port % 256)) + else: + self.cmd_channel.respond('229 Entering extended passive mode ' + '(|||%d|).' %port) + + # --- connection / overridden + + def handle_accept(self): + """Called when remote client initiates a connection.""" + sock, addr = self.accept() + + # Check the origin of data connection. If not expressively + # configured we drop the incoming data connection if remote + # IP address does not match the client's IP address. + if (self.cmd_channel.remote_ip != addr[0]): + if not self.cmd_channel.permit_foreign_addresses: + try: + sock.close() + except socket.error: + pass + msg = 'Rejected data connection from foreign address %s:%s.' \ + %(addr[0], addr[1]) + self.cmd_channel.respond("425 %s" %msg) + self.cmd_channel.log(msg) + # do not close listening socket: it couldn't be client's blame + return + else: + # site-to-site FTP allowed + msg = 'Established data connection with foreign address %s:%s.'\ + %(addr[0], addr[1]) + self.cmd_channel.log(msg) + # Immediately close the current channel (we accept only one + # connection at time) and avoid running out of max connections + # limit. 
+ self.close() + # delegate such connection to DTP handler + handler = self.cmd_channel.dtp_handler(sock, self.cmd_channel) + self.cmd_channel.data_channel = handler + self.cmd_channel.on_dtp_connection() + + def writable(self): + return 0 + + def handle_error(self): + """Called to handle any uncaught exceptions.""" + try: + raise + except (KeyboardInterrupt, SystemExit, asyncore.ExitNow): + raise + logerror(traceback.format_exc()) + self.close() + + def handle_close(self): + """Called on closing the data connection.""" + self.close() + + +class ActiveDTP(asyncore.dispatcher): + """This class is an asyncore.disptacher subclass. It creates a + socket resulting from the connection to a remote user-port, + dispatching it to DTPHandler. + """ + + def __init__(self, ip, port, cmd_channel): + """Initialize the active data channel attemping to connect + to remote data socket. + + - (str) ip: the remote IP address. + - (int) port: the remote port. + - (instance) cmd_channel: the command channel class instance. + """ + asyncore.dispatcher.__init__(self) + self.cmd_channel = cmd_channel + self.create_socket(self.cmd_channel.af, socket.SOCK_STREAM) + try: + self.connect((ip, port)) + except socket.gaierror: + self.cmd_channel.respond("425 Can't connect to specified address.") + self.close() + + # --- connection / overridden + + def handle_write(self): + """NOOP, must be overridden to prevent unhandled write event.""" + + def handle_connect(self): + """Called when connection is established.""" + self.cmd_channel.respond('200 Active data connection established.') + # delegate such connection to DTP handler + handler = self.cmd_channel.dtp_handler(self.socket, self.cmd_channel) + self.cmd_channel.data_channel = handler + self.cmd_channel.on_dtp_connection() + #self.close() # <-- (done automatically) + + def handle_expt(self): + self.cmd_channel.respond("425 Can't connect to specified address.") + self.close() + + def handle_error(self): + """Called to handle any uncaught exceptions.""" + try: + raise + except (KeyboardInterrupt, SystemExit, asyncore.ExitNow): + raise + except socket.error: + pass + except: + logerror(traceback.format_exc()) + self.cmd_channel.respond("425 Can't connect to specified address.") + self.close() + + +try: + from collections import deque +except ImportError: + # backward compatibility with Python < 2.4 by replacing deque with a list + class deque(list): + def appendleft(self, obj): + list.insert(self, 0, obj) + + +class DTPHandler(asyncore.dispatcher): + """Class handling server-data-transfer-process (server-DTP, see + RFC-959) managing data-transfer operations involving sending + and receiving data. + + Instance attributes defined in this class, initialized when + channel is opened: + + - (instance) cmd_channel: the command channel class instance. + - (file) file_obj: the file transferred (if any). + - (bool) receive: True if channel is used for receiving data. + - (bool) transfer_finished: True if transfer completed successfully. + - (int) tot_bytes_sent: the total bytes sent. + - (int) tot_bytes_received: the total bytes received. + + DTPHandler implementation note: + + When a producer is consumed and close_when_done() has been called + previously, refill_buffer() erroneously calls close() instead of + handle_close() - (see: http://bugs.python.org/issue1740572) + + To avoid this problem DTPHandler is implemented as a subclass of + asyncore.dispatcher instead of asynchat.async_chat. 
+ This implementation follows the same approach that asynchat module + should use in Python 2.6. + + The most important change in the implementation is related to + producer_fifo, which is a pure deque object instead of a + producer_fifo instance. + + Since we don't want to break backward compatibily with older python + versions (deque has been introduced in Python 2.4), if deque is not + available we use a list instead. + """ + + ac_in_buffer_size = 8192 + ac_out_buffer_size = 8192 + + def __init__(self, sock_obj, cmd_channel): + """Initialize the command channel. + + - (instance) sock_obj: the socket object instance of the newly + established connection. + - (instance) cmd_channel: the command channel class instance. + """ + asyncore.dispatcher.__init__(self, sock_obj) + # we toss the use of the asynchat's "simple producer" and + # replace it with a pure deque, which the original fifo + # was a wrapping of + self.producer_fifo = deque() + + self.cmd_channel = cmd_channel + self.file_obj = None + self.receive = False + self.transfer_finished = False + self.tot_bytes_sent = 0 + self.tot_bytes_received = 0 + self.data_wrapper = lambda x: x + + # --- utility methods + + def enable_receiving(self, type): + """Enable receiving of data over the channel. Depending on the + TYPE currently in use it creates an appropriate wrapper for the + incoming data. + + - (str) type: current transfer type, 'a' (ASCII) or 'i' (binary). + """ + if type == 'a': + self.data_wrapper = lambda x: x.replace('\r\n', os.linesep) + elif type == 'i': + self.data_wrapper = lambda x: x + else: + raise TypeError, "Unsupported type" + self.receive = True + + def get_transmitted_bytes(self): + "Return the number of transmitted bytes." + return self.tot_bytes_sent + self.tot_bytes_received + + def transfer_in_progress(self): + "Return True if a transfer is in progress, else False." + return self.get_transmitted_bytes() != 0 + + # --- connection + + def handle_read(self): + """Called when there is data waiting to be read.""" + try: + chunk = self.recv(self.ac_in_buffer_size) + except socket.error: + self.handle_error() + else: + self.tot_bytes_received += len(chunk) + if not chunk: + self.transfer_finished = True + #self.close() # <-- asyncore.recv() already do that... + return + # while we're writing on the file an exception could occur + # in case that filesystem gets full; if this happens we + # let handle_error() method handle this exception, providing + # a detailed error message. 
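+ # data_wrapper was selected by enable_receiving(): for TYPE A it
+ # rewrites "\r\n" into os.linesep, for TYPE I it is the identity
+ # function.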
+ self.file_obj.write(self.data_wrapper(chunk)) + + def handle_write(self): + """Called when data is ready to be written, initiates send.""" + self.initiate_send() + + def push(self, data): + """Push data onto the deque and initiate send.""" + sabs = self.ac_out_buffer_size + if len(data) > sabs: + for i in xrange(0, len(data), sabs): + self.producer_fifo.append(data[i:i+sabs]) + else: + self.producer_fifo.append(data) + self.initiate_send() + + def push_with_producer(self, producer): + """Push data using a producer and initiate send.""" + self.producer_fifo.append(producer) + self.initiate_send() + + def readable(self): + """Predicate for inclusion in the readable for select().""" + return self.receive + + def writable(self): + """Predicate for inclusion in the writable for select().""" + return self.producer_fifo or (not self.connected) + + def close_when_done(self): + """Automatically close this channel once the outgoing queue is empty.""" + self.producer_fifo.append(None) + + def initiate_send(self): + """Attempt to send data in fifo order.""" + while self.producer_fifo and self.connected: + first = self.producer_fifo[0] + # handle empty string/buffer or None entry + if not first: + del self.producer_fifo[0] + if first is None: + self.transfer_finished = True + self.handle_close() + return + + # handle classic producer behavior + obs = self.ac_out_buffer_size + try: + data = buffer(first, 0, obs) + except TypeError: + data = first.more() + if data: + self.producer_fifo.appendleft(data) + else: + del self.producer_fifo[0] + continue + + # send the data + try: + num_sent = self.send(data) + except socket.error: + self.handle_error() + return + + if num_sent: + self.tot_bytes_sent += num_sent + if num_sent < len(data) or obs < len(first): + self.producer_fifo[0] = first[num_sent:] + else: + del self.producer_fifo[0] + # we tried to send some actual data + return + + def handle_expt(self): + """Called on "exceptional" data events.""" + self.cmd_channel.respond("426 Connection error; transfer aborted.") + self.close() + + def handle_error(self): + """Called when an exception is raised and not otherwise handled.""" + try: + raise + except (KeyboardInterrupt, SystemExit, asyncore.ExitNow): + raise + except socket.error, err: + # fix around asyncore bug (http://bugs.python.org/issue1736101) + if err[0] in (errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN, \ + errno.ECONNABORTED): + self.handle_close() + return + else: + error = str(err[1]) + # an error could occur in case we fail reading / writing + # from / to file (e.g. file system gets full) + except EnvironmentError, err: + error = _strerror(err) + except: + # some other exception occurred; we don't want to provide + # confidential error messages + logerror(traceback.format_exc()) + error = "Internal error" + self.cmd_channel.respond("426 %s; transfer aborted." %error) + self.close() + + def handle_close(self): + """Called when the socket is closed.""" + # If we used channel for receiving we assume that transfer is + # finished when client close connection , if we used channel + # for sending we have to check that all data has been sent + # (responding with 226) or not (responding with 426). + if self.receive: + self.transfer_finished = True + action = 'received' + else: + action = 'sent' + if self.transfer_finished: + self.cmd_channel.respond("226 Transfer complete.") + if self.file_obj: + fname = self.cmd_channel.fs.fs2ftp(self.file_obj.name) + self.cmd_channel.log('"%s" %s.' 
%(fname, action)) + else: + tot_bytes = self.get_transmitted_bytes() + msg = "Transfer aborted; %d bytes transmitted." %tot_bytes + self.cmd_channel.respond("426 " + msg) + self.cmd_channel.log(msg) + self.close() + + def close(self): + """Close the data channel, first attempting to close any remaining + file handles.""" + if self.file_obj and not self.file_obj.closed: + self.file_obj.close() + asyncore.dispatcher.close(self) + self.cmd_channel.on_dtp_close() + + +# --- producers + +class FileProducer: + """Producer wrapper for file[-like] objects.""" + + buffer_size = 65536 + + def __init__(self, file, type): + """Initialize the producer with a data_wrapper appropriate to TYPE. + + - (file) file: the file[-like] object. + - (str) type: the current TYPE, 'a' (ASCII) or 'i' (binary). + """ + self.done = False + self.file = file + if type == 'a': + self.data_wrapper = lambda x: x.replace(os.linesep, '\r\n') + elif type == 'i': + self.data_wrapper = lambda x: x + else: + raise TypeError, "Unsupported type" + + def more(self): + """Attempt a chunk of data of size self.buffer_size.""" + if self.done: + return '' + data = self.data_wrapper(self.file.read(self.buffer_size)) + if not data: + self.done = True + if not self.file.closed: + self.file.close() + return data + + +class IteratorProducer: + """Producer for iterator objects.""" + + def __init__(self, iterator): + self.iterator = iterator + + def more(self): + """Attempt a chunk of data from iterator by calling its next() + method. + """ + try: + return self.iterator.next() + except StopIteration: + return '' + + +class BufferedIteratorProducer: + """Producer for iterator objects with buffer capabilities.""" + # how many times iterator.next() will be called before + # returning some data + loops = 20 + + def __init__(self, iterator): + self.iterator = iterator + + def more(self): + """Attempt a chunk of data from iterator by calling + its next() method different times. + """ + buffer = [] + for x in xrange(self.loops): + try: + buffer.append(self.iterator.next()) + except StopIteration: + break + return ''.join(buffer) + + +# --- filesystem + +class AbstractedFS: + """A class used to interact with the file system, providing a high + level, cross-platform interface compatible with both Windows and + UNIX style filesystems. + + It provides some utility methods and some wraps around operations + involved in file creation and file system operations like moving + files or removing directories. + + Instance attributes: + - (str) root: the user home directory. + - (str) cwd: the current working directory. + - (str) rnfr: source file to be renamed. + """ + + def __init__(self): + self.root = None + self.cwd = '/' + self.rnfr = None + + # --- Pathname / conversion utilities + + def ftpnorm(self, ftppath): + """Normalize a "virtual" ftp pathname (tipically the raw string + coming from client) depending on the current working directory. + + Example (having "/foo" as current working directory): + 'x' -> '/foo/x' + + Note: directory separators are system independent ("/"). + Pathname returned is always absolutized. + """ + if os.path.isabs(ftppath): + p = os.path.normpath(ftppath) + else: + p = os.path.normpath(os.path.join(self.cwd, ftppath)) + # normalize string in a standard web-path notation having '/' + # as separator. + p = p.replace("\\", "/") + # os.path.normpath supports UNC paths (e.g. "//a/b/c") but we + # don't need them. 
In case we get an UNC path we collapse + # redundant separators appearing at the beginning of the string + while p[:2] == '//': + p = p[1:] + # Anti path traversal: don't trust user input, in the event + # that self.cwd is not absolute, return "/" as a safety measure. + # This is for extra protection, maybe not really necessary. + if not os.path.isabs(p): + p = "/" + return p + + def ftp2fs(self, ftppath): + """Translate a "virtual" ftp pathname (tipically the raw string + coming from client) into equivalent absolute "real" filesystem + pathname. + + Example (having "/home/user" as root directory): + 'x' -> '/home/user/x' + + Note: directory separators are system dependent. + """ + # as far as I know, it should always be path traversal safe... + if os.path.normpath(self.root) == os.sep: + return os.path.normpath(self.ftpnorm(ftppath)) + else: + p = self.ftpnorm(ftppath)[1:] + return os.path.normpath(os.path.join(self.root, p)) + + def fs2ftp(self, fspath): + """Translate a "real" filesystem pathname into equivalent + absolute "virtual" ftp pathname depending on the user's + root directory. + + Example (having "/home/user" as root directory): + '/home/user/x' -> '/x' + + As for ftpnorm, directory separators are system independent + ("/") and pathname returned is always absolutized. + + On invalid pathnames escaping from user's root directory + (e.g. "/home" when root is "/home/user") always return "/". + """ + if os.path.isabs(fspath): + p = os.path.normpath(fspath) + else: + p = os.path.normpath(os.path.join(self.root, fspath)) + if not self.validpath(p): + return '/' + p = p.replace(os.sep, "/") + p = p[len(self.root):] + if not p.startswith('/'): + p = '/' + p + return p + + # alias for backward compatibility with 0.2.0 + normalize = ftpnorm + translate = ftp2fs + + def validpath(self, path): + """Check whether the path belongs to user's home directory. + Expected argument is a "real" filesystem pathname. + + If path is a symbolic link it is resolved to check its real + destination. + + Pathnames escaping from user's root directory are considered + not valid. + """ + root = self.realpath(self.root) + path = self.realpath(path) + if not self.root.endswith(os.sep): + root = self.root + os.sep + if not path.endswith(os.sep): + path = path + os.sep + if path[0:len(root)] == root: + return True + return False + + # --- Wrapper methods around open() and tempfile.mkstemp + + def open(self, filename, mode): + """Open a file returning its handler.""" + return open(filename, mode) + + def mkstemp(self, suffix='', prefix='', dir=None, mode='wb'): + """A wrap around tempfile.mkstemp creating a file with a unique + name. Unlike mkstemp it returns an object with a file-like + interface. 
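+
+ A sketch of the intended use (the random suffix in the name will
+ differ on each call):
+
+ >>> fs = AbstractedFS()
+ >>> tmp = fs.mkstemp(prefix='ftpd.', dir='/tmp')
+ >>> tmp.name
+ '/tmp/ftpd.Xa4Fbn'
+ >>> tmp.write('hello')
+ >>> tmp.close()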
+ """ + class FileWrapper: + def __init__(self, fd, name): + self.file = fd + self.name = name + def __getattr__(self, attr): + return getattr(self.file, attr) + + text = not 'b' in mode + # max number of tries to find out a unique file name + tempfile.TMP_MAX = 50 + fd, name = tempfile.mkstemp(suffix, prefix, dir, text=text) + file = os.fdopen(fd, mode) + return FileWrapper(file, name) + + # --- Wrapper methods around os.* + + def chdir(self, path): + """Change the current directory.""" + # temporarily join the specified directory to see if we have + # permissions to do so + basedir = os.getcwd() + try: + os.chdir(path) + except os.error: + raise + else: + os.chdir(basedir) + self.cwd = self.fs2ftp(path) + + def mkdir(self, path, basename): + """Create the specified directory.""" + os.mkdir(os.path.join(path, basename)) + + def listdir(self, path): + """List the content of a directory.""" + return os.listdir(path) + + def rmdir(self, path): + """Remove the specified directory.""" + os.rmdir(path) + + def remove(self, path): + """Remove the specified file.""" + os.remove(path) + + def rename(self, src, dst): + """Rename the specified src file to the dst filename.""" + os.rename(src, dst) + + def stat(self, path): + """Perform a stat() system call on the given path.""" + return os.stat(path) + + def lstat(self, path): + """Like stat but does not follow symbolic links.""" + return os.lstat(path) + + if not hasattr(os, 'lstat'): + lstat = stat + + # --- Wrapper methods around os.path.* + + def isfile(self, path): + """Return True if path is a file.""" + return os.path.isfile(path) + + def islink(self, path): + """Return True if path is a symbolic link.""" + return os.path.islink(path) + + def isdir(self, path): + """Return True if path is a directory.""" + return os.path.isdir(path) + + def getsize(self, path): + """Return the size of the specified file in bytes.""" + return os.path.getsize(path) + + def getmtime(self, path): + """Return the last modified time as a number of seconds since + the epoch.""" + return os.path.getmtime(path) + + def realpath(self, path): + """Return the canonical version of path eliminating any + symbolic links encountered in the path (if they are + supported by the operating system). + """ + return os.path.realpath(path) + + def lexists(self, path): + """Return True if path refers to an existing path, including + a broken or circular symbolic link. + """ + if hasattr(os.path, 'lexists'): + return os.path.lexists(path) + # grant backward compatibility with python 2.3 + elif hasattr(os, 'lstat'): + try: + os.lstat(path) + except os.error: + return False + return True + # fallback + else: + return os.path.exists(path) + + exists = lexists # alias for backward compatibility with 0.2.0 + + def glob1(self, dirname, pattern): + """Return a list of files matching a dirname pattern + non-recursively. + + Unlike glob.glob1 raises exception if os.listdir() fails. + """ + names = self.listdir(dirname) + if pattern[0] != '.': + names = filter(lambda x: x[0] != '.', names) + return fnmatch.filter(names, pattern) + + # --- Listing utilities + + # note: the following operations are no more blocking + + def get_list_dir(self, path): + """"Return an iterator object that yields a directory listing + in a form suitable for LIST command. 
+ """ + if self.isdir(path): + listing = self.listdir(path) + listing.sort() + return self.format_list(path, listing) + # if path is a file or a symlink we return information about it + else: + basedir, filename = os.path.split(path) + self.lstat(path) # raise exc in case of problems + return self.format_list(basedir, [filename]) + + def get_stat_dir(self, rawline): + """Return an iterator object that yields a list of files + matching a dirname pattern non-recursively in a form + suitable for STAT command. + + - (str) rawline: the raw string passed by client as command + argument. + """ + ftppath = self.ftpnorm(rawline) + if not glob.has_magic(ftppath): + return self.get_list_dir(self.ftp2fs(rawline)) + else: + basedir, basename = os.path.split(ftppath) + if glob.has_magic(basedir): + return iter(['Directory recursion not supported.\r\n']) + else: + basedir = self.ftp2fs(basedir) + listing = self.glob1(basedir, basename) + if listing: + listing.sort() + return self.format_list(basedir, listing) + + def format_list(self, basedir, listing, ignore_err=True): + """Return an iterator object that yields the entries of given + directory emulating the "/bin/ls -lA" UNIX command output. + + - (str) basedir: the absolute dirname. + - (list) listing: the names of the entries in basedir + - (bool) ignore_err: when False raise exception if os.lstat() + call fails. + + On platforms which do not support the pwd and grp modules (such + as Windows), ownership is printed as "owner" and "group" as a + default, and number of hard links is always "1". On UNIX + systems, the actual owner, group, and number of links are + printed. + + This is how output appears to client: + + -rw-rw-rw- 1 owner group 7045120 Sep 02 3:47 music.mp3 + drwxrwxrwx 1 owner group 0 Aug 31 18:50 e-books + -rw-rw-rw- 1 owner group 380 Sep 02 3:40 module.py + """ + for basename in listing: + file = os.path.join(basedir, basename) + try: + st = self.lstat(file) + except os.error: + if ignore_err: + continue + raise + perms = filemode(st.st_mode) # permissions + nlinks = st.st_nlink # number of links to inode + if not nlinks: # non-posix system, let's use a bogus value + nlinks = 1 + size = st.st_size # file size + if pwd and grp: + # get user and group name, else just use the raw uid/gid + try: + uname = pwd.getpwuid(st.st_uid).pw_name + except KeyError: + uname = st.st_uid + try: + gname = grp.getgrgid(st.st_gid).gr_name + except KeyError: + gname = st.st_gid + else: + # on non-posix systems the only chance we use default + # bogus values for owner and group + uname = "owner" + gname = "group" + # stat.st_mtime could fail (-1) if last mtime is too old + # in which case we return the local time as last mtime + try: + mtime = time.strftime("%b %d %H:%M", time.localtime(st.st_mtime)) + except ValueError: + mtime = time.strftime("%b %d %H:%M") + # if the file is a symlink, resolve it, e.g. "symlink -> realfile" + if stat.S_ISLNK(st.st_mode): + basename = basename + " -> " + os.readlink(file) + + # formatting is matched with proftpd ls output + yield "%s %3s %-8s %-8s %8s %s %s\r\n" %(perms, nlinks, uname, gname, + size, mtime, basename) + + def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): + """Return an iterator object that yields the entries of a given + directory or of a single file in a form suitable with MLSD and + MLST commands. + + Every entry includes a list of "facts" referring the listed + element. See RFC-3659, chapter 7, to see what every single + fact stands for. + + - (str) basedir: the absolute dirname. 
+ - (list) listing: the names of the entries in basedir + - (str) perms: the string referencing the user permissions. + - (str) facts: the list of "facts" to be returned. + - (bool) ignore_err: when False raise exception if os.stat() + call fails. + + Note that "facts" returned may change depending on the platform + and on what user specified by using the OPTS command. + + This is how output could appear to the client issuing + a MLSD request: + + type=file;size=156;perm=r;modify=20071029155301;unique=801cd2; music.mp3 + type=dir;size=0;perm=el;modify=20071127230206;unique=801e33; ebooks + type=file;size=211;perm=r;modify=20071103093626;unique=801e32; module.py + """ + permdir = ''.join([x for x in perms if x not in 'arw']) + permfile = ''.join([x for x in perms if x not in 'celmp']) + if ('w' in perms) or ('a' in perms) or ('f' in perms): + permdir += 'c' + if 'd' in perms: + permdir += 'p' + type = size = perm = modify = create = unique = mode = uid = gid = "" + for basename in listing: + file = os.path.join(basedir, basename) + try: + st = self.stat(file) + except OSError: + if ignore_err: + continue + raise + # type + perm + if stat.S_ISDIR(st.st_mode): + if 'type' in facts: + if basename == '.': + type = 'type=cdir;' + elif basename == '..': + type = 'type=pdir;' + else: + type = 'type=dir;' + if 'perm' in facts: + perm = 'perm=%s;' %permdir + else: + if 'type' in facts: + type = 'type=file;' + if 'perm' in facts: + perm = 'perm=%s;' %permfile + if 'size' in facts: + size = 'size=%s;' %st.st_size # file size + # last modification time + if 'modify' in facts: + try: + modify = 'modify=%s;' %time.strftime("%Y%m%d%H%M%S", + time.localtime(st.st_mtime)) + except ValueError: + # stat.st_mtime could fail (-1) if last mtime is too old + modify = "" + if 'create' in facts: + # on Windows we can provide also the creation time + try: + create = 'create=%s;' %time.strftime("%Y%m%d%H%M%S", + time.localtime(st.st_ctime)) + except ValueError: + create = "" + # UNIX only + if 'unix.mode' in facts: + mode = 'unix.mode=%s;' %oct(st.st_mode & 0777) + if 'unix.uid' in facts: + uid = 'unix.uid=%s;' %st.st_uid + if 'unix.gid' in facts: + gid = 'unix.gid=%s;' %st.st_gid + # We provide unique fact (see RFC-3659, chapter 7.5.2) on + # posix platforms only; we get it by mixing st_dev and + # st_ino values which should be enough for granting an + # uniqueness for the file listed. + # The same approach is used by pure-ftpd. + # Implementors who want to provide unique fact on other + # platforms should use some platform-specific method (e.g. + # on Windows NTFS filesystems MTF records could be used). + if 'unique' in facts: + unique = "unique=%x%x;" %(st.st_dev, st.st_ino) + + yield "%s%s%s%s%s%s%s%s%s %s\r\n" %(type, size, perm, modify, create, + mode, uid, gid, unique, basename) + + +# --- FTP + +class FTPHandler(asynchat.async_chat): + """Implements the FTP server Protocol Interpreter (see RFC-959), + handling commands received from the client on the control channel. + + All relevant session information is stored in class attributes + reproduced below and can be modified before instantiating this + class. + + - (str) banner: the string sent when client connects. + + - (int) max_login_attempts: + the maximum number of wrong authentications before disconnecting + the client (default 3). 
+ + - (bool)permit_foreign_addresses: + FTP site-to-site transfer feature: also referenced as "FXP" it + permits for transferring a file between two remote FTP servers + without the transfer going through the client's host (not + recommended for security reasons as described in RFC-2577). + Having this attribute set to False means that all data + connections from/to remote IP addresses which do not match the + client's IP address will be dropped (defualt False). + + - (bool) permit_privileged_ports: + set to True if you want to permit active data connections (PORT) + over privileged ports (not recommended, defaulting to False). + + - (str) masquerade_address: + the "masqueraded" IP address to provide along PASV reply when + pyftpdlib is running behind a NAT or other types of gateways. + When configured pyftpdlib will hide its local address and + instead use the public address of your NAT (default None). + + - (list) passive_ports: + what ports ftpd will use for its passive data transfers. + Value expected is a list of integers (e.g. range(60000, 65535)). + When configured pyftpdlib will no longer use kernel-assigned + random ports (default None). + + + All relevant instance attributes initialized when client connects + are reproduced below. You may be interested in them in case you + want to subclass the original FTPHandler. + + - (bool) authenticated: True if client authenticated himself. + - (str) username: the name of the connected user (if any). + - (int) attempted_logins: number of currently attempted logins. + - (str) current_type: the current transfer type (default "a") + - (int) af: the address family (IPv4/IPv6) + - (instance) server: the FTPServer class instance. + - (instance) data_server: the data server instance (if any). + - (instance) data_channel: the data channel instance (if any). + """ + # these are overridable defaults + + # default classes + authorizer = DummyAuthorizer() + active_dtp = ActiveDTP + passive_dtp = PassiveDTP + dtp_handler = DTPHandler + abstracted_fs = AbstractedFS + + # session attributes (explained in the docstring) + banner = "pyftpdlib %s ready." %__ver__ + max_login_attempts = 3 + permit_foreign_addresses = False + permit_privileged_ports = False + masquerade_address = None + passive_ports = None + + def __init__(self, conn, server): + """Initialize the command channel. + + - (instance) conn: the socket object instance of the newly + established connection. + - (instance) server: the ftp server class instance. 
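+
+ The class-level defaults documented above are meant to be changed
+ before any connection is dispatched, e.g. (a sketch):
+
+ >>> FTPHandler.banner = "my ftpd ready."
+ >>> FTPHandler.masquerade_address = '203.0.113.10'
+ >>> FTPHandler.passive_ports = range(60000, 65535)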
+ """ + asynchat.async_chat.__init__(self, conn=conn) + self.server = server + self.remote_ip, self.remote_port = self.socket.getpeername()[:2] + self.in_buffer = [] + self.in_buffer_len = 0 + self.set_terminator("\r\n") + + # session attributes + self.fs = self.abstracted_fs() + self.authenticated = False + self.username = "" + self.password = "" + self.attempted_logins = 0 + self.current_type = 'a' + self.restart_position = 0 + self.quit_pending = False + self._epsvall = False + self.__in_dtp_queue = None + self.__out_dtp_queue = None + + # mlsx facts attributes + self.current_facts = ['type', 'perm', 'size', 'modify'] + if os.name == 'posix': + self.current_facts.append('unique') + self.available_facts = self.current_facts[:] + if pwd and grp: + self.available_facts += ['unix.mode', 'unix.uid', 'unix.gid'] + if os.name == 'nt': + self.available_facts.append('create') + + # dtp attributes + self.data_server = None + self.data_channel = None + + if hasattr(self.socket, 'family'): + self.af = self.socket.family + else: # python < 2.5 + ip, port = self.socket.getsockname()[:2] + self.af = socket.getaddrinfo(ip, port, socket.AF_UNSPEC, + socket.SOCK_STREAM)[0][0] + + def handle(self): + """Return a 220 'Ready' response to the client over the command + channel. + """ + if len(self.banner) <= 75: + self.respond("220 %s" %str(self.banner)) + else: + self.push('220-%s\r\n' %str(self.banner)) + self.respond('220 ') + + def handle_max_cons(self): + """Called when limit for maximum number of connections is reached.""" + msg = "Too many connections. Service temporary unavailable." + self.respond("421 %s" %msg) + self.log(msg) + # If self.push is used, data could not be sent immediately in + # which case a new "loop" will occur exposing us to the risk of + # accepting new connections. Since this could cause asyncore to + # run out of fds (...and exposes the server to DoS attacks), we + # immediately close the channel by using close() instead of + # close_when_done(). If data has not been sent yet client will + # be silently disconnected. + self.close() + + def handle_max_cons_per_ip(self): + """Called when too many clients are connected from the same IP.""" + msg = "Too many connections from the same IP address." + self.respond("421 %s" %msg) + self.log(msg) + self.close_when_done() + + # --- asyncore / asynchat overridden methods + + def readable(self): + # if there's a quit pending we stop reading data from socket + return not self.quit_pending + + def collect_incoming_data(self, data): + """Read incoming data and append to the input buffer.""" + self.in_buffer.append(data) + self.in_buffer_len += len(data) + # Flush buffer if it gets too long (possible DoS attacks). + # RFC-959 specifies that a 500 response could be given in + # such cases + buflimit = 2048 + if self.in_buffer_len > buflimit: + self.respond('500 Command too long.') + self.log('Command received exceeded buffer limit of %s.' %(buflimit)) + self.in_buffer = [] + self.in_buffer_len = 0 + + # commands accepted before authentication + unauth_cmds = ('FEAT','HELP','NOOP','PASS','QUIT','STAT','SYST','USER') + + # commands needing an argument + arg_cmds = ('ALLO','APPE','DELE','EPRT','MDTM','MODE','MKD','OPTS','PORT', + 'REST','RETR','RMD','RNFR','RNTO','SIZE', 'STOR','STRU', + 'TYPE','USER','XMKD','XRMD') + + # commands needing no argument + unarg_cmds = ('ABOR','CDUP','FEAT','NOOP','PASV','PWD','QUIT','REIN', + 'SYST','XCUP','XPWD') + + def found_terminator(self): + r"""Called when the incoming data stream matches the \r\n + terminator. 
+ + Depending on the command received it calls the command's + corresponding method (e.g. for received command "MKD pathname", + ftp_MKD() method is called with "pathname" as the argument). + """ + line = ''.join(self.in_buffer) + self.in_buffer = [] + self.in_buffer_len = 0 + + cmd = line.split(' ')[0].upper() + space = line.find(' ') + if space != -1: + arg = line[space + 1:] + else: + arg = "" + + if cmd != 'PASS': + self.logline("<== %s" %line) + else: + self.logline("<== %s %s" %(line.split(' ')[0], '*' * 6)) + + # let's check if user provided an argument for those commands + # needing one + if not arg and cmd in self.arg_cmds: + self.respond("501 Syntax error: command needs an argument.") + return + + # let's do the same for those commands requiring no argument. + elif arg and cmd in self.unarg_cmds: + self.respond("501 Syntax error: command does not accept arguments.") + return + + # provide a limited set of commands if user isn't + # authenticated yet + if (not self.authenticated): + if cmd in self.unauth_cmds: + # we permit STAT during this phase but we don't want + # STAT to return a directory LISTing if the user is + # not authenticated yet (this could happen if STAT + # is used with an argument) + if (cmd == 'STAT') and arg: + self.respond("530 Log in with USER and PASS first.") + else: + method = getattr(self, 'ftp_' + cmd) + method(arg) # call the proper ftp_* method + elif cmd in proto_cmds: + self.respond("530 Log in with USER and PASS first.") + else: + self.respond('500 Command "%s" not understood.' %line) + + # provide full command set + elif (self.authenticated) and (cmd in proto_cmds): + if not (self.__check_path(arg, arg)): # and self.__check_perm(cmd, arg)): + return + method = getattr(self, 'ftp_' + cmd) + method(arg) # call the proper ftp_* method + + else: + # recognize those commands having "special semantics" + if 'ABOR' in cmd: + self.ftp_ABOR("") + elif 'STAT' in cmd: + self.ftp_STAT("") + # unknown command + else: + self.respond('500 Command "%s" not understood.' %line) + + def __check_path(self, cmd, line): + """Check whether a path is valid.""" + # For the following commands we have to make sure that the real + # path destination belongs to the user's root directory. + # If provided path is a symlink we follow its final destination + # to do so. + if cmd in ('APPE','CWD','DELE','MDTM','NLST','MLSD','MLST','RETR', + 'RMD','SIZE','STOR','XCWD','XRMD'): + datacr = None + datacr = self.fs.get_cr(line) + try: + if not self.fs.validpath(self.fs.ftp2fs(line, datacr)): + line = self.fs.ftpnorm(line) + err = '"%s" points to a path which is outside ' \ + "the user's root directory" %line + self.respond("550 %s." %err) + self.log('FAIL %s "%s". %s.' %(cmd, line, err)) + self.fs.close_cr(datacr) + return False + except: + pass + self.fs.close_cr(datacr) + return True + + def __check_perm(self, cmd, line, datacr): + """Check permissions depending on issued command.""" + map = {'CWD':'e', 'XCWD':'e', 'CDUP':'e', 'XCUP':'e', + 'LIST':'l', 'NLST':'l', 'MLSD':'l', 'STAT':'l', + 'RETR':'r', + 'APPE':'a', + 'DELE':'d', 'RMD':'d', 'XRMD':'d', + 'RNFR':'f', + 'MKD':'m', 'XMKD':'m', + 'STOR':'w'} + if cmd in map: + if cmd == 'STAT' and not line: + return True + perm = map[cmd] + if not line and (cmd in ('LIST','NLST','MLSD')): + path = self.fs.ftp2fs(self.fs.cwd, datacr) + else: + path = self.fs.ftp2fs(line, datacr) + if not self.authorizer.has_perm(self.username, perm, path): + self.log('FAIL %s "%s". Not enough privileges.' 
\ + %(cmd, self.fs.ftpnorm(line))) + self.respond("550 Can't %s. Not enough privileges." %cmd) + return False + return True + + def handle_expt(self): + """Called when there is out of band (OOB) data for the socket + connection. This could happen in case of such commands needing + "special action" (typically STAT and ABOR) in which case we + append OOB data to incoming buffer. + """ + if hasattr(socket, 'MSG_OOB'): + try: + data = self.socket.recv(1024, socket.MSG_OOB) + except socket.error: + pass + else: + self.in_buffer.append(data) + return + self.log("Can't handle OOB data.") + self.close() + + def handle_error(self): + try: + raise + except (KeyboardInterrupt, SystemExit, asyncore.ExitNow): + raise + except socket.error, err: + # fix around asyncore bug (http://bugs.python.org/issue1736101) + if err[0] in (errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN, \ + errno.ECONNABORTED): + self.handle_close() + return + else: + logerror(traceback.format_exc()) + except: + logerror(traceback.format_exc()) + self.close() + + def handle_close(self): + self.close() + + _closed = False + def close(self): + """Close the current channel disconnecting the client.""" + if not self._closed: + self._closed = True + if self.data_server: + self.data_server.close() + del self.data_server + + if self.data_channel: + self.data_channel.close() + del self.data_channel + + del self.__out_dtp_queue + del self.__in_dtp_queue + + # remove client IP address from ip map + self.server.ip_map.remove(self.remote_ip) + asynchat.async_chat.close(self) + self.log("Disconnected.") + + # --- callbacks + + def on_dtp_connection(self): + """Called every time data channel connects (either active or + passive). + + Incoming and outgoing queues are checked for pending data. + If outbound data is pending, it is pushed into the data channel. + If awaiting inbound data, the data channel is enabled for + receiving. + """ + if self.data_server: + self.data_server.close() + self.data_server = None + + # check for data to send + if self.__out_dtp_queue: + data, isproducer, file = self.__out_dtp_queue + if file: + self.data_channel.file_obj = file + if not isproducer: + self.data_channel.push(data) + else: + self.data_channel.push_with_producer(data) + if self.data_channel: + self.data_channel.close_when_done() + self.__out_dtp_queue = None + + # check for data to receive + elif self.__in_dtp_queue: + self.data_channel.file_obj = self.__in_dtp_queue + self.data_channel.enable_receiving(self.current_type) + self.__in_dtp_queue = None + + def on_dtp_close(self): + """Called every time the data channel is closed.""" + self.data_channel = None + if self.quit_pending: + self.close_when_done() + + # --- utility + + def respond(self, resp): + """Send a response to the client using the command channel.""" + self.push(resp + '\r\n') + self.logline('==> %s' % resp) + + def push_dtp_data(self, data, isproducer=False, file=None): + """Pushes data into the data channel. + + It is usually called for those commands requiring some data to + be sent over the data channel (e.g. RETR). + If data channel does not exist yet, it queues the data to send + later; data will then be pushed into data channel when + on_dtp_connection() will be called. + + - (str/classobj) data: the data to send which may be a string + or a producer object). + - (bool) isproducer: whether treat data as a producer. + - (file) file: the file[-like] object to send (if any). + """ + if self.data_channel: + self.respond("125 Data connection already open. 
Transfer starting.") + if file: + self.data_channel.file_obj = file + if not isproducer: + self.data_channel.push(data) + else: + self.data_channel.push_with_producer(data) + if self.data_channel: + self.data_channel.close_when_done() + else: + self.respond("150 File status okay. About to open data connection.") + self.__out_dtp_queue = (data, isproducer, file) + + def log(self, msg): + """Log a message, including additional identifying session data.""" + log("[%s]@%s:%s %s" %(self.username, self.remote_ip, + self.remote_port, msg)) + + def logline(self, msg): + """Log a line including additional indentifying session data.""" + logline("%s:%s %s" %(self.remote_ip, self.remote_port, msg)) + + def flush_account(self): + """Flush account information by clearing attributes that need + to be reset on a REIN or new USER command. + """ + if self.data_channel: + if not self.data_channel.transfer_in_progress(): + self.data_channel.close() + self.data_channel = None + if self.data_server: + self.data_server.close() + self.data_server = None + + self.fs.rnfr = None + self.authenticated = False + self.username = "" + self.password = "" + self.attempted_logins = 0 + self.current_type = 'a' + self.restart_position = 0 + self.quit_pending = False + self.__in_dtp_queue = None + self.__out_dtp_queue = None + + def run_as_current_user(self, function, *args, **kwargs): + """Execute a function impersonating the current logged-in user.""" + self.authorizer.impersonate_user(self.username, self.password) + try: + return function(*args, **kwargs) + finally: + self.authorizer.terminate_impersonation() + + # --- connection + + def _make_eport(self, ip, port): + """Establish an active data channel with remote client which + issued a PORT or EPRT command. + """ + # FTP bounce attacks protection: according to RFC-2577 it's + # recommended to reject PORT if IP address specified in it + # does not match client IP address. + if not self.permit_foreign_addresses: + if ip != self.remote_ip: + self.log("Rejected data connection to foreign address %s:%s." + %(ip, port)) + self.respond("501 Can't connect to a foreign address.") + return + + # ...another RFC-2577 recommendation is rejecting connections + # to privileged ports (< 1024) for security reasons. + if not self.permit_privileged_ports: + if port < 1024: + self.log('PORT against the privileged port "%s" refused.' %port) + self.respond("501 Can't connect over a privileged port.") + return + + # close existent DTP-server instance, if any. + if self.data_server: + self.data_server.close() + self.data_server = None + if self.data_channel: + self.data_channel.close() + self.data_channel = None + + # make sure we are not hitting the max connections limit + if self.server.max_cons: + if len(self._map) >= self.server.max_cons: + msg = "Too many connections. Can't open data channel." + self.respond("425 %s" %msg) + self.log(msg) + return + + # open data channel + self.active_dtp(ip, port, self) + + def _make_epasv(self, extmode=False): + """Initialize a passive data channel with remote client which + issued a PASV or EPSV command. + If extmode argument is False we assume that client issued EPSV in + which case extended passive mode will be used (see RFC-2428). 
+ """ + # close existing DTP-server instance, if any + if self.data_server: + self.data_server.close() + self.data_server = None + + if self.data_channel: + self.data_channel.close() + self.data_channel = None + + # make sure we are not hitting the max connections limit + if self.server.max_cons: + if len(self._map) >= self.server.max_cons: + msg = "Too many connections. Can't open data channel." + self.respond("425 %s" %msg) + self.log(msg) + return + + # open data channel + self.data_server = self.passive_dtp(self, extmode) + + def ftp_PORT(self, line): + """Start an active data channel by using IPv4.""" + if self._epsvall: + self.respond("501 PORT not allowed after EPSV ALL.") + return + if self.af != socket.AF_INET: + self.respond("425 You cannot use PORT on IPv6 connections. " + "Use EPRT instead.") + return + # Parse PORT request for getting IP and PORT. + # Request comes in as: + # > h1,h2,h3,h4,p1,p2 + # ...where the client's IP address is h1.h2.h3.h4 and the TCP + # port number is (p1 * 256) + p2. + try: + addr = map(int, line.split(',')) + assert len(addr) == 6 + for x in addr[:4]: + assert 0 <= x <= 255 + ip = '%d.%d.%d.%d' %tuple(addr[:4]) + port = (addr[4] * 256) + addr[5] + assert 0 <= port <= 65535 + except (AssertionError, ValueError, OverflowError): + self.respond("501 Invalid PORT format.") + return + self._make_eport(ip, port) + + def ftp_EPRT(self, line): + """Start an active data channel by choosing the network protocol + to use (IPv4/IPv6) as defined in RFC-2428. + """ + if self._epsvall: + self.respond("501 EPRT not allowed after EPSV ALL.") + return + # Parse EPRT request for getting protocol, IP and PORT. + # Request comes in as: + # # protoipport + # ...where is an arbitrary delimiter character (usually "|") and + # is the network protocol to use (1 for IPv4, 2 for IPv6). + try: + af, ip, port = line.split(line[0])[1:-1] + port = int(port) + assert 0 <= port <= 65535 + except (AssertionError, ValueError, IndexError, OverflowError): + self.respond("501 Invalid EPRT format.") + return + + if af == "1": + if self.af != socket.AF_INET: + self.respond('522 Network protocol not supported (use 2).') + else: + try: + octs = map(int, ip.split('.')) + assert len(octs) == 4 + for x in octs: + assert 0 <= x <= 255 + except (AssertionError, ValueError, OverflowError), err: + self.respond("501 Invalid EPRT format.") + else: + self._make_eport(ip, port) + elif af == "2": + if self.af == socket.AF_INET: + self.respond('522 Network protocol not supported (use 1).') + else: + self._make_eport(ip, port) + else: + if self.af == socket.AF_INET: + self.respond('501 Unknown network protocol (use 1).') + else: + self.respond('501 Unknown network protocol (use 2).') + + def ftp_PASV(self, line): + """Start a passive data channel by using IPv4.""" + if self._epsvall: + self.respond("501 PASV not allowed after EPSV ALL.") + return + if self.af != socket.AF_INET: + self.respond("425 You cannot use PASV on IPv6 connections. " + "Use EPSV instead.") + else: + self._make_epasv(extmode=False) + + def ftp_EPSV(self, line): + """Start a passive data channel by using IPv4 or IPv6 as defined + in RFC-2428. + """ + # RFC-2428 specifies that if an optional parameter is given, + # we have to determine the address family from that otherwise + # use the same address family used on the control connection. + # In such a scenario a client may use IPv4 on the control channel + # and choose to use IPv6 for the data channel. 
+ # But how could we use IPv6 on the data channel without knowing + # which IPv6 address to use for binding the socket? + # Unfortunately RFC-2428 does not provide satisfing information + # on how to do that. The assumption is that we don't have any way + # to know wich address to use, hence we just use the same address + # family used on the control connection. + if not line: + self._make_epasv(extmode=True) + elif line == "1": + if self.af != socket.AF_INET: + self.respond('522 Network protocol not supported (use 2).') + else: + self._make_epasv(extmode=True) + elif line == "2": + if self.af == socket.AF_INET: + self.respond('522 Network protocol not supported (use 1).') + else: + self._make_epasv(extmode=True) + elif line.lower() == 'all': + self._epsvall = True + self.respond('220 Other commands other than EPSV are now disabled.') + else: + if self.af == socket.AF_INET: + self.respond('501 Unknown network protocol (use 1).') + else: + self.respond('501 Unknown network protocol (use 2).') + + def ftp_QUIT(self, line): + """Quit the current session.""" + # From RFC-959: + # This command terminates a USER and if file transfer is not + # in progress, the server closes the control connection. + # If file transfer is in progress, the connection will remain + # open for result response and the server will then close it. + if self.authenticated: + msg_quit = self.authorizer.get_msg_quit(self.username) + else: + msg_quit = "Goodbye." + if len(msg_quit) <= 75: + self.respond("221 %s" %msg_quit) + else: + self.push("221-%s\r\n" %msg_quit) + self.respond("221 ") + + if not self.data_channel: + self.close_when_done() + else: + # tell the cmd channel to stop responding to commands. + self.quit_pending = True + + + # --- data transferring + + def ftp_LIST(self, line): + """Return a list of files in the specified directory to the + client. + """ + # - If no argument, fall back on cwd as default. + # - Some older FTP clients erroneously issue /bin/ls-like LIST + # formats in which case we fall back on cwd as default. + if not line or line.lower() in ('-a', '-l', '-al', '-la'): + line = self.fs.cwd + try: + data = None + data = self.fs.get_cr(line) + path = self.fs.ftp2fs(line, data) + line = self.fs.ftpnorm(line) + iterator = self.run_as_current_user(self.fs.get_list_dir, path) + except OSError, err: + self.fs.close_cr(data) + why = _strerror(err) + self.log('FAIL LIST "%s". %s.' %(line, why)) + self.respond('550 %s.' %why) + else: + self.fs.close_cr(data) + self.log('OK LIST "%s". Transfer starting.' %line) + producer = BufferedIteratorProducer(iterator) + self.push_dtp_data(producer, isproducer=True) + + def ftp_NLST(self, line): + """Return a list of files in the specified directory in a + compact form to the client. + """ + if not line: + line = self.fs.cwd + try: + data = None + data = self.fs.get_cr(line) + path = self.fs.ftp2fs(line, data) + line = self.fs.ftpnorm(line) + if self.fs.isdir(path): + listing = self.run_as_current_user(self.fs.listdir, path) + listing = map(lambda x:os.path.split(x.path)[1], listing) + else: + # if path is a file we just list its name + self.fs.lstat(path) # raise exc in case of problems + basedir, filename = os.path.split(line) + listing = [filename] + except OSError, err: + self.fs.close_cr(data) + why = _strerror(err) + self.log('FAIL NLST "%s". %s.' %(line, why)) + self.respond('550 %s.' %why) + else: + self.fs.close_cr(data) + data = '' + if listing: + print listing + listing.sort() + data = '\r\n'.join(listing) + '\r\n' + self.log('OK NLST "%s". 
Transfer starting.' %line) + self.push_dtp_data(data) + + # --- MLST and MLSD commands + + # The MLST and MLSD commands are intended to standardize the file and + # directory information returned by the server-FTP process. These + # commands differ from the LIST command in that the format of the + # replies is strictly defined although extensible. + + def ftp_MLST(self, line): + """Return information about a pathname in a machine-processable + form as defined in RFC-3659. + """ + # if no argument, fall back on cwd as default + if not line: + line = self.fs.cwd + try: + datacr = None + datacr = self.fs.get_cr(line) + path = self.fs.ftp2fs(line, datacr) + line = self.fs.ftpnorm(line) + basedir, basename = os.path.split(path) + perms = self.authorizer.get_perms(self.username) + iterator = self.run_as_current_user(self.fs.format_mlsx, basedir, + [basename], perms, self.current_facts, ignore_err=False) + data = ''.join(iterator) + except OSError, err: + self.fs.close_cr(datacr) + why = _strerror(err) + self.log('FAIL MLST "%s". %s.' %(line, why)) + self.respond('550 %s.' %why) + else: + self.fs.close_cr(datacr) + # since TVFS is supported (see RFC-3659 chapter 6), a fully + # qualified pathname should be returned + data = data.split(' ')[0] + ' %s\r\n' %line + # response is expected on the command channel + self.push('250-Listing "%s":\r\n' %line) + # the fact set must be preceded by a space + self.push(' ' + data) + self.respond('250 End MLST.') + + def ftp_MLSD(self, line): + """Return contents of a directory in a machine-processable form + as defined in RFC-3659. + """ + # if no argument, fall back on cwd as default + if not line: + line = self.fs.cwd + try: + datacr = None + datacr = self.fs.get_cr(line) + path = self.fs.ftp2fs(line, datacr) + line = self.fs.ftpnorm(line) + # RFC-3659 requires 501 response code if path is not a directory + if not self.fs.isdir(path): + err = 'No such directory' + self.log('FAIL MLSD "%s". %s.' %(line, err)) + self.respond("501 %s." %err) + return + listing = self.run_as_current_user(self.fs.listdir, path) + except OSError, err: + self.fs.close_cr(datacr) + why = _strerror(err) + self.log('FAIL MLSD "%s". %s.' %(line, why)) + self.respond('550 %s.' %why) + else: + self.fs.close_cr(datacr) + perms = self.authorizer.get_perms(self.username) + iterator = self.fs.format_mlsx(path, listing, perms, + self.current_facts) + producer = BufferedIteratorProducer(iterator) + self.log('OK MLSD "%s". Transfer starting.' %line) + self.push_dtp_data(producer, isproducer=True) + + def ftp_RETR(self, line): + """Retrieve the specified file (transfer from the server to the + client) + """ + try: + datacr = None + datacr = self.fs.get_cr(line) + file = self.fs.ftp2fs(line, datacr) + line = self.fs.ftpnorm(line) + fd = self.run_as_current_user(self.fs.open, file, 'rb') + except OSError, err: + self.fs.close_cr(datacr) + why = _strerror(err) + self.log('FAIL RETR "%s". %s.' %(line, why)) + self.respond('550 %s.' %why) + return + except IOError, err: + self.fs.close_cr(datacr) + why = _strerror(err) + self.log('FAIL RETR "%s". %s.' %(line, why)) + self.respond('550 %s.' %why) + return + + if self.restart_position: + # Make sure that the requested offset is valid (within the + # size of the file being resumed). + # According to RFC-1123 a 554 reply may result in case that + # the existing file cannot be repositioned as specified in + # the REST. 
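+            # Worked example (numbers illustrative): after "REST 1024" a
+            # client expects this RETR to resume at byte offset 1024, so
+            # we seek() there before producing data; if the file is only
+            # 512 bytes long the offset is invalid and a 554 reply is
+            # sent instead, as required by RFC-1123.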
+ ok = 0 + try: + assert not self.restart_position > self.fs.getsize(file) + fd.seek(self.restart_position) + ok = 1 + except AssertionError: + why = "Invalid REST parameter" + except IOError, err: + why = _strerror(err) + self.restart_position = 0 + if not ok: + self.respond('554 %s' %why) + self.log('FAIL RETR "%s". %s.' %(line, why)) + self.fs.close_cr(datacr) + return + self.log('OK RETR "%s". Download starting.' %line) + producer = FileProducer(fd, self.current_type) + self.push_dtp_data(producer, isproducer=True, file=fd) + self.fs.close_cr(datacr) + + def ftp_STOR(self, line, mode='w'): + """Store a file (transfer from the client to the server).""" + # A resume could occur in case of APPE or REST commands. + # In that case we have to open file object in different ways: + # STOR: mode = 'w' + # APPE: mode = 'a' + # REST: mode = 'r+' (to permit seeking on file object) + if 'a' in mode: + cmd = 'APPE' + else: + cmd = 'STOR' + + line = self.fs.ftpnorm(line) + basedir,basename = os.path.split(line) + + datacr = None + try: + datacr = self.fs.get_cr(line) + file = self.fs.ftp2fs(basedir, datacr) + + except OSError, err: + self.fs.close_cr(datacr) + why = _strerror(err) + self.log('FAIL %s "%s". %s.' %(cmd, line, why)) + self.respond('550 %s.' %why) + return + + if self.restart_position: + mode = 'r+' + try: + fd = self.run_as_current_user(self.fs.create, file, basename, mode + 'b') + except IOError, err: + self.fs.close_cr(datacr) + why = _strerror(err) + self.log('FAIL %s "%s". %s.' %(cmd, line, why)) + self.respond('550 %s.' %why) + return + + if self.restart_position: + # Make sure that the requested offset is valid (within the + # size of the file being resumed). + # According to RFC-1123 a 554 reply may result in case + # that the existing file cannot be repositioned as + # specified in the REST. + ok = 0 + try: + assert not self.restart_position > self.fs.getsize(self.fs.ftp2fs(line, datacr)) + fd.seek(self.restart_position) + ok = 1 + except AssertionError: + why = "Invalid REST parameter" + except IOError, err: + why = _strerror(err) + self.restart_position = 0 + if not ok: + self.fs.close_cr(datacr) + self.respond('554 %s' %why) + self.log('FAIL %s "%s". %s.' %(cmd, line, why)) + return + + self.log('OK %s "%s". Upload starting.' %(cmd, line)) + if self.data_channel: + self.respond("125 Data connection already open. Transfer starting.") + self.data_channel.file_obj = fd + self.data_channel.enable_receiving(self.current_type) + else: + self.respond("150 File status okay. About to open data connection.") + self.__in_dtp_queue = fd + self.fs.close_cr(datacr) + + + def ftp_STOU(self, line): + """Store a file on the server with a unique name.""" + # Note 1: RFC-959 prohibited STOU parameters, but this + # prohibition is obsolete. + # Note 2: 250 response wanted by RFC-959 has been declared + # incorrect in RFC-1123 that wants 125/150 instead. + # Note 3: RFC-1123 also provided an exact output format + # defined to be as follow: + # > 125 FILE: pppp + # ...where pppp represents the unique path name of the + # file that will be written. + + # watch for STOU preceded by REST, which makes no sense. + if self.restart_position: + self.respond("450 Can't STOU while REST request is pending.") + return + + datacr = None + datacr = self.fs.get_cr(line) + + if line: + line = self.fs.ftpnorm(line) + basedir,prefix = os.path.split(line) + basedir = self.fs.ftp2fs(basedir, datacr) + #prefix = prefix + '.' + else: + basedir = self.fs.ftp2fs(self.fs.cwd, datacr) + prefix = 'ftpd.' 
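+        # Note on the expected fs.mkstemp contract (an assumption here, by
+        # analogy with tempfile.mkstemp): given this prefix and dir it
+        # should return an open file object with a unique name such as
+        # 'ftpd.k3b9Qx' inside basedir, raising IOError(EEXIST) only once
+        # the retry limit for finding a unique name is exhausted.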
+        try:
+            fd = self.run_as_current_user(self.fs.mkstemp, prefix=prefix,
+                                          dir=basedir)
+        except IOError, err:
+            # hit the max number of retries to find a unique file name
+            if err.errno == errno.EEXIST:
+                why = 'No usable unique file name found'
+            # something else happened
+            else:
+                why = _strerror(err)
+            self.respond("450 %s." %why)
+            self.log('FAIL STOU "%s". %s.' %(self.fs.ftpnorm(line), why))
+            self.fs.close_cr(datacr)
+            return
+
+        filename = line
+        if not self.authorizer.has_perm(self.username, 'w', filename):
+            self.log('FAIL STOU "%s". Not enough privileges.'
+                     %self.fs.ftpnorm(line))
+            self.respond("550 Can't STOU: not enough privileges.")
+            self.fs.close_cr(datacr)
+            return
+
+        # now just act like STOR except that restarting isn't allowed
+        self.log('OK STOU "%s". Upload starting.' %filename)
+        if self.data_channel:
+            self.respond("125 FILE: %s" %filename)
+            self.data_channel.file_obj = fd
+            self.data_channel.enable_receiving(self.current_type)
+        else:
+            self.respond("150 FILE: %s" %filename)
+            self.__in_dtp_queue = fd
+        self.fs.close_cr(datacr)
+
+
+    def ftp_APPE(self, line):
+        """Append data to an existing file on the server."""
+        # watch for APPE preceded by REST, which makes no sense.
+        if self.restart_position:
+            self.respond("550 Can't APPE while REST request is pending.")
+        else:
+            self.ftp_STOR(line, mode='a')
+
+    def ftp_REST(self, line):
+        """Restart a file transfer from a previous mark."""
+        try:
+            marker = int(line)
+            if marker < 0:
+                raise ValueError
+        except (ValueError, OverflowError):
+            self.respond("501 Invalid parameter.")
+        else:
+            self.respond("350 Restarting at position %s. " \
+                         "Now use RETR/STOR for resuming." %marker)
+            self.log("OK REST %s." %marker)
+            self.restart_position = marker
+
+    def ftp_ABOR(self, line):
+        """Abort the current data transfer."""
+
+        # ABOR received while no data channel exists
+        if (self.data_server is None) and (self.data_channel is None):
+            resp = "225 No transfer to abort."
+        else:
+            # a PASV was received but the connection wasn't made yet
+            if self.data_server:
+                self.data_server.close()
+                self.data_server = None
+                resp = "225 ABOR command successful; data channel closed."
+
+            # If a data transfer is in progress the server must first
+            # close the data connection, returning a 426 reply to
+            # indicate that the transfer terminated abnormally, then it
+            # must send a 226 reply, indicating that the abort command
+            # was successfully processed.
+            # If no data has been transmitted we just respond with 225
+            # indicating that no transfer was in progress.
+            if self.data_channel:
+                if self.data_channel.transfer_in_progress():
+                    self.data_channel.close()
+                    self.data_channel = None
+                    self.respond("426 Connection closed; transfer aborted.")
+                    self.log("OK ABOR. Transfer aborted, data channel closed.")
+                    resp = "226 ABOR command successful."
+                else:
+                    self.data_channel.close()
+                    self.data_channel = None
+                    self.log("OK ABOR. Data channel closed.")
+                    resp = "225 ABOR command successful; data channel closed."
+        self.respond(resp)
+
+
+    # --- authentication
+
+    def ftp_USER(self, line):
+        """Set the username for the current session."""
+        # we always treat the anonymous user as a lower-case string.
+        if line.lower() == "anonymous":
+            line = "anonymous"
+
+        # RFC-959 specifies a 530 response to the USER command if the
+        # username is not valid.  If the username is valid, ftpd is
+        # required to return a 331 response instead. 
In order to prevent a + # malicious client from determining valid usernames on a server, + # it is suggested by RFC-2577 that a server always return 331 to + # the USER command and then reject the combination of username + # and password for an invalid username when PASS is provided later. + if not self.authenticated: + self.respond('331 Username ok, send password.') + else: + # a new USER command could be entered at any point in order + # to change the access control flushing any user, password, + # and account information already supplied and beginning the + # login sequence again. + self.flush_account() + msg = 'Previous account information was flushed' + self.log('OK USER "%s". %s.' %(line, msg)) + self.respond('331 %s, send password.' %msg) + self.username = line + + def ftp_PASS(self, line): + """Check username's password against the authorizer.""" + + if self.authenticated: + self.respond("503 User already authenticated.") + return + if not self.username: + self.respond("503 Login with USER first.") + return + + # username ok + if self.authorizer.has_user(self.username): + if self.username == 'anonymous' \ + or self.authorizer.validate_authentication(self.username, line): + msg_login = self.authorizer.get_msg_login(self.username) + if len(msg_login) <= 75: + self.respond('230 %s' %msg_login) + else: + self.push("230-%s\r\n" %msg_login) + self.respond("230 ") + + self.authenticated = True + self.password = line + self.attempted_logins = 0 + self.fs.root = self.authorizer.get_home_dir(self.username) + self.fs.username=self.username + self.fs.password=line + self.log("User %s logged in." %self.username) + else: + self.attempted_logins += 1 + if self.attempted_logins >= self.max_login_attempts: + self.respond("530 Maximum login attempts. Disconnecting.") + self.close() + else: + self.respond("530 Authentication failed.") + self.log('Authentication failed (user: "%s").' %self.username) + self.username = "" + + # wrong username + else: + self.attempted_logins += 1 + if self.attempted_logins >= self.max_login_attempts: + self.log('Authentication failed: unknown username "%s".' + %self.username) + self.respond("530 Maximum login attempts. Disconnecting.") + self.close() + elif self.username.lower() == 'anonymous': + self.respond("530 Anonymous access not allowed.") + self.log('Authentication failed: anonymous access not allowed.') + else: + self.respond("530 Authentication failed.") + self.log('Authentication failed: unknown username "%s".' + %self.username) + self.username = "" + + def ftp_REIN(self, line): + """Reinitialize user's current session.""" + # From RFC-959: + # REIN command terminates a USER, flushing all I/O and account + # information, except to allow any transfer in progress to be + # completed. All parameters are reset to the default settings + # and the control connection is left open. This is identical + # to the state in which a user finds himself immediately after + # the control connection is opened. + self.log("OK REIN. Flushing account information.") + self.flush_account() + # Note: RFC-959 erroneously mention "220" as the correct response + # code to be given in this case, but this is wrong... + self.respond("230 Ready for new user.") + + + # --- filesystem operations + + def ftp_PWD(self, line): + """Return the name of the current working directory to the client.""" + self.respond('257 "%s" is the current directory.' 
%self.fs.cwd) + + def ftp_CWD(self, line): + """Change the current working directory.""" + # TODO: a lot of FTP servers go back to root directory if no + # arg is provided but this is not specified in RFC-959. + # Search for official references about this behaviour. + if not line: + line = '/' + datacr = None + try: + datacr = self.fs.get_cr(line) + path = self.fs.ftp2fs(line, datacr) + self.run_as_current_user(self.fs.chdir, path) + except OSError, err: + why = _strerror(err) + self.log('FAIL CWD "%s". %s.' %(self.fs.ftpnorm(line), why)) + self.respond('550 %s.' %why) + else: + self.log('OK CWD "%s".' %self.fs.cwd) + self.respond('250 "%s" is the current directory.' %self.fs.cwd) + self.fs.close_cr(datacr) + + def ftp_CDUP(self, line): + """Change into the parent directory.""" + # Note: RFC-959 says that code 200 is required but it also says + # that CDUP uses the same codes as CWD. + self.ftp_CWD('..') + + def ftp_SIZE(self, line): + """Return size of file in a format suitable for using with + RESTart as defined in RFC-3659. + + Implementation note: + properly handling the SIZE command when TYPE ASCII is used would + require to scan the entire file to perform the ASCII translation + logic (file.read().replace(os.linesep, '\r\n')) and then + calculating the len of such data which may be different than + the actual size of the file on the server. Considering that + calculating such result could be very resource-intensive it + could be easy for a malicious client to try a DoS attack, thus + we do not perform the ASCII translation. + + However, clients in general should not be resuming downloads in + ASCII mode. Resuming downloads in binary mode is the recommended + way as specified in RFC-3659. + """ + datacr = None + try: + datacr = self.fs.get_cr(line) + path = self.fs.ftp2fs(line, datacr) + line = self.fs.ftpnorm(line) + if self.fs.isdir(path): + why = "%s is not retrievable" %line + self.log('FAIL SIZE "%s". %s.' %(line, why)) + self.respond("550 %s." %why) + return + size = self.run_as_current_user(self.fs.getsize, path) + except OSError, err: + why = _strerror(err) + self.log('FAIL SIZE "%s". %s.' %(line, why)) + self.respond('550 %s.' %why) + else: + self.respond("213 %s" %size) + self.log('OK SIZE "%s".' %line) + self.fs.close_cr(datacr) + + def ftp_MDTM(self, line): + """Return last modification time of file to the client as an ISO + 3307 style timestamp (YYYYMMDDHHMMSS) as defined in RFC-3659. + """ + datacr = None + try: + datacr = self.fs.get_cr(line) + path = self.fs.ftp2fs(line, datacr) + line = self.fs.ftpnorm(line) + if not self.fs.isfile(self.fs.realpath(path)): + why = "%s is not retrievable" %line + self.log('FAIL MDTM "%s". %s.' %(line, why)) + self.respond("550 %s." %why) + self.fs.close_cr(datacr) + return + lmt = self.run_as_current_user(self.fs.getmtime, path) + except OSError, err: + why = _strerror(err) + self.log('FAIL MDTM "%s". %s.' %(line, why)) + self.respond('550 %s.' %why) + else: + lmt = time.strftime("%Y%m%d%H%M%S", time.localtime(lmt)) + self.respond("213 %s" %lmt) + self.log('OK MDTM "%s".' %line) + self.fs.close_cr(datacr) + + def ftp_MKD(self, line): + """Create the specified directory.""" + datacr = None + line = self.fs.ftpnorm(line) + basedir,basename = os.path.split(line) + try: + datacr = self.fs.get_cr(line) + path = self.fs.ftp2fs(basedir, datacr) + self.run_as_current_user(self.fs.mkdir, path, basename) + except OSError, err: + why = _strerror(err) + self.log('FAIL MKD "%s". %s.' %(line, why)) + self.respond('550 %s.' 
%why)
+        else:
+            self.log('OK MKD "%s".' %line)
+            self.respond("257 Directory created.")
+        self.fs.close_cr(datacr)
+
+    def ftp_RMD(self, line):
+        """Remove the specified directory."""
+        datacr = None
+        try:
+            datacr = self.fs.get_cr(line)
+            path = self.fs.ftp2fs(line, datacr)
+            line = self.fs.ftpnorm(line)
+            if self.fs.realpath(path) == self.fs.realpath(self.fs.root):
+                msg = "Can't remove root directory."
+                self.respond("550 %s" %msg)
+                self.log('FAIL RMD "/". %s' %msg)
+                self.fs.close_cr(datacr)
+                return
+            self.run_as_current_user(self.fs.rmdir, path)
+        except OSError, err:
+            why = _strerror(err)
+            self.log('FAIL RMD "%s". %s.' %(line, why))
+            self.respond('550 %s.' %why)
+        else:
+            self.log('OK RMD "%s".' %line)
+            self.respond("250 Directory removed.")
+        self.fs.close_cr(datacr)
+
+    def ftp_DELE(self, line):
+        """Delete the specified file."""
+        datacr = None
+        try:
+            datacr = self.fs.get_cr(line)
+            path = self.fs.ftp2fs(line, datacr)
+            line = self.fs.ftpnorm(line)
+            self.run_as_current_user(self.fs.remove, path)
+        except OSError, err:
+            why = _strerror(err)
+            self.log('FAIL DELE "%s". %s.' %(line, why))
+            self.respond('550 %s.' %why)
+        else:
+            self.log('OK DELE "%s".' %line)
+            self.respond("250 File removed.")
+        self.fs.close_cr(datacr)
+
+    def ftp_RNFR(self, line):
+        """Rename the specified file or directory (only the source name
+        is specified here; see the RNTO command)."""
+        datacr = None
+        try:
+            datacr = self.fs.get_cr(line)
+            line = self.fs.ftpnorm(line)
+            path = self.fs.ftp2fs(line, datacr)
+            if not self.fs.lexists(path):
+                self.respond("550 No such file or directory.")
+            elif self.fs.realpath(path) == self.fs.realpath(self.fs.root):
+                self.respond("550 Can't rename the home directory.")
+            else:
+                self.fs.rnfr = line
+                self.respond("350 Ready for destination name.")
+        except:
+            self.respond("550 Can't find the file or directory.")
+        self.fs.close_cr(datacr)
+
+    def ftp_RNTO(self, line):
+        """Rename file (destination name only, source is specified with
+        RNFR).
+        """
+        if not self.fs.rnfr:
+            self.respond("503 Bad sequence of commands: use RNFR first.")
+            return
+        datacr = None
+        try:
+            try:
+                datacr = self.fs.get_cr(line)
+                src = self.fs.ftp2fs(self.fs.rnfr, datacr)
+                line = self.fs.ftpnorm(line)
+                basedir, basename = os.path.split(line)
+                dst = self.fs.ftp2fs(basedir, datacr)
+                self.run_as_current_user(self.fs.rename, src, dst, basename)
+            except OSError, err:
+                why = _strerror(err)
+                self.log('FAIL RNFR/RNTO "%s ==> %s". %s.' \
+                         %(self.fs.ftpnorm(self.fs.rnfr), line, why))
+                self.respond('550 %s.' %why)
+            else:
+                self.log('OK RNFR/RNTO "%s ==> %s".' \
+                         %(self.fs.ftpnorm(self.fs.rnfr), line))
+                self.respond("250 Renaming ok.")
+        finally:
+            self.fs.rnfr = None
+            self.fs.close_cr(datacr)
+
+
+    # --- others
+
+    def ftp_TYPE(self, line):
+        """Set the current data type to binary/ASCII."""
+        line = line.upper()
+        if line in ("A", "AN", "A N"):
+            self.respond("200 Type set to: ASCII.")
+            self.current_type = 'a'
+        elif line in ("I", "L8", "L 8"):
+            self.respond("200 Type set to: Binary.")
+            self.current_type = 'i'
+        else:
+            self.respond('504 Unsupported type "%s".' 
%line) + + def ftp_STRU(self, line): + """Set file structure (obsolete).""" + # obsolete (backward compatibility with older ftp clients) + if line in ('f','F'): + self.respond('200 File transfer structure set to: F.') + else: + self.respond('504 Unimplemented STRU type.') + + def ftp_MODE(self, line): + """Set data transfer mode (obsolete)""" + # obsolete (backward compatibility with older ftp clients) + if line in ('s', 'S'): + self.respond('200 Transfer mode set to: S') + else: + self.respond('504 Unimplemented MODE type.') + + def ftp_STAT(self, line): + """Return statistics about current ftp session. If an argument + is provided return directory listing over command channel. + + Implementation note: + + RFC-959 do not explicitly mention globbing; this means that FTP + servers are not required to support globbing in order to be + compliant. However, many FTP servers do support globbing as a + measure of convenience for FTP clients and users. + + In order to search for and match the given globbing expression, + the code has to search (possibly) many directories, examine + each contained filename, and build a list of matching files in + memory. Since this operation can be quite intensive, both CPU- + and memory-wise, we limit the search to only one directory + non-recursively, as LIST does. + """ + # return STATus information about ftpd + if not line: + s = [] + s.append('Connected to: %s:%s' %self.socket.getsockname()[:2]) + if self.authenticated: + s.append('Logged in as: %s' %self.username) + else: + if not self.username: + s.append("Waiting for username.") + else: + s.append("Waiting for password.") + if self.current_type == 'a': + type = 'ASCII' + else: + type = 'Binary' + s.append("TYPE: %s; STRUcture: File; MODE: Stream" %type) + if self.data_server: + s.append('Passive data channel waiting for connection.') + elif self.data_channel: + bytes_sent = self.data_channel.tot_bytes_sent + bytes_recv = self.data_channel.tot_bytes_received + s.append('Data connection open:') + s.append('Total bytes sent: %s' %bytes_sent) + s.append('Total bytes received: %s' %bytes_recv) + else: + s.append('Data connection closed.') + + self.push('211-FTP server status:\r\n') + self.push(''.join([' %s\r\n' %item for item in s])) + self.respond('211 End of status.') + # return directory LISTing over the command channel + else: + datacr = None + try: + datacr = self.fs.get_cr(line) + iterator = self.run_as_current_user(self.fs.get_stat_dir, line, datacr) + except OSError, err: + self.respond('550 %s.' 
%_strerror(err))
+            else:
+                self.push('213-Status of "%s":\r\n' %self.fs.ftpnorm(line))
+                self.push_with_producer(BufferedIteratorProducer(iterator))
+                self.respond('213 End of status.')
+            self.fs.close_cr(datacr)
+
+    def ftp_FEAT(self, line):
+        """List all new features supported as defined in RFC-2389."""
+        features = ['EPRT','EPSV','MDTM','MLSD','REST STREAM','SIZE','TVFS']
+        s = ''
+        for fact in self.available_facts:
+            if fact in self.current_facts:
+                s += fact + '*;'
+            else:
+                s += fact + ';'
+        features.append('MLST ' + s)
+        features.sort()
+        self.push("211-Features supported:\r\n")
+        self.push("".join([" %s\r\n" %x for x in features]))
+        self.respond('211 End FEAT.')
+
+    def ftp_OPTS(self, line):
+        """Specify options for FTP commands as specified in RFC-2389."""
+        try:
+            assert (not line.count(' ') > 1), 'Invalid number of arguments'
+            if ' ' in line:
+                cmd, arg = line.split(' ')
+                assert (';' in arg), 'Invalid argument'
+            else:
+                cmd, arg = line, ''
+            # actually the only command able to accept options is MLST
+            assert (cmd.upper() == 'MLST'), 'Unsupported command "%s"' %cmd
+        except AssertionError, err:
+            self.respond('501 %s.' %err)
+        else:
+            facts = [x.lower() for x in arg.split(';')]
+            self.current_facts = [x for x in facts if x in self.available_facts]
+            f = ''.join([x + ';' for x in self.current_facts])
+            self.respond('200 MLST OPTS ' + f)
+
+    def ftp_NOOP(self, line):
+        """Do nothing."""
+        self.respond("200 I successfully done nothin'.")
+
+    def ftp_SYST(self, line):
+        """Return system type (always returns UNIX type: L8)."""
+        # This command is used to find out the type of operating system
+        # at the server. The reply shall have as its first word one of
+        # the system names listed in RFC-943.
+        # Since we always return a "/bin/ls -lA"-like output on LIST we
+        # prefer to respond as if we were on Unix in any case.
+        self.respond("215 UNIX Type: L8")
+
+    def ftp_ALLO(self, line):
+        """Allocate bytes for storage (obsolete)."""
+        # obsolete (always respond with 202)
+        self.respond("202 No storage allocation necessary.")
+
+    def ftp_HELP(self, line):
+        """Return help text to the client."""
+        if line:
+            if line.upper() in proto_cmds:
+                self.respond("214 %s" %proto_cmds[line.upper()])
+            else:
+                self.respond("501 Unrecognized command.")
+        else:
+            # provide a compact list of recognized commands
+            def formatted_help():
+                cmds = []
+                keys = proto_cmds.keys()
+                keys.sort()
+                while keys:
+                    elems = tuple((keys[0:8]))
+                    cmds.append(' %-6s' * len(elems) %elems + '\r\n')
+                    del keys[0:8]
+                return ''.join(cmds)
+
+            self.push("214-The following commands are recognized:\r\n")
+            self.push(formatted_help())
+            self.respond("214 Help command successful.")
+
+
+    # --- support for deprecated cmds
+
+    # RFC-1123 requires that the server treat the XCUP, XCWD, XMKD, XPWD
+    # and XRMD commands as synonyms for CDUP, CWD, MKD, PWD and RMD.
+    # These commands are obsolete but some ftp clients (e.g. Windows
+    # ftp.exe) still use them.
+
+    def ftp_XCUP(self, line):
+        """Change to the parent directory. Synonym for CDUP. Deprecated."""
+        self.ftp_CDUP(line)
+
+    def ftp_XCWD(self, line):
+        """Change the current working directory. Synonym for CWD. Deprecated."""
+        self.ftp_CWD(line)
+
+    def ftp_XMKD(self, line):
+        """Create the specified directory. Synonym for MKD. Deprecated."""
+        self.ftp_MKD(line)
+
+    def ftp_XPWD(self, line):
+        """Return the current working directory. Synonym for PWD. Deprecated."""
+        self.ftp_PWD(line)
+
+    def ftp_XRMD(self, line):
+        """Remove the specified directory. Synonym for RMD. Deprecated."""
+        self.ftp_RMD(line)
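+
+    # Illustrative OPTS negotiation for ftp_OPTS above (a transcript
+    # sketch, not captured output): requested facts are lower-cased and
+    # filtered against available_facts, so unknown facts are dropped
+    # from the reply rather than rejected.
+    #   C> OPTS MLST type;size;media-type;
+    #   S> 200 MLST OPTS type;size;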
Deprecated.""" + self.ftp_RMD(line) + + +class FTPServer(asyncore.dispatcher): + """This class is an asyncore.disptacher subclass. It creates a FTP + socket listening on
, dispatching the requests to a + (typically FTPHandler class). + + Depending on the type of address specified IPv4 or IPv6 connections + (or both, depending from the underlying system) will be accepted. + + All relevant session information is stored in class attributes + described below. + Overriding them is strongly recommended to avoid running out of + file descriptors (DoS)! + + - (int) max_cons: + number of maximum simultaneous connections accepted (defaults + to 0 == unlimited). + + - (int) max_cons_per_ip: + number of maximum connections accepted for the same IP address + (defaults to 0 == unlimited). + """ + + max_cons = 0 + max_cons_per_ip = 0 + + def __init__(self, address, handler): + """Initiate the FTP server opening listening on address. + + - (tuple) address: the host:port pair on which the command + channel will listen. + + - (classobj) handler: the handler class to use. + """ + asyncore.dispatcher.__init__(self) + self.handler = handler + self.ip_map = [] + host, port = address + + # AF_INET or AF_INET6 socket + # Get the correct address family for our host (allows IPv6 addresses) + try: + info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, + socket.SOCK_STREAM, 0, socket.AI_PASSIVE) + except socket.gaierror: + # Probably a DNS issue. Assume IPv4. + self.create_socket(socket.AF_INET, socket.SOCK_STREAM) + self.set_reuse_addr() + self.bind((host, port)) + else: + for res in info: + af, socktype, proto, canonname, sa = res + try: + self.create_socket(af, socktype) + self.set_reuse_addr() + self.bind(sa) + except socket.error, msg: + if self.socket: + self.socket.close() + self.socket = None + continue + break + if not self.socket: + raise socket.error, msg + self.listen(5) + + def set_reuse_addr(self): + # Overridden for convenience. Avoid to reuse address on Windows. + if (os.name in ('nt', 'ce')) or (sys.platform == 'cygwin'): + return + asyncore.dispatcher.set_reuse_addr(self) + + def serve_forever(self, **kwargs): + """A wrap around asyncore.loop(); starts the asyncore polling + loop. + + The keyword arguments in kwargs are the same expected by + asyncore.loop() function: timeout, use_poll, map and count. + """ + if not 'count' in kwargs: + log("Serving FTP on %s:%s" %self.socket.getsockname()[:2]) + + # backward compatibility for python < 2.4 + if not hasattr(self, '_map'): + if not 'map' in kwargs: + map = asyncore.socket_map + else: + map = kwargs['map'] + self._map = self.handler._map = map + + try: + # FIX #16, #26 + # use_poll specifies whether to use select module's poll() + # with asyncore or whether to use asyncore's own poll() + # method Python versions < 2.4 need use_poll set to False + # This breaks on OS X systems if use_poll is set to True. + # All systems seem to work fine with it set to False + # (tested on Linux, Windows, and OS X platforms) + if kwargs: + asyncore.loop(**kwargs) + else: + asyncore.loop(timeout=1.0, use_poll=False) + except (KeyboardInterrupt, SystemExit, asyncore.ExitNow): + log("Shutting down FTPd.") + self.close_all() + + def handle_accept(self): + """Called when remote client initiates a connection.""" + sock_obj, addr = self.accept() + log("[]%s:%s Connected." %addr[:2]) + + handler = self.handler(sock_obj, self) + ip = addr[0] + self.ip_map.append(ip) + + # For performance and security reasons we should always set a + # limit for the number of file descriptors that socket_map + # should contain. 
When we're running out of such limit we'll + # use the last available channel for sending a 421 response + # to the client before disconnecting it. + if self.max_cons: + if len(self._map) > self.max_cons: + handler.handle_max_cons() + return + + # accept only a limited number of connections from the same + # source address. + if self.max_cons_per_ip: + if self.ip_map.count(ip) > self.max_cons_per_ip: + handler.handle_max_cons_per_ip() + return + + handler.handle() + + def writable(self): + return 0 + + def handle_error(self): + """Called to handle any uncaught exceptions.""" + try: + raise + except (KeyboardInterrupt, SystemExit, asyncore.ExitNow): + raise + logerror(traceback.format_exc()) + self.close() + + def close_all(self, map=None, ignore_all=False): + """Stop serving; close all existent connections disconnecting + clients. + + - (dict) map: + A dictionary whose items are the channels to close. + If map is omitted, the default asyncore.socket_map is used. + + - (bool) ignore_all: + having it set to False results in raising exception in case + of unexpected errors. + + Implementation note: + + Instead of using the current asyncore.close_all() function + which only close sockets, we iterate over all existent channels + calling close() method for each one of them, avoiding memory + leaks. + + This is how asyncore.close_all() function should work in + Python 2.6. + """ + if map is None: + map = self._map + for x in map.values(): + try: + x.close() + except OSError, x: + if x[0] == errno.EBADF: + pass + elif not ignore_all: + raise + except (asyncore.ExitNow, KeyboardInterrupt, SystemExit): + raise + except: + if not ignore_all: + raise + map.clear() + + +def test(): + # cmd line usage (provide a read-only anonymous ftp server): + # python -m pyftpdlib.FTPServer + authorizer = DummyAuthorizer() + authorizer.add_anonymous(os.getcwd(), perm='elradfmw') + FTPHandler.authorizer = authorizer + address = ('', 8021) + ftpd = FTPServer(address, FTPHandler) + ftpd.serve_forever() + +if __name__ == '__main__': + test() diff --git a/addons/document/odt2txt.py b/addons/document/odt2txt.py new file mode 100644 index 00000000000..07f792a830e --- /dev/null +++ b/addons/document/odt2txt.py @@ -0,0 +1,31 @@ +#!/usr/bin/python + +import sys, zipfile, xml.dom.minidom +import StringIO + +class OpenDocumentTextFile : + def __init__ (self, filepath) : + zip = zipfile.ZipFile(filepath) + self.content = xml.dom.minidom.parseString(zip.read("content.xml")) + + def toString (self) : + """ Converts the document to a string. """ + buffer = u"" + for val in ["text:p", "text:h", "text:list"]: + for paragraph in self.content.getElementsByTagName(val) : + buffer += self.textToString(paragraph) + "\n" + return buffer + + def textToString(self, element) : + buffer = u"" + for node in element.childNodes : + if node.nodeType == xml.dom.Node.TEXT_NODE : + buffer += node.nodeValue + elif node.nodeType == xml.dom.Node.ELEMENT_NODE : + buffer += self.textToString(node) + return buffer + +if __name__ == "__main__" : + s =StringIO.StringIO(file(sys.argv[1]).read()) + odt = OpenDocumentTextFile(s) + print odt.toString().encode('ascii','replace')
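+
+# Minimal usage sketch (the .odt path is hypothetical). This mirrors how
+# content_index.py drives the class, wrapping the raw bytes in a StringIO
+# rather than passing a path, since zipfile.ZipFile accepts file objects:
+#
+#   import StringIO
+#   from odt2txt import OpenDocumentTextFile
+#   data = file('/tmp/example.odt', 'rb').read()
+#   doc = OpenDocumentTextFile(StringIO.StringIO(data))
+#   print doc.toString().encode('ascii', 'replace')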