bzr revid: hmo@tinyerp.com-20100714084946-m0gyak17tvdra5k9
This commit is contained in:
Harry (OpenERP) 2010-07-14 14:19:46 +05:30
commit 78d6e086f6
7 changed files with 139 additions and 79 deletions

View File

@ -157,7 +157,10 @@ class document_file(osv.osv):
return False
if not self._check_duplication(cr, uid, vals, ids, 'write'):
raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
if ('parent_id' in vals) or ('name' in vals):
# if nodes call this write(), they must skip the code below
from_node = context and context.get('__from_node', False)
if (('parent_id' in vals) or ('name' in vals)) and not from_node:
# perhaps this file is renaming or changing directory
nctx = nodes.get_node_context(cr,uid,context={})
dirobj = self.pool.get('document.directory')

View File

@ -98,10 +98,9 @@ class nodefd_file(nodes.node_descriptor):
def __init__(self, parent, path, mode):
nodes.node_descriptor.__init__(self, parent)
self.__file = open(path, mode)
if mode in ('w', 'w+', 'r+'):
self._need_index = True
else:
self._need_index = False
if mode.endswith('b'):
mode = mode[:-1]
self.mode = mode
for attr in ('closed', 'read', 'write', 'seek', 'tell'):
setattr(self,attr, getattr(self.__file, attr))
@ -110,8 +109,8 @@ class nodefd_file(nodes.node_descriptor):
# TODO: locking in init, close()
fname = self.__file.name
self.__file.close()
if self._need_index:
if self.mode in ('w', 'w+', 'r+'):
par = self._get_parent()
cr = pooler.get_db(par.context.dbname).cursor()
icont = ''
@ -133,14 +132,33 @@ class nodefd_file(nodes.node_descriptor):
icont_u = ''
try:
cr.execute('UPDATE ir_attachment SET index_content = %s, file_type = %s WHERE id = %s',
(icont_u, mime, par.file_id))
par.content_length = filesize
fsize = os.stat(fname).st_size
cr.execute("UPDATE ir_attachment " \
" SET index_content = %s, file_type = %s, " \
" file_size = %s " \
" WHERE id = %s",
(icont_u, mime, fsize, par.file_id))
par.content_length = fsize
par.content_type = mime
cr.commit()
cr.close()
except Exception:
logging.getLogger('document.storage').debug('Cannot save file indexed content:', exc_info=True)
logging.getLogger('document.storage').warning('Cannot save file indexed content:', exc_info=True)
elif self.mode in ('a', 'a+' ):
try:
par = self._get_parent()
cr = pooler.get_db(par.context.dbname).cursor()
fsize = os.stat(fname).st_size
cr.execute("UPDATE ir_attachment SET file_size = %s " \
" WHERE id = %s",
(fsize, par.file_id))
par.content_length = fsize
cr.commit()
cr.close()
except Exception:
logging.getLogger('document.storage').warning('Cannot save file appended content:', exc_info=True)
class nodefd_db(StringIO, nodes.node_descriptor):
@ -336,6 +354,49 @@ class document_storage(osv.osv):
('path_uniq', 'UNIQUE(type,path)', "The storage path must be unique!")
]
def __get_random_fname(self, path):
    """Pick a bucket subdirectory under *path* and return a relative
    'subdir/random-name' string for storing a new file.

    Existing subdirectories of *path* are scanned first and the first
    one holding fewer than 4000 entries is reused; when none qualifies
    a fresh bucket is created through create_directory().
    """
    subdir = None
    # This can be improved
    if os.path.isdir(path):
        for entry in os.listdir(path):
            full = os.path.join(path, entry)
            if os.path.isdir(full) and len(os.listdir(full)) < 4000:
                subdir = entry
                break
    if not subdir:
        subdir = create_directory(path)
    return os.path.join(subdir, random_name())
def __prepare_realpath(self, cr, file_node, ira, store_path, do_create=True):
    """ Cleanup path for realstore, create dirs if needed

        @param file_node the node
        @param ira ir.attachment browse of the file_node
        @param store_path the path of the parent storage object, list
        @param do_create create the directories, if needed
        @return tuple(path "/var/filestore/real/dir/", npath ['dir','fname.ext'] )
        @raise ValueError if the node path contains '..' or a character
               that is unsafe in a filesystem name
    """
    file_node.fix_ppath(cr, ira)
    npath = file_node.full_path() or []
    # npath may contain empty elements, for root directory etc.
    npath = filter(lambda x: x is not None, npath)
    # if self._debug:
    #     self._doclog.debug('Npath: %s', npath)
    for n in npath:
        if n == '..':
            raise ValueError("Invalid '..' element in path")
        for ch in ('*', '|', "\\", '/', ':', '"', '<', '>', '?',):
            if ch in n:
                raise ValueError("Invalid char %s in path %s" % (ch, n))
    dpath = [store_path,]
    dpath += npath[:-1]
    path = os.path.join(*dpath)
    if do_create and not os.path.isdir(path):
        # Fix: honour do_create — previously the directories were created
        # unconditionally, even when the caller opened the file read-only.
        self._doclog.debug("Create dirs: %s", path)
        os.makedirs(path)
    return path, npath
def get_data(self, cr, uid, id, file_node, context=None, fil_obj=None):
""" retrieve the contents of some file_node having storage_id = id
optionally, fil_obj could point to the browse object of the file
@ -364,10 +425,17 @@ class document_storage(osv.osv):
if not ira.store_fname:
# On a migrated db, some files may have the wrong storage type
# try to fix their directory.
if ira.file_size:
self._doclog.warning( "ir.attachment #%d does not have a filename, but is at filestore, fix it!" % ira.id)
raise IOError(errno.ENOENT, 'No file can be located')
fpath = os.path.join(boo.path, ira.store_fname)
if mode in ('r','r+'):
if ira.file_size:
self._doclog.warning( "ir.attachment #%d does not have a filename, but is at filestore, fix it!" % ira.id)
raise IOError(errno.ENOENT, 'No file can be located')
else:
store_fname = self.__get_random_fname(boo.path)
cr.execute('UPDATE ir_attachment SET store_fname = %s WHERE id = %s',
(store_fname, ira.id))
fpath = os.path.join(boo.path, store_fname)
else:
fpath = os.path.join(boo.path, ira.store_fname)
return nodefd_file(file_node, path=fpath, mode=mode)
elif boo.type == 'db':
@ -378,15 +446,15 @@ class document_storage(osv.osv):
return nodefd_db64(file_node, ira_browse=ira, mode=mode)
elif boo.type == 'realstore':
if not ira.store_fname:
# On a migrated db, some files may have the wrong storage type
# try to fix their directory.
if ira.file_size:
self._doclog.warning("ir.attachment #%d does not have a filename, trying the name." %ira.id)
sfname = ira.name
fpath = os.path.join(boo.path,ira.store_fname or ira.name)
if not os.path.exists(fpath):
path, npath = self.__prepare_realpath(cr, file_node, ira, boo.path,
do_create = (mode[1] in ('w','a')) )
fpath = os.path.join(path, npath[-1])
if (not os.path.exists(fpath)) and mode[1] == 'r':
raise IOError("File not found: %s" % fpath)
elif mode[1] in ('w', 'a') and not ira.store_fname:
store_fname = os.path.join(*npath)
cr.execute('UPDATE ir_attachment SET store_fname = %s WHERE id = %s',
(store_fname, ira.id))
return nodefd_file(file_node, path=fpath, mode=mode)
elif boo.type == 'virtual':
@ -464,23 +532,14 @@ class document_storage(osv.osv):
if boo.type == 'filestore':
path = boo.path
try:
flag = None
# This can be improved
if os.path.isdir(path):
for dirs in os.listdir(path):
if os.path.isdir(os.path.join(path, dirs)) and len(os.listdir(os.path.join(path, dirs))) < 4000:
flag = dirs
break
flag = flag or create_directory(path)
filename = random_name()
fname = os.path.join(path, flag, filename)
store_fname = self.__get_random_fname(path)
fname = os.path.join(path, store_fname)
fp = file(fname, 'wb')
fp.write(data)
fp.close()
self._doclog.debug( "Saved data to %s" % fname)
filesize = len(data) # os.stat(fname).st_size
store_fname = os.path.join(flag, filename)
# TODO Here, an old file would be left hanging.
except Exception, e:
@ -500,21 +559,7 @@ class document_storage(osv.osv):
(out, file_node.file_id))
elif boo.type == 'realstore':
try:
file_node.fix_ppath(cr, ira)
npath = file_node.full_path() or []
# npath may contain empty elements, for root directory etc.
for i, n in enumerate(npath):
if n == None:
del npath[i]
for n in npath:
for ch in ('*', '|', "\\", '/', ':', '"', '<', '>', '?', '..'):
if ch in n:
raise ValueError("Invalid char %s in path %s" %(ch, n))
dpath = [boo.path,]
dpath += npath[:-1]
path = os.path.join(*dpath)
if not os.path.isdir(path):
os.makedirs(path)
path, npath = self.__prepare_realpath(cr, file_node, ira, boo.path, do_create=True)
fname = os.path.join(path, npath[-1])
fp = file(fname,'wb')
fp.write(data)
@ -615,25 +660,23 @@ class document_storage(osv.osv):
# nothing to do for a rename, allow to change the db field
return { 'name': new_name, 'datas_fname': new_name }
elif sbro.type == 'realstore':
fname = fil_bo.store_fname
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
path, npath = self.__prepare_realpath(cr, file_node, ira, sbro.path, do_create=False)
fname = ira.store_fname
if not fname:
return ValueError("Tried to rename a non-stored file")
path = storage_bo.path
oldpath = os.path.join(path, fname)
for ch in ('*', '|', "\\", '/', ':', '"', '<', '>', '?', '..'):
if ch in new_name:
raise ValueError("Invalid char %s in name %s" %(ch, new_name))
file_node.fix_ppath(cr, ira)
npath = file_node.full_path() or []
dpath = [path,]
dpath.extend(npath[:-1])
dpath.append(new_name)
newpath = os.path.join(*dpath)
# print "old, new paths:", oldpath, newpath
self._doclog.warning("Trying to rename a non-stored file")
if fname != os.path.join(*npath):
self._doclog.warning("inconsistency in realstore: %s != %s" , fname, repr(npath))
oldpath = os.path.join(path, npath[-1])
newpath = os.path.join(path, new_name)
os.rename(oldpath, newpath)
return { 'name': new_name, 'datas_fname': new_name, 'store_fname': new_name }
store_path = npath[:-1]
store_path.append(new_name)
store_fname = os.path.join(*store_path)
return { 'name': new_name, 'datas_fname': new_name, 'store_fname': store_fname }
else:
raise TypeError("No %s storage" % boo.type)

View File

@ -631,7 +631,9 @@ class node_dir(node_database):
if not in_write:
# We have to update the data ourselves
if ret:
dir_obj.write(cr, self.context.uid, [self.dir_id,], ret, self.context.context)
ctx = self.context.context.copy()
ctx['__from_node'] = True
dir_obj.write(cr, self.context.uid, [self.dir_id,], ret, ctx)
ret = True
return ret
@ -1196,7 +1198,9 @@ class node_file(node_class):
if not in_write:
# We have to update the data ourselves
if ret:
doc_obj.write(cr, self.context.uid, [self.file_id,], ret, self.context.context)
ctx = self.context.context.copy()
ctx['__from_node'] = True
doc_obj.write(cr, self.context.uid, [self.file_id,], ret, ctx )
ret = True
return ret

View File

@ -70,4 +70,3 @@
assert ids == [ ref("attach_3rd")], ids
-
!delete {model: ir.attachment, id: attach_3rd, search: }
-

View File

@ -150,6 +150,7 @@ class abstracted_fs(object):
raise OSError(1, 'Operation not permited.')
ret = child.open_data(cr, mode)
cr.commit()
return ret
except EnvironmentError:
raise
@ -159,7 +160,11 @@ class abstracted_fs(object):
try:
child = node.create_child(cr, objname, data=None)
return child.open_data(cr, mode)
ret = child.open_data(cr, mode)
cr.commit()
return ret
except EnvironmentError:
raise
except Exception,e:
self._log.exception('Cannot create item %s at node %s', objname, repr(node))
raise OSError(1, 'Operation not permited.')
@ -171,6 +176,7 @@ class abstracted_fs(object):
cr, node, rem = datacr
try:
res = node.open_data(cr, mode)
cr.commit()
except TypeError, e:
raise IOError(errno.EINVAL, "No data")
return res
@ -428,7 +434,7 @@ class abstracted_fs(object):
def getsize(self, datacr):
"""Return the size of the specified file in bytes."""
if not (datacr and datacr[1]):
return 0L
raise IOError(errno.ENOENT, "No such file or directory")
if datacr[1].type in ('file', 'content'):
return datacr[1].get_data_len(datacr[0]) or 0L
return 0L

View File

@ -2565,9 +2565,12 @@ class FTPHandler(asynchat.async_chat):
# return
size = self.run_as_current_user(self.fs.getsize, datacr)
except EnvironmentError, err:
why = _strerror(err)
why = err.strerror
self.log('FAIL SIZE "%s". %s.' %(line, why))
self.respond('550 %s.' %why)
if err.errno == errno.ENOENT:
self.respond("404 %s." % why)
else:
self.respond('550 %s.' % why)
else:
self.respond("213 %s" %size)
self.log('OK SIZE "%s".' %line)
@ -2640,10 +2643,13 @@ class FTPHandler(asynchat.async_chat):
try:
datacr = self.fs.get_crdata(line, mode='delete')
self.run_as_current_user(self.fs.remove, datacr)
except OSError, err:
why = _strerror(err)
except EnvironmentError, err:
why = err.strerror
self.log('FAIL DELE "%s". %s.' %(line, why))
self.respond('550 %s.' %why)
if err.errno == errno.ENOENT:
self.respond('404 %s.' % why)
else:
self.respond('550 %s.' %why)
else:
self.log('OK DELE "%s".' %line)
self.respond("250 File removed.")

View File

@ -217,5 +217,4 @@
# TODO
-
I move the 200 files to 'Test-Folder2'
# TODO
-
# TODO