2009-12-02 05:36:57 +00:00
|
|
|
# -*- encoding: utf-8 -*-
|
|
|
|
##############################################################################
|
|
|
|
#
|
|
|
|
# OpenERP, Open Source Management Solution
|
|
|
|
#
|
|
|
|
# Copyright (C) P. Christeas, 2009, all rights reserved
|
|
|
|
#
|
|
|
|
# This program is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
#
|
|
|
|
##############################################################################
|
|
|
|
|
|
|
|
from osv import osv, fields
|
|
|
|
import os
|
|
|
|
import tools
|
|
|
|
import base64
|
2010-06-29 14:05:30 +00:00
|
|
|
import errno
|
2010-06-25 10:29:33 +00:00
|
|
|
import logging
|
2010-06-29 14:05:30 +00:00
|
|
|
from StringIO import StringIO
|
2010-07-09 08:23:51 +00:00
|
|
|
import psycopg2
|
2010-06-25 10:29:33 +00:00
|
|
|
|
2009-12-02 05:36:57 +00:00
|
|
|
from tools.misc import ustr
|
2010-01-21 12:01:23 +00:00
|
|
|
from tools.translate import _
|
2009-12-02 05:36:57 +00:00
|
|
|
|
|
|
|
from osv.orm import except_orm
|
|
|
|
|
|
|
|
import random
|
|
|
|
import string
|
2010-07-01 17:51:32 +00:00
|
|
|
import pooler
|
2009-12-02 05:36:57 +00:00
|
|
|
import netsvc
|
2010-06-29 14:05:30 +00:00
|
|
|
import nodes
|
2009-12-02 05:36:57 +00:00
|
|
|
from content_index import cntIndex
|
|
|
|
|
2010-02-24 09:32:10 +00:00
|
|
|
# Root directory of the "filestore" storage media: the 'document_path' config
# option when set, otherwise <root_path>/filestore.
DMS_ROOT_PATH = tools.config.get('document_path', os.path.join(tools.config.get('root_path'), 'filestore'))
|
|
|
|
|
2009-12-02 05:36:57 +00:00
|
|
|
|
|
|
|
""" The algorithm of data storage
|
|
|
|
|
|
|
|
We have to consider 3 cases of data /retrieval/:
|
|
|
|
Given (context,path) we need to access the file (aka. node).
|
|
|
|
given (directory, context), we need one of its children (for listings, views)
|
|
|
|
given (ir.attachment, context), we need its data and metadata (node).
|
|
|
|
|
|
|
|
For data /storage/ we have the cases:
|
|
|
|
Have (ir.attachment, context), we modify the file (save, update, rename etc).
|
|
|
|
Have (directory, context), we create a file.
|
|
|
|
Have (path, context), we create or modify a file.
|
|
|
|
|
|
|
|
Note that in all above cases, we don't explicitly choose the storage media,
|
|
|
|
but always require a context to be present.
|
|
|
|
|
|
|
|
Note that a node will not always have a corresponding ir.attachment. Dynamic
|
|
|
|
nodes, for one, won't. Their metadata will be computed by the parent storage
|
|
|
|
media + directory.
|
|
|
|
|
|
|
|
The algorithm says that in any of the above cases, our first goal is to locate
|
|
|
|
the node for any combination of search criteria. It would be wise NOT to
|
|
|
|
represent each node in the path (like node[/] + node[/dir1] + node[/dir1/dir2])
|
|
|
|
but directly jump to the end node (like node[/dir1/dir2]) whenever possible.
|
|
|
|
|
|
|
|
We also contain all the parenting loop code in one function. This is intentional,
|
|
|
|
because one day this will be optimized in the db (Pg 8.4).
|
|
|
|
|
|
|
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
def random_name():
    """Return a random 10-character name made of ASCII letters.

    Used to generate names for filestore files and directories. Not
    cryptographically secure; it only has to avoid accidental collisions
    within one storage directory.
    """
    # Do NOT call random.seed() here: reseeding from the system time on
    # every call means two calls within the same clock tick would return
    # identical names, risking filestore collisions.
    return ''.join(random.choice(string.ascii_letters) for _x in range(10))
|
|
|
|
|
2010-02-24 08:54:04 +00:00
|
|
|
# Characters that are invalid in (DAV/Windows) file names, mapped to safe
# replacement strings: '/' becomes '__', the others become the stringified
# hash of the character itself.
INVALID_CHARS = {'*':str(hash('*')), '|':str(hash('|')) , "\\":str(hash("\\")), '/':'__', ':':str(hash(':')), '"':str(hash('"')), '<':str(hash('<')) , '>':str(hash('>')) , '?':str(hash('?'))}
|
2009-12-02 05:36:57 +00:00
|
|
|
|
|
|
|
|
|
|
|
def create_directory(path):
    """Create a randomly-named subdirectory under *path*.

    Returns only the generated directory name (not the full path), so the
    caller can store it as a relative component.
    """
    name = random_name()
    os.makedirs(os.path.join(path, name))
    return name
|
|
|
|
|
2010-06-29 14:05:30 +00:00
|
|
|
class nodefd_file(nodes.node_descriptor):
    """ A descriptor to a real file

    Inheriting directly from file doesn't work, since file exports
    some read-only attributes (like 'name') that we don't like.

    On close(), the corresponding ir.attachment row is updated with the
    new size (and, for full writes, the indexed content and mime type).
    """
    def __init__(self, parent, path, mode):
        nodes.node_descriptor.__init__(self, parent)
        self.__file = open(path, mode)
        # Strip the binary flag: only the r/w/a semantics matter below.
        if mode.endswith('b'):
            mode = mode[:-1]
        self.mode = mode

        # Delegate the plain file operations to the underlying file object.
        for attr in ('closed', 'read', 'write', 'seek', 'tell'):
            setattr(self, attr, getattr(self.__file, attr))

    def close(self):
        # TODO: locking in init, close()
        fname = self.__file.name
        self.__file.close()

        if self.mode in ('w', 'w+', 'r+'):
            # File was (re)written: recompute indexed content, mime type
            # and size, and store them on the attachment record.
            par = self._get_parent()
            cr = pooler.get_db(par.context.dbname).cursor()
            icont = ''
            mime = ''
            filename = par.path
            if isinstance(filename, (tuple, list)):
                filename = '/'.join(filename)

            try:
                mime, icont = cntIndex.doIndex(None, filename=filename,
                        content_type=None, realfname=fname)
            except Exception:
                # best-effort: indexing failures must not block the close
                logging.getLogger('document.storage').debug('Cannot index file:', exc_info=True)

            try:
                icont_u = ustr(icont)
            except UnicodeError:
                icont_u = ''

            try:
                fsize = os.stat(fname).st_size
                cr.execute("UPDATE ir_attachment " \
                            " SET index_content = %s, file_type = %s, " \
                            " file_size = %s " \
                            " WHERE id = %s",
                            (icont_u, mime, fsize, par.file_id))
                par.content_length = fsize
                par.content_type = mime
                cr.commit()
                cr.close()
            except Exception:
                logging.getLogger('document.storage').warning('Cannot save file indexed content:', exc_info=True)

        elif self.mode in ('a', 'a+' ):
            # Append: only the size changed; content is not re-indexed.
            try:
                par = self._get_parent()
                cr = pooler.get_db(par.context.dbname).cursor()
                fsize = os.stat(fname).st_size
                cr.execute("UPDATE ir_attachment SET file_size = %s " \
                            " WHERE id = %s",
                            (fsize, par.file_id))
                par.content_length = fsize
                # BUGFIX: the previous code also did "par.content_type = mime"
                # here, but 'mime' is undefined in this branch; the NameError
                # was swallowed below and the commit never ran, silently
                # losing the size update.
                cr.commit()
                cr.close()
            except Exception:
                logging.getLogger('document.storage').warning('Cannot save file appended content:', exc_info=True)
|
|
|
|
|
2010-07-11 14:26:17 +00:00
|
|
|
|
2010-06-29 14:05:30 +00:00
|
|
|
|
|
|
|
class nodefd_db(StringIO, nodes.node_descriptor):
    """ A descriptor to db data

    Buffers reads/writes in memory (StringIO); on close(), flushes the
    buffer into ir_attachment.db_datas (raw bytea) with a fresh cursor.
    """
    def __init__(self, parent, ira_browse, mode):
        nodes.node_descriptor.__init__(self, parent)
        # Strip the binary flag: only r/w/a semantics matter here.
        if mode.endswith('b'):
            mode = mode[:-1]

        if mode in ('r', 'r+'):
            cr = ira_browse._cr # reuse the cursor of the browse object, just now
            # Explicit query to get the raw bytea, avoiding browse-level
            # type transformations.
            cr.execute('SELECT db_datas FROM ir_attachment WHERE id = %s',(ira_browse.id,))
            data = cr.fetchone()[0]
            StringIO.__init__(self, data)
        elif mode in ('w', 'w+'):
            StringIO.__init__(self, None)
            # at write, we start at 0 (= overwrite), but have the original
            # data available, in case of a seek()
        elif mode == 'a':
            StringIO.__init__(self, None)
        else:
            logging.getLogger('document.storage').error("Incorrect mode %s specified", mode)
            raise IOError(errno.EINVAL, "Invalid file mode")
        self.mode = mode

    def close(self):
        # we now open a *separate* cursor, to update the data.
        # FIXME: this may be improved, for concurrency handling
        par = self._get_parent()
        uid = par.context.uid
        cr = pooler.get_db(par.context.dbname).cursor()
        try:
            if self.mode in ('w', 'w+', 'r+'):
                # Full write: replace db_datas and refresh the index
                # content, mime type and size.
                data = self.getvalue()
                icont = ''
                mime = ''
                filename = par.path
                if isinstance(filename, (tuple, list)):
                    filename = '/'.join(filename)

                try:
                    mime, icont = cntIndex.doIndex(data, filename=filename,
                            content_type=None, realfname=None)
                except Exception:
                    # best-effort: indexing failures must not block the close
                    logging.getLogger('document.storage').debug('Cannot index file:', exc_info=True)
                    pass

                try:
                    icont_u = ustr(icont)
                except UnicodeError:
                    icont_u = ''

                out = psycopg2.Binary(data)
                cr.execute("UPDATE ir_attachment " \
                            "SET db_datas = %s, file_size=%s, " \
                            " index_content= %s, file_type=%s " \
                            " WHERE id = %s",
                            (out, len(data), icont_u, mime, par.file_id))
            elif self.mode == 'a':
                # Append: concatenate in SQL and bump the size; the buffer
                # holds only the appended bytes.
                data = self.getvalue()
                out = psycopg2.Binary(data)
                cr.execute("UPDATE ir_attachment " \
                            "SET db_datas = COALESCE(db_datas,'') || %s, " \
                            " file_size = COALESCE(file_size, 0) + %s " \
                            " WHERE id = %s",
                            (out, len(data), par.file_id))
            cr.commit()
        except Exception, e:
            logging.getLogger('document.storage').exception('Cannot update db file #%d for close:', par.file_id)
            raise
        finally:
            cr.close()
        StringIO.close(self)
|
2009-12-02 05:36:57 +00:00
|
|
|
|
2010-07-01 17:51:33 +00:00
|
|
|
class nodefd_db64(StringIO, nodes.node_descriptor):
|
|
|
|
""" A descriptor to db data, base64 (the old way)
|
|
|
|
|
|
|
|
It stores the data in base64 encoding at the db. Not optimal, but
|
|
|
|
the transparent compression of Postgres will save the day.
|
|
|
|
"""
|
|
|
|
def __init__(self, parent, ira_browse, mode):
|
|
|
|
nodes.node_descriptor.__init__(self, parent)
|
|
|
|
if mode.endswith('b'):
|
|
|
|
mode = mode[:-1]
|
|
|
|
|
|
|
|
if mode in ('r', 'r+'):
|
|
|
|
StringIO.__init__(self, base64.decodestring(ira_browse.db_datas))
|
|
|
|
elif mode in ('w', 'w+'):
|
|
|
|
StringIO.__init__(self, None)
|
|
|
|
# at write, we start at 0 (= overwrite), but have the original
|
|
|
|
# data available, in case of a seek()
|
|
|
|
elif mode == 'a':
|
|
|
|
StringIO.__init__(self, None)
|
|
|
|
else:
|
|
|
|
logging.getLogger('document.storage').error("Incorrect mode %s specified", mode)
|
|
|
|
raise IOError(errno.EINVAL, "Invalid file mode")
|
|
|
|
self.mode = mode
|
|
|
|
|
|
|
|
def close(self):
|
|
|
|
# we now open a *separate* cursor, to update the data.
|
|
|
|
# FIXME: this may be improved, for concurrency handling
|
|
|
|
par = self._get_parent()
|
|
|
|
uid = par.context.uid
|
|
|
|
cr = pooler.get_db(par.context.dbname).cursor()
|
|
|
|
try:
|
|
|
|
if self.mode in ('w', 'w+', 'r+'):
|
2010-07-11 14:26:17 +00:00
|
|
|
data = self.getvalue()
|
|
|
|
icont = ''
|
|
|
|
mime = ''
|
|
|
|
filename = par.path
|
|
|
|
if isinstance(filename, (tuple, list)):
|
|
|
|
filename = '/'.join(filename)
|
|
|
|
|
|
|
|
try:
|
|
|
|
mime, icont = cntIndex.doIndex(data, filename=filename,
|
|
|
|
content_type=None, realfname=None)
|
|
|
|
except Exception:
|
|
|
|
logging.getLogger('document.storage').debug('Cannot index file:', exc_info=True)
|
|
|
|
pass
|
|
|
|
|
|
|
|
try:
|
|
|
|
icont_u = ustr(icont)
|
|
|
|
except UnicodeError:
|
|
|
|
icont_u = ''
|
|
|
|
|
|
|
|
cr.execute('UPDATE ir_attachment SET db_datas = %s::bytea, file_size=%s, ' \
|
|
|
|
'index_content = %s, file_type = %s ' \
|
|
|
|
'WHERE id = %s',
|
|
|
|
(base64.encodestring(out), len(out), icont_u, mime, par.file_id))
|
2010-07-01 17:51:33 +00:00
|
|
|
elif self.mode == 'a':
|
|
|
|
out = self.getvalue()
|
|
|
|
# Yes, we're obviously using the wrong representation for storing our
|
|
|
|
# data as base64-in-bytea
|
|
|
|
cr.execute("UPDATE ir_attachment " \
|
|
|
|
"SET db_datas = encode( (COALESCE(decode(encode(db_datas,'escape'),'base64'),'') || decode(%s, 'base64')),'base64')::bytea , " \
|
|
|
|
" file_size = COALESCE(file_size, 0) + %s " \
|
|
|
|
" WHERE id = %s",
|
|
|
|
(base64.encodestring(out), len(out), par.file_id))
|
|
|
|
cr.commit()
|
|
|
|
except Exception, e:
|
|
|
|
logging.getLogger('document.storage').exception('Cannot update db file #%d for close:', par.file_id)
|
|
|
|
raise
|
|
|
|
finally:
|
|
|
|
cr.close()
|
|
|
|
StringIO.close(self)
|
|
|
|
|
2009-12-02 05:36:57 +00:00
|
|
|
class document_storage(osv.osv):
    """ The primary object for data storage.
    Each instance of this object is a storage media, in which our application
    can store contents. The object here controls the behaviour of the storage
    media.
    The referring document.directory-ies will control the placement of data
    into the storage.

    It is a bad idea to have multiple document.storage objects pointing to
    the same tree of filesystem storage.
    """
    _name = 'document.storage'
    _description = 'Storage Media'
    # Shared logger for all storage operations of this model.
    _doclog = logging.getLogger('document')

    _columns = {
        'name': fields.char('Name', size=64, required=True, select=1),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid':  fields.many2one('res.users', 'Last Modification User', readonly=True),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid':  fields.many2one('res.users', 'Creator', readonly=True),
        'user_id': fields.many2one('res.users', 'Owner'),
        'group_ids': fields.many2many('res.groups', 'document_storage_group_rel', 'item_id', 'group_id', 'Groups'),
        'dir_ids': fields.one2many('document.directory', 'parent_id', 'Directories'),
        # 'db' stores raw bytea, 'db64' base64-in-bytea (legacy),
        # 'filestore' random names under 'path', 'realstore' mirrors the
        # directory tree under 'path'.
        'type': fields.selection([('db', 'Database'), ('filestore', 'Internal File storage'),
                ('realstore','External file storage'),], 'Type', required=True),
        'path': fields.char('Path', size=250, select=1, help="For file storage, the root path of the storage"),
        'online': fields.boolean('Online', help="If not checked, media is currently offline and its contents not available", required=True),
        'readonly': fields.boolean('Read Only', help="If set, media is for reading only"),
    }

    def _get_rootpath(self, cr, uid, context=None):
        # Default filestore root: one subdirectory per database.
        return os.path.join(DMS_ROOT_PATH, cr.dbname)

    _defaults = {
        'user_id': lambda self, cr, uid, ctx: uid,
        'online': lambda *args: True,
        'readonly': lambda *args: False,
        # Note: the defaults below should only be used ONCE for the default
        # storage media. All other times, we should create different paths at least.
        'type': lambda *args: 'filestore',
        'path': _get_rootpath,
    }
    _sql_constraints = [
        # SQL note: a path = NULL doesn't have to be unique.
        ('path_uniq', 'UNIQUE(type,path)', "The storage path must be unique!")
        ]
|
2010-02-24 08:54:04 +00:00
|
|
|
|
2010-07-13 20:53:24 +00:00
|
|
|
def __get_random_fname(self, path):
|
|
|
|
flag = None
|
|
|
|
# This can be improved
|
|
|
|
if os.path.isdir(path):
|
|
|
|
for dirs in os.listdir(path):
|
|
|
|
if os.path.isdir(os.path.join(path, dirs)) and len(os.listdir(os.path.join(path, dirs))) < 4000:
|
|
|
|
flag = dirs
|
|
|
|
break
|
|
|
|
flag = flag or create_directory(path)
|
|
|
|
filename = random_name()
|
|
|
|
return os.path.join(flag, filename)
|
|
|
|
|
2010-02-24 08:54:04 +00:00
|
|
|
def get_data(self, cr, uid, id, file_node, context=None, fil_obj=None):
|
2009-12-07 13:11:11 +00:00
|
|
|
""" retrieve the contents of some file_node having storage_id = id
|
|
|
|
optionally, fil_obj could point to the browse object of the file
|
|
|
|
(ir.attachment)
|
|
|
|
"""
|
|
|
|
if not context:
|
|
|
|
context = {}
|
2010-02-24 08:54:04 +00:00
|
|
|
boo = self.browse(cr, uid, id, context)
|
2009-12-07 13:11:11 +00:00
|
|
|
if fil_obj:
|
|
|
|
ira = fil_obj
|
|
|
|
else:
|
2010-02-24 08:54:04 +00:00
|
|
|
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
|
|
|
|
return self.__get_data_3(cr, uid, boo, ira, context)
|
|
|
|
|
2010-06-29 14:05:30 +00:00
|
|
|
    def get_file(self, cr, uid, id, file_node, mode, context=None):
        """ Return a file-like object for the contents of some node

        The object is a nodefd_* descriptor matching the storage type;
        its close() is responsible for flushing writes back.
        :raises RuntimeError: if the storage media is offline.
        """
        if context is None:
            context = {}
        boo = self.browse(cr, uid, id, context)
        if not boo.online:
            raise RuntimeError('media offline')

        ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
        if boo.type == 'filestore':
            if not ira.store_fname:
                # On a migrated db, some files may have the wrong storage type
                # try to fix their directory.
                if mode in ('r','r+'):
                    if ira.file_size:
                        self._doclog.warning( "ir.attachment #%d does not have a filename, but is at filestore, fix it!" % ira.id)
                    raise IOError(errno.ENOENT, 'No file can be located')
                else:
                    # Opening for write: allocate a fresh filestore name.
                    # NOTE(review): no commit here; presumably the caller's
                    # transaction commits this UPDATE — confirm.
                    store_fname = self.__get_random_fname(boo.path)
                    cr.execute('UPDATE ir_attachment SET store_fname = %s WHERE id = %s',
                                (store_fname, ira.id))
                    fpath = os.path.join(boo.path, store_fname)
            else:
                fpath = os.path.join(boo.path, ira.store_fname)
            return nodefd_file(file_node, path=fpath, mode=mode)

        elif boo.type == 'db':
            # TODO: we need a better api for large files
            return nodefd_db(file_node, ira_browse=ira, mode=mode)

        elif boo.type == 'db64':
            return nodefd_db64(file_node, ira_browse=ira, mode=mode)

        elif boo.type == 'realstore':
            if not ira.store_fname:
                # On a migrated db, some files may have the wrong storage type
                # try to fix their directory.
                if ira.file_size:
                    self._doclog.warning("ir.attachment #%d does not have a filename, trying the name." %ira.id)
                sfname = ira.name
            fpath = os.path.join(boo.path,ira.store_fname or ira.name)
            # Reading a non-existent file is an error; writing creates it.
            if (not os.path.exists(fpath)) and mode in ('r','r+'):
                raise IOError("File not found: %s" % fpath)
            return nodefd_file(file_node, path=fpath, mode=mode)

        elif boo.type == 'virtual':
            raise ValueError('Virtual storage does not support static files')

        else:
            raise TypeError("No %s storage" % boo.type)
|
|
|
|
|
2010-02-24 08:54:04 +00:00
|
|
|
    def __get_data_3(self, cr, uid, boo, ira, context):
        """Return the raw content bytes of attachment *ira* on storage *boo*.

        Returns None when the attachment has no stored data to locate.
        :raises RuntimeError: if the storage media is offline.
        """
        if not boo.online:
            raise RuntimeError('media offline')
        if boo.type == 'filestore':
            if not ira.store_fname:
                # On a migrated db, some files may have the wrong storage type
                # try to fix their directory.
                if ira.file_size:
                    self._doclog.warning( "ir.attachment #%d does not have a filename, but is at filestore, fix it!" % ira.id)
                return None
            fpath = os.path.join(boo.path, ira.store_fname)
            return file(fpath, 'rb').read()
        elif boo.type == 'db64':
            # TODO: we need a better api for large files
            if ira.db_datas:
                out = base64.decodestring(ira.db_datas)
            else:
                out = ''
            return out
        elif boo.type == 'db':
            # We do an explicit query, to avoid type transformations.
            cr.execute('SELECT db_datas FROM ir_attachment WHERE id = %s', (ira.id,))
            res = cr.fetchone()
            if res:
                return res[0]
            else:
                return ''
        elif boo.type == 'realstore':
            if not ira.store_fname:
                # On a migrated db, some files may have the wrong storage type
                # try to fix their directory.
                if ira.file_size:
                    self._doclog.warning("ir.attachment #%d does not have a filename, trying the name." %ira.id)
                sfname = ira.name
            fpath = os.path.join(boo.path,ira.store_fname or ira.name)
            if os.path.exists(fpath):
                return file(fpath,'rb').read()
            elif not ira.store_fname:
                # Fallback by name didn't exist either: nothing stored yet.
                return None
            else:
                raise IOError("File not found: %s" % fpath)
        elif boo.type == 'virtual':
            raise ValueError('Virtual storage does not support static files')

        else:
            raise TypeError("No %s storage" % boo.type)
|
2009-12-02 05:36:57 +00:00
|
|
|
|
2010-02-24 08:54:04 +00:00
|
|
|
def set_data(self, cr, uid, id, file_node, data, context=None, fil_obj=None):
|
2009-12-07 13:11:11 +00:00
|
|
|
""" store the data.
|
|
|
|
This function MUST be used from an ir.attachment. It wouldn't make sense
|
|
|
|
to store things persistently for other types (dynamic).
|
|
|
|
"""
|
|
|
|
if not context:
|
|
|
|
context = {}
|
2010-02-24 08:54:04 +00:00
|
|
|
boo = self.browse(cr, uid, id, context)
|
2009-12-07 13:11:11 +00:00
|
|
|
if fil_obj:
|
|
|
|
ira = fil_obj
|
|
|
|
else:
|
2010-02-24 08:54:04 +00:00
|
|
|
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
|
|
|
|
|
2009-12-07 13:11:11 +00:00
|
|
|
if not boo.online:
|
|
|
|
raise RuntimeError('media offline')
|
2010-06-25 10:29:33 +00:00
|
|
|
self._doclog.debug( "Store data for ir.attachment #%d" % ira.id)
|
2009-12-07 13:11:11 +00:00
|
|
|
store_fname = None
|
|
|
|
fname = None
|
|
|
|
if boo.type == 'filestore':
|
|
|
|
path = boo.path
|
|
|
|
try:
|
2010-07-13 20:53:24 +00:00
|
|
|
store_fname = self.__get_random_fname(path)
|
|
|
|
fname = os.path.join(path, store_fname)
|
2010-02-24 08:54:04 +00:00
|
|
|
fp = file(fname, 'wb')
|
2009-12-07 13:11:11 +00:00
|
|
|
fp.write(data)
|
|
|
|
fp.close()
|
2010-06-25 10:29:33 +00:00
|
|
|
self._doclog.debug( "Saved data to %s" % fname)
|
2009-12-07 13:11:11 +00:00
|
|
|
filesize = len(data) # os.stat(fname).st_size
|
2010-07-13 20:53:24 +00:00
|
|
|
|
2009-12-07 13:11:11 +00:00
|
|
|
# TODO Here, an old file would be left hanging.
|
2009-12-02 05:36:57 +00:00
|
|
|
|
2010-06-25 10:29:33 +00:00
|
|
|
except Exception, e:
|
|
|
|
self._doclog.warning( "Couldn't save data to %s", path, exc_info=True)
|
2009-12-07 13:11:11 +00:00
|
|
|
raise except_orm(_('Error!'), str(e))
|
|
|
|
elif boo.type == 'db':
|
|
|
|
filesize = len(data)
|
2010-07-08 22:50:42 +00:00
|
|
|
# will that work for huge data?
|
2010-07-09 08:23:51 +00:00
|
|
|
out = psycopg2.Binary(data)
|
2010-07-08 22:50:42 +00:00
|
|
|
cr.execute('UPDATE ir_attachment SET db_datas = %s WHERE id = %s',
|
2010-07-09 08:23:51 +00:00
|
|
|
(out, file_node.file_id))
|
2010-07-08 22:50:42 +00:00
|
|
|
elif boo.type == 'db64':
|
|
|
|
filesize = len(data)
|
|
|
|
# will that work for huge data?
|
2010-02-24 08:54:04 +00:00
|
|
|
out = base64.encodestring(data)
|
2009-12-07 13:11:11 +00:00
|
|
|
cr.execute('UPDATE ir_attachment SET db_datas = %s WHERE id = %s',
|
2010-02-24 08:54:04 +00:00
|
|
|
(out, file_node.file_id))
|
2010-03-16 08:53:40 +00:00
|
|
|
elif boo.type == 'realstore':
|
|
|
|
try:
|
|
|
|
file_node.fix_ppath(cr, ira)
|
|
|
|
npath = file_node.full_path() or []
|
|
|
|
# npath may contain empty elements, for root directory etc.
|
|
|
|
for i, n in enumerate(npath):
|
|
|
|
if n == None:
|
|
|
|
del npath[i]
|
|
|
|
for n in npath:
|
|
|
|
for ch in ('*', '|', "\\", '/', ':', '"', '<', '>', '?', '..'):
|
|
|
|
if ch in n:
|
|
|
|
raise ValueError("Invalid char %s in path %s" %(ch, n))
|
|
|
|
dpath = [boo.path,]
|
|
|
|
dpath += npath[:-1]
|
|
|
|
path = os.path.join(*dpath)
|
|
|
|
if not os.path.isdir(path):
|
|
|
|
os.makedirs(path)
|
|
|
|
fname = os.path.join(path, npath[-1])
|
|
|
|
fp = file(fname,'wb')
|
|
|
|
fp.write(data)
|
|
|
|
fp.close()
|
2010-06-25 10:29:33 +00:00
|
|
|
self._doclog.debug("Saved data to %s", fname)
|
2010-03-16 08:53:40 +00:00
|
|
|
filesize = len(data) # os.stat(fname).st_size
|
|
|
|
store_fname = os.path.join(*npath)
|
|
|
|
# TODO Here, an old file would be left hanging.
|
|
|
|
except Exception,e :
|
2010-06-25 10:29:33 +00:00
|
|
|
self._doclog.warning("Couldn't save data:", exc_info=True)
|
2010-03-16 08:53:40 +00:00
|
|
|
raise except_orm(_('Error!'), str(e))
|
2010-07-08 22:50:42 +00:00
|
|
|
|
|
|
|
elif boo.type == 'virtual':
|
|
|
|
raise ValueError('Virtual storage does not support static files')
|
|
|
|
|
2009-12-07 13:11:11 +00:00
|
|
|
else:
|
|
|
|
raise TypeError("No %s storage" % boo.type)
|
2009-12-02 05:36:57 +00:00
|
|
|
|
2009-12-07 13:11:11 +00:00
|
|
|
# 2nd phase: store the metadata
|
|
|
|
try:
|
|
|
|
icont = ''
|
|
|
|
mime = ira.file_type
|
2010-03-19 08:41:30 +00:00
|
|
|
if not mime:
|
|
|
|
mime = ""
|
2009-12-07 13:11:11 +00:00
|
|
|
try:
|
2010-02-24 08:54:04 +00:00
|
|
|
mime, icont = cntIndex.doIndex(data, ira.datas_fname,
|
|
|
|
ira.file_type or None, fname)
|
2010-06-25 10:29:33 +00:00
|
|
|
except Exception:
|
|
|
|
self._doclog.debug('Cannot index file:', exc_info=True)
|
2009-12-07 13:11:11 +00:00
|
|
|
pass
|
2009-12-02 05:36:57 +00:00
|
|
|
|
2010-07-01 17:51:31 +00:00
|
|
|
try:
|
|
|
|
icont_u = ustr(icont)
|
|
|
|
except UnicodeError:
|
|
|
|
icont_u = ''
|
|
|
|
|
2009-12-07 13:11:11 +00:00
|
|
|
# a hack: /assume/ that the calling write operation will not try
|
|
|
|
# to write the fname and size, and update them in the db concurrently.
|
|
|
|
# We cannot use a write() here, because we are already in one.
|
|
|
|
cr.execute('UPDATE ir_attachment SET store_fname = %s, file_size = %s, index_content = %s, file_type = %s WHERE id = %s',
|
2010-07-01 17:51:31 +00:00
|
|
|
(store_fname, filesize, icont_u, mime, file_node.file_id))
|
2009-12-07 13:11:11 +00:00
|
|
|
file_node.content_length = filesize
|
|
|
|
file_node.content_type = mime
|
|
|
|
return True
|
2010-02-24 08:54:04 +00:00
|
|
|
except Exception, e :
|
2010-07-01 17:51:31 +00:00
|
|
|
self._doclog.warning("Couldn't save data:", exc_info=True)
|
2009-12-07 13:11:11 +00:00
|
|
|
# should we really rollback once we have written the actual data?
|
|
|
|
# at the db case (only), that rollback would be safe
|
|
|
|
raise except_orm(_('Error at doc write!'), str(e))
|
2009-12-02 05:36:57 +00:00
|
|
|
|
2010-02-24 08:54:04 +00:00
|
|
|
def prepare_unlink(self, cr, uid, storage_bo, fil_bo):
|
2009-12-07 13:11:11 +00:00
|
|
|
""" Before we unlink a file (fil_boo), prepare the list of real
|
|
|
|
files that have to be removed, too. """
|
2010-02-24 08:54:04 +00:00
|
|
|
|
2009-12-07 13:11:11 +00:00
|
|
|
if not storage_bo.online:
|
|
|
|
raise RuntimeError('media offline')
|
2010-02-24 08:54:04 +00:00
|
|
|
|
2009-12-07 13:11:11 +00:00
|
|
|
if storage_bo.type == 'filestore':
|
|
|
|
fname = fil_bo.store_fname
|
|
|
|
if not fname:
|
|
|
|
return None
|
|
|
|
path = storage_bo.path
|
2010-02-24 08:54:04 +00:00
|
|
|
return (storage_bo.id, 'file', os.path.join(path, fname))
|
2010-07-08 22:50:42 +00:00
|
|
|
elif storage_bo.type in ('db', 'db64'):
|
2009-12-07 13:11:11 +00:00
|
|
|
return None
|
2010-03-16 08:53:40 +00:00
|
|
|
elif storage_bo.type == 'realstore':
|
|
|
|
fname = fil_bo.store_fname
|
|
|
|
if not fname:
|
|
|
|
return None
|
|
|
|
path = storage_bo.path
|
2010-06-16 11:51:39 +00:00
|
|
|
return ( storage_bo.id, 'file', os.path.join(path, fname))
|
2009-12-07 13:11:11 +00:00
|
|
|
else:
|
2010-07-01 17:51:33 +00:00
|
|
|
raise TypeError("No %s storage" % storage_bo.type)
|
2009-12-02 05:36:57 +00:00
|
|
|
|
2010-02-24 08:54:04 +00:00
|
|
|
def do_unlink(self, cr, uid, unres):
|
2009-12-07 13:11:11 +00:00
|
|
|
for id, ktype, fname in unres:
|
|
|
|
if ktype == 'file':
|
|
|
|
try:
|
|
|
|
os.unlink(fname)
|
2010-02-24 08:54:04 +00:00
|
|
|
except Exception, e:
|
2010-06-25 10:29:33 +00:00
|
|
|
self._doclog.warning("Could not remove file %s, please remove manually.", fname, exc_info=True)
|
2009-12-07 13:11:11 +00:00
|
|
|
else:
|
2010-06-25 10:29:33 +00:00
|
|
|
self._doclog.warning("Unknown unlink key %s" % ktype)
|
2010-02-24 08:54:04 +00:00
|
|
|
|
2009-12-07 13:11:11 +00:00
|
|
|
return True
|
2010-02-24 08:54:04 +00:00
|
|
|
|
2010-07-01 17:51:33 +00:00
|
|
|
def simple_rename(self, cr, uid, file_node, new_name, context=None):
|
|
|
|
""" A preparation for a file rename.
|
|
|
|
It will not affect the database, but merely check and perhaps
|
|
|
|
rename the realstore file.
|
|
|
|
|
|
|
|
@return the dict of values that can safely be be stored in the db.
|
|
|
|
"""
|
|
|
|
sbro = self.browse(cr, uid, file_node.storage_id, context=context)
|
|
|
|
assert sbro, "The file #%d didn't provide storage" % file_node.file_id
|
|
|
|
|
2010-07-08 22:50:42 +00:00
|
|
|
if sbro.type in ('filestore', 'db', 'db64'):
|
2010-07-01 17:51:33 +00:00
|
|
|
# nothing to do for a rename, allow to change the db field
|
|
|
|
return { 'name': new_name, 'datas_fname': new_name }
|
|
|
|
elif sbro.type == 'realstore':
|
|
|
|
fname = fil_bo.store_fname
|
|
|
|
if not fname:
|
|
|
|
return ValueError("Tried to rename a non-stored file")
|
|
|
|
path = storage_bo.path
|
|
|
|
oldpath = os.path.join(path, fname)
|
|
|
|
|
|
|
|
for ch in ('*', '|', "\\", '/', ':', '"', '<', '>', '?', '..'):
|
|
|
|
if ch in new_name:
|
|
|
|
raise ValueError("Invalid char %s in name %s" %(ch, new_name))
|
|
|
|
|
|
|
|
file_node.fix_ppath(cr, ira)
|
|
|
|
npath = file_node.full_path() or []
|
|
|
|
dpath = [path,]
|
|
|
|
dpath.extend(npath[:-1])
|
|
|
|
dpath.append(new_name)
|
|
|
|
newpath = os.path.join(*dpath)
|
|
|
|
# print "old, new paths:", oldpath, newpath
|
|
|
|
os.rename(oldpath, newpath)
|
|
|
|
return { 'name': new_name, 'datas_fname': new_name, 'store_fname': new_name }
|
|
|
|
else:
|
|
|
|
raise TypeError("No %s storage" % boo.type)
|
|
|
|
|
2010-07-09 08:23:41 +00:00
|
|
|
def simple_move(self, cr, uid, file_node, ndir_bro, context=None):
|
|
|
|
""" A preparation for a file move.
|
|
|
|
It will not affect the database, but merely check and perhaps
|
|
|
|
move the realstore file.
|
|
|
|
|
|
|
|
@param ndir_bro a browse object of document.directory, where this
|
|
|
|
file should move to.
|
|
|
|
@return the dict of values that can safely be be stored in the db.
|
|
|
|
"""
|
|
|
|
sbro = self.browse(cr, uid, file_node.storage_id, context=context)
|
|
|
|
assert sbro, "The file #%d didn't provide storage" % file_node.file_id
|
|
|
|
|
|
|
|
par = ndir_bro
|
|
|
|
psto = None
|
|
|
|
while par:
|
|
|
|
if par.storage_id:
|
|
|
|
psto = par.storage_id.id
|
|
|
|
break
|
|
|
|
par = par.parent_id
|
|
|
|
if file_node.storage_id != psto:
|
|
|
|
self._doclog.debug('Cannot move file %r from %r to %r', file_node, file_node.parent, ndir_bro.name)
|
|
|
|
raise NotImplementedError('Cannot move files between storage media')
|
|
|
|
|
|
|
|
if sbro.type in ('filestore', 'db', 'db64'):
|
|
|
|
# nothing to do for a rename, allow to change the db field
|
|
|
|
return { 'parent_id': ndir_bro.id }
|
|
|
|
elif sbro.type == 'realstore':
|
|
|
|
raise NotImplementedError("Cannot move in realstore, yet") # TODO
|
|
|
|
fname = fil_bo.store_fname
|
|
|
|
if not fname:
|
|
|
|
return ValueError("Tried to rename a non-stored file")
|
|
|
|
path = storage_bo.path
|
|
|
|
oldpath = os.path.join(path, fname)
|
|
|
|
|
|
|
|
for ch in ('*', '|', "\\", '/', ':', '"', '<', '>', '?', '..'):
|
|
|
|
if ch in new_name:
|
|
|
|
raise ValueError("Invalid char %s in name %s" %(ch, new_name))
|
|
|
|
|
|
|
|
file_node.fix_ppath(cr, ira)
|
|
|
|
npath = file_node.full_path() or []
|
|
|
|
dpath = [path,]
|
|
|
|
dpath.extend(npath[:-1])
|
|
|
|
dpath.append(new_name)
|
|
|
|
newpath = os.path.join(*dpath)
|
|
|
|
# print "old, new paths:", oldpath, newpath
|
|
|
|
os.rename(oldpath, newpath)
|
|
|
|
return { 'name': new_name, 'datas_fname': new_name, 'store_fname': new_name }
|
|
|
|
else:
|
|
|
|
raise TypeError("No %s storage" % boo.type)
|
|
|
|
|
2009-12-02 05:36:57 +00:00
|
|
|
|
|
|
|
# Instantiate to register the model with the OpenERP ORM pool (pre-v6 style).
document_storage()
|
|
|
|
|
|
|
|
|
|
|
|
#eof
|