odoo/addons/document/document.py

# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from osv import osv, fields
import os
# from psycopg2 import Binary
#from tools import config
import tools
from tools.translate import _
import nodes
import logging
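
# Root of the on-disk filestore: <root_path>/filestore by default, or the
# 'document_path' entry from the server configuration when it is set.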
DMS_ROOT_PATH = tools.config.get('document_path', os.path.join(tools.config['root_path'], 'filestore'))


class document_file(osv.osv):
    _inherit = 'ir.attachment'
    _rec_name = 'datas_fname'

    def _get_filestore(self, cr):
        return os.path.join(DMS_ROOT_PATH, cr.dbname)

    def _data_get(self, cr, uid, ids, name, arg, context=None):
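        """Read the file contents through the node filesystem layer.

        Serves the ``datas`` function field: returns the base64-encoded
        content of each attachment, or only its length when ``bin_size``
        is set in the context.
        """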
        if context is None:
            context = {}
        fbrl = self.browse(cr, uid, ids, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        # nctx will /not/ inherit the caller's context. Most of
        # it would be useless, anyway (like active_id, active_model,
        # bin_size etc.)
        result = {}
        bin_size = context.get('bin_size', False)
        for fbro in fbrl:
            fnode = nodes.node_file(None, None, nctx, fbro)
            if not bin_size:
                data = fnode.get_data(cr, fbro)
                result[fbro.id] = base64.encodestring(data or '')
            else:
                result[fbro.id] = fnode.get_data_len(cr, fbro)
        return result

    #
    # This code can be improved
    #
    def _data_set(self, cr, uid, id, name, value, arg, context=None):
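        """Write the file content through the node filesystem layer.

        Inverse of ``_data_get``: decodes the base64 value and hands it to
        the node, which decides where the bytes are actually stored.
        """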
        if not value:
            return True
        fbro = self.browse(cr, uid, id, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        fnode = nodes.node_file(None, None, nctx, fbro)
        res = fnode.set_data(cr, base64.decodestring(value), fbro)
        return res

    _columns = {
        # Columns from ir.attachment:
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'res_model': fields.char('Attached Model', size=64, readonly=True, change_default=True),
        'res_id': fields.integer('Attached ID', readonly=True),
        # If ir.attachment contained any data before the document module was
        # installed, preserve it: don't drop the column!
        'db_datas': fields.binary('Data', oldname='datas'),
        'datas': fields.function(_data_get, method=True, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
        # Fields of document:
        'user_id': fields.many2one('res.users', 'Owner', select=1),
        # 'group_ids': fields.many2many('res.groups', 'document_group_rel', 'item_id', 'group_id', 'Groups'),
        # The directory id is now mandatory, but it can still be computed
        # automatically.
        'parent_id': fields.many2one('document.directory', 'Directory', select=1, required=True, change_default=True),
        'index_content': fields.text('Indexed Content'),
        'partner_id': fields.many2one('res.partner', 'Partner', select=1),
        'file_size': fields.integer('File Size', required=True),
        'file_type': fields.char('Content Type', size=128),
        # Fields used for file storage:
        'store_fname': fields.char('Stored Filename', size=200),
    }
    _order = "create_date desc"

    def __get_def_directory(self, cr, uid, context=None):
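        """Return the id of the root directory, used as default parent."""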
        dirobj = self.pool.get('document.directory')
        return dirobj._get_root_directory(cr, uid, context)

    _defaults = {
        'user_id': lambda self, cr, uid, ctx: uid,
        'file_size': lambda self, cr, uid, ctx: 0,
        'parent_id': __get_def_directory,
    }

    _sql_constraints = [
        # filename_uniq is not possible in pure SQL
    ]

    def _check_duplication(self, cr, uid, vals, ids=None, op='create'):
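        """Check that no other attachment shares the same name within the
        same directory and attached record (name/parent_id/res_model/res_id).
        Returns False when a duplicate is found.
        """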
        if ids is None:
            ids = []
        name = vals.get('name', False)
        parent_id = vals.get('parent_id', False)
        res_model = vals.get('res_model', False)
        res_id = vals.get('res_id', 0)
        if op == 'write':
            for fbro in self.browse(cr, uid, ids):  # FIXME fields_only
                if not name:
                    name = fbro.name
                if not parent_id:
                    parent_id = fbro.parent_id and fbro.parent_id.id or False
                if not res_model:
                    res_model = fbro.res_model or False
                if not res_id:
                    res_id = fbro.res_id or 0
                res = self.search(cr, uid, [('id', '<>', fbro.id), ('name', '=', name), ('parent_id', '=', parent_id), ('res_model', '=', res_model), ('res_id', '=', res_id)])
                if len(res):
                    return False
        if op == 'create':
            res = self.search(cr, uid, [('name', '=', name), ('parent_id', '=', parent_id), ('res_id', '=', res_id), ('res_model', '=', res_model)])
            if len(res):
                return False
        return True

    def copy(self, cr, uid, id, default=None, context=None):
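        """Duplicate the attachment, appending " (copy)" to the name so the
        uniqueness check does not reject the new record.
        """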
        if not default:
            default = {}
        if 'name' not in default:
            name = self.read(cr, uid, [id])[0]['name']
            default.update({'name': name + " (copy)"})
        return super(document_file, self).copy(cr, uid, id, default, context=context)

    def write(self, cr, uid, ids, vals, context=None):
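        """Write attachment values, keeping the node filesystem in sync.

        A change of ``name`` or ``parent_id`` is treated as a rename/move
        and is delegated to the node layer first; records the node layer
        has already updated are excluded from the regular write below.
        """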
        result = False
        if not isinstance(ids, list):
            ids = [ids]
        res = self.search(cr, uid, [('id', 'in', ids)])
        if not len(res):
            return False
        if not self._check_duplication(cr, uid, vals, ids, 'write'):
            raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))

        # If nodes call this write(), they must skip the code below.
        from_node = context and context.get('__from_node', False)
        if (('parent_id' in vals) or ('name' in vals)) and not from_node:
            # Perhaps this file is being renamed or moved to another directory.
            nctx = nodes.get_node_context(cr, uid, context={})
            dirobj = self.pool.get('document.directory')
            if 'parent_id' in vals:
                dbro = dirobj.browse(cr, uid, vals['parent_id'], context=context)
                dnode = nctx.get_dir_node(cr, dbro)
            else:
                dbro = None
                dnode = None
            ids2 = []
            for fbro in self.browse(cr, uid, ids, context=context):
                if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
                        and ('name' not in vals or fbro.name == vals['name']):
                    # Nothing to move or rename for this record.
                    ids2.append(fbro.id)
                    continue
                fnode = nctx.get_file_node(cr, fbro)
                res = fnode.move_to(cr, dnode or fnode.parent, vals.get('name', fbro.name), fbro, dbro, True)
                if isinstance(res, dict):
                    vals2 = vals.copy()
                    vals2.update(res)
                    wid = res.get('id', fbro.id)
                    result = super(document_file, self).write(cr, uid, wid, vals2, context=context)
                    # TODO: how to handle/merge several results?
                elif res == True:
                    ids2.append(fbro.id)
                elif res == False:
                    pass
            ids = ids2

        if 'file_size' in vals:  # only write that field using direct SQL calls
            del vals['file_size']
        if len(ids) and len(vals):
            result = super(document_file, self).write(cr, uid, ids, vals, context=context)
        cr.commit()  # ?
        return result

    def create(self, cr, uid, vals, context=None):
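        """Create an attachment, filling in defaults from the context.

        Resolves the target directory, the attached record and the related
        partner, computes the file size and enforces name uniqueness before
        delegating to ir.attachment.
        """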
        if context is None:
            context = {}
        vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
        if not vals['parent_id']:
            vals['parent_id'] = self.pool.get('document.directory')._get_root_directory(cr, uid, context)
        if not vals.get('res_id', False) and context.get('default_res_id', False):
            vals['res_id'] = context.get('default_res_id', False)
        if not vals.get('res_model', False) and context.get('default_res_model', False):
            vals['res_model'] = context.get('default_res_model', False)
        if vals.get('res_id', False) and vals.get('res_model', False) \
                and not vals.get('partner_id', False):
            vals['partner_id'] = self.__get_partner_id(cr, uid,
                vals['res_model'], vals['res_id'], context)

        datas = None
        if vals.get('link', False):
            import urllib
            datas = base64.encodestring(urllib.urlopen(vals['link']).read())
        else:
            datas = vals.get('datas', False)

        if datas:
            # Note: this is the length of the base64 payload, not of the
            # decoded file.
            vals['file_size'] = len(datas)
        else:
            if vals.get('file_size'):
                del vals['file_size']
        if not self._check_duplication(cr, uid, vals):
            raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
        result = super(document_file, self).create(cr, uid, vals, context)
        cr.commit()  # ?
        return result

    def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
        """A helper to retrieve the associated partner from any res_model+id.

        It is a heuristic that tries to discover whether the mentioned
        record is clearly associated with a partner record.
        """
        obj_model = self.pool.get(res_model)
        if obj_model._name == 'res.partner':
            return res_id
        elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.partner_id.id
        elif 'address_id' in obj_model._columns and obj_model._columns['address_id']._obj == 'res.partner.address':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.address_id.partner_id.id
        return False

    def unlink(self, cr, uid, ids, context=None):
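        """Delete the attachments and their stored files.

        The database records are removed first (that part can be rolled
        back), then the files on disk are unlinked.
        """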
        stor = self.pool.get('document.storage')
        unres = []
        # We have to unlink in two stages: prepare the list of actual files
        # to be unlinked, update the db (safer to do first, since it can be
        # rolled back) and then unlink the files. The list wouldn't exist
        # any more after we discard the objects.
        ids = self.search(cr, uid, [('id', 'in', ids)])
        for f in self.browse(cr, uid, ids, context=context):
            # TODO: update the node cache
            par = f.parent_id
            storage_id = None
            while par:
                if par.storage_id:
                    storage_id = par.storage_id
                    break
                par = par.parent_id
            # assert storage_id, "Strange, found file #%s w/o storage!" % f.id
            # TOCHECK: the assertion above fails after running the yml tests.
            if storage_id:
                r = stor.prepare_unlink(cr, uid, storage_id, f)
                if r:
                    unres.append(r)
            else:
                logging.getLogger('document').warning("Unlinking attachment #%s %s that has no storage",
                                                      f.id, f.name)
        res = super(document_file, self).unlink(cr, uid, ids, context)
        stor.do_unlink(cr, uid, unres)
        return res


document_file()