# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import hashlib
import itertools
import logging
import os
import re

from openerp import tools
from openerp.tools.translate import _
from openerp.exceptions import AccessError
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID

_logger = logging.getLogger(__name__)


class ir_attachment(osv.osv):
    """Attachments are used to link binary files or URLs to any OpenERP document.

    External attachment storage
    ---------------------------

    The 'data' function field (_data_get, _data_set) is implemented using
    _file_read, _file_write and _file_delete, which can be overridden to
    implement other storage engines. Such methods should check for other
    location pseudo-URIs (example: hdfs://hadoopserver).

    The default implementation is the file:dirname location, which stores
    files on the local filesystem using names based on their sha1 hash.
    """

    _order = 'id desc'

    def _name_get_resname(self, cr, uid, ids, object, method, context):
        data = {}
        for attachment in self.browse(cr, uid, ids, context=context):
            model_object = attachment.res_model
            res_id = attachment.res_id
            if model_object and res_id:
                model_pool = self.pool[model_object]
                res = model_pool.name_get(cr, uid, [res_id], context)
                res_name = res and res[0][1] or False
                if res_name:
                    field = self._columns.get('res_name', False)
                    if field and len(res_name) > field.size:
                        res_name = res_name[:30] + '...'
                data[attachment.id] = res_name
            else:
                data[attachment.id] = False
        return data

    def _storage(self, cr, uid, context=None):
        return self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'ir_attachment.location', 'file')
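
    # Note: the storage location comes from the 'ir_attachment.location'
    # system parameter and defaults to 'file'; setting it to 'db' makes
    # _data_set keep new payloads in the db_datas column instead of the
    # filestore (see _data_set and force_storage below).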

    @tools.ormcache()
    def _filestore(self, cr, uid, context=None):
        return tools.config.filestore(cr.dbname)
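
    # The filestore root is derived from the server configuration; in a
    # standard setup it is typically <data_dir>/filestore/<dbname> (an
    # assumption — the exact path depends on tools.config). The result is
    # memoized by @tools.ormcache(), so it is computed once per registry.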

    def force_storage(self, cr, uid, context=None):
        """Force all attachments to be stored in the currently configured storage"""
        if not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
            raise AccessError(_('Only administrators can execute this action.'))

        location = self._storage(cr, uid, context)
        domain = {
            'db': [('store_fname', '!=', False)],
            'file': [('db_datas', '!=', False)],
        }[location]

        ids = self.search(cr, uid, domain, context=context)
        for attach in self.browse(cr, uid, ids, context=context):
            attach.write({'datas': attach.datas})
        return True
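
    # Usage sketch (assuming an administrator): after changing the
    # 'ir_attachment.location' parameter, calling force_storage() rewrites
    # each mismatched attachment's 'datas' onto itself, which funnels the
    # payload through _data_set and so into the newly configured storage.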

    # 'data' field implementation
    def _full_path(self, cr, uid, path):
        # sanitize path
        path = re.sub('[.]', '', path)
        path = path.strip('/\\')
        return os.path.join(self._filestore(cr, uid), path)
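
    # Stripping dots and leading slashes keeps the result inside the
    # filestore, e.g. (illustrative paths):
    #   _full_path(cr, uid, '../etc/passwd') -> '<filestore>/etc/passwd'
    #   _full_path(cr, uid, 'ab/abcdef')     -> '<filestore>/ab/abcdef'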

    def _get_path(self, cr, uid, bin_data):
        sha = hashlib.sha1(bin_data).hexdigest()

        # retro compatibility: reuse files stored under the old 3-char prefix
        fname = sha[:3] + '/' + sha
        full_path = self._full_path(cr, uid, fname)
        if os.path.isfile(full_path):
            return fname, full_path        # keep existing path

        # scatter files across 256 dirs
        # we use '/' in the db (even on windows)
        fname = sha[:2] + '/' + sha
        full_path = self._full_path(cr, uid, fname)
        dirname = os.path.dirname(full_path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        return fname, full_path
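
    # Example of the content-addressed layout: sha1('hello') is
    # 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d', so that payload is stored
    # as 'aa/aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d' under the filestore
    # (or kept at 'aaf/...' if it already exists under the legacy prefix).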

    def _file_read(self, cr, uid, fname, bin_size=False):
        full_path = self._full_path(cr, uid, fname)
        r = ''
        try:
            if bin_size:
                r = os.path.getsize(full_path)
            else:
                r = open(full_path, 'rb').read().encode('base64')
        except IOError:
            _logger.error("_file_read reading %s", full_path)
        return r

    def _file_write(self, cr, uid, value):
        bin_value = value.decode('base64')
        fname, full_path = self._get_path(cr, uid, bin_value)
        if not os.path.exists(full_path):
            try:
                with open(full_path, 'wb') as fp:
                    fp.write(bin_value)
            except IOError:
                _logger.error("_file_write writing %s", full_path)
        return fname

    def _file_delete(self, cr, uid, fname):
        count = self.search(cr, SUPERUSER_ID, [('store_fname', '=', fname)], count=True)
        full_path = self._full_path(cr, uid, fname)
        if count <= 1 and os.path.exists(full_path):
            try:
                os.unlink(full_path)
            except OSError:
                _logger.error("_file_delete could not unlink %s", full_path)
            except IOError:
                # harmless if the file disappeared in a race condition
                _logger.error("_file_delete could not unlink %s", full_path)

    def _data_get(self, cr, uid, ids, name, arg, context=None):
        if context is None:
            context = {}
        result = {}
        bin_size = context.get('bin_size')
        for attach in self.browse(cr, uid, ids, context=context):
            if attach.store_fname:
                result[attach.id] = self._file_read(cr, uid, attach.store_fname, bin_size)
            else:
                result[attach.id] = attach.db_datas
        return result
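
    # Note: reading 'datas' with context={'bin_size': True} returns the size
    # of the stored file (via os.path.getsize) instead of the base64 payload,
    # so callers such as list views can avoid loading file contents.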

    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        # We do not handle setting data to null
        if not value:
            return True
        if context is None:
            context = {}
        location = self._storage(cr, uid, context)
        file_size = len(value.decode('base64'))
        attach = self.browse(cr, uid, id, context=context)
        if attach.store_fname:
            self._file_delete(cr, uid, attach.store_fname)
        if location != 'db':
            fname = self._file_write(cr, uid, value)
            # write as SUPERUSER_ID: the user probably lacks write access,
            # e.g. when this is triggered during create()
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'store_fname': fname, 'file_size': file_size, 'db_datas': False}, context=context)
        else:
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'db_datas': value, 'file_size': file_size, 'store_fname': False}, context=context)
        return True

    _name = 'ir.attachment'
    _columns = {
        'name': fields.char('Attachment Name', required=True),
        'datas_fname': fields.char('File Name'),
        'description': fields.text('Description'),
        'res_name': fields.function(_name_get_resname, type='char', string='Resource Name', store=True),
        'res_model': fields.char('Resource Model', readonly=True, help="The database object this attachment will be attached to"),
        'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Owner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', change_default=True),
        'type': fields.selection([('url', 'URL'), ('binary', 'Binary')],
                                 'Type', help="Binary File or URL", required=True, change_default=True),
        'url': fields.char('Url', size=1024),
        # these field names are kept for backward compatibility with the document module
        'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
        'store_fname': fields.char('Stored Filename'),
        'db_datas': fields.binary('Database Data'),
        'file_size': fields.integer('File Size'),
    }

    _defaults = {
        'type': 'binary',
        'file_size': 0,
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
    }

    def _auto_init(self, cr, context=None):
        super(ir_attachment, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
            cr.commit()
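
    # The composite index speeds up the common reverse lookup of all
    # attachments linked to one record, e.g. (illustrative query):
    #   SELECT id FROM ir_attachment WHERE res_model = 'res.partner' AND res_id = 42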

    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Restricts the access to an ir.attachment, according to the referred model.

        In the 'document' module, it is overridden to relax this hard rule, since
        more complex ones apply there.
        """
        res_ids = {}
        if ids:
            if isinstance(ids, (int, long)):
                ids = [ids]
            cr.execute('SELECT DISTINCT res_model, res_id FROM ir_attachment WHERE id = ANY (%s)', (ids,))
            for rmod, rid in cr.fetchall():
                if not (rmod and rid):
                    continue
                res_ids.setdefault(rmod, set()).add(rid)
        if values:
            if values.get('res_model') and values.get('res_id'):
                res_ids.setdefault(values['res_model'], set()).add(values['res_id'])

        ima = self.pool.get('ir.model.access')
        for model, mids in res_ids.items():
            # when checking access rights, ignore attachments that are no longer
            # attached to a resource (resource was deleted but attachment was not)
            mids = self.pool[model].exists(cr, uid, mids)
            ima.check(cr, uid, model, mode)
            self.pool[model].check_access_rule(cr, uid, mids, mode, context=context)

    def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset,
                                                 limit=limit, order=order,
                                                 context=context, count=False,
                                                 access_rights_uid=access_rights_uid)
        if not ids:
            if count:
                return 0
            return []

        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        orig_ids = ids
        ids = set(ids)

        # For attachments, the permissions of the document they are attached to
        # apply, so we must remove attachments for which the user cannot access
        # the linked document.
        # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
        # and the permissions are checked in super() and below anyway.
        cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
        targets = cr.dictfetchall()
        model_attachments = {}
        for target_dict in targets:
            if not target_dict['res_model']:
                continue
            # model_attachments = { 'model': { res_id: set(attachment_ids) } }
            model_attachments.setdefault(target_dict['res_model'], {}).setdefault(target_dict['res_id'] or 0, set()).add(target_dict['id'])

        # To avoid multiple queries for each attachment found, checks are
        # performed in batch as much as possible.
        ima = self.pool.get('ir.model.access')
        for model, targets in model_attachments.iteritems():
            if not self.pool.get(model):
                continue
            if not ima.check(cr, uid, model, 'read', False):
                # remove all corresponding attachment ids
                for attach_id in itertools.chain(*targets.values()):
                    ids.remove(attach_id)
                continue  # skip ir.rule processing, these ones are out already

            # filter ids according to what access rules permit
            target_ids = targets.keys()
            allowed_ids = [0] + self.pool[model].search(cr, uid, [('id', 'in', target_ids)], context=context)
            disallowed_ids = set(target_ids).difference(allowed_ids)
            for res_id in disallowed_ids:
                for attach_id in targets[res_id]:
                    ids.remove(attach_id)

        # sort result according to the original sort ordering
        result = [id for id in orig_ids if id in ids]
        return len(result) if count else list(result)

    def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'read', context=context)
        return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context, load)

    def write(self, cr, uid, ids, vals, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'write', context=context, values=vals)
        if 'file_size' in vals:
            del vals['file_size']
        return super(ir_attachment, self).write(cr, uid, ids, vals, context)

    def copy(self, cr, uid, id, default=None, context=None):
        self.check(cr, uid, [id], 'write', context=context)
        return super(ir_attachment, self).copy(cr, uid, id, default, context)

    def unlink(self, cr, uid, ids, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'unlink', context=context)
        for attach in self.browse(cr, uid, ids, context=context):
            if attach.store_fname:
                self._file_delete(cr, uid, attach.store_fname)
        return super(ir_attachment, self).unlink(cr, uid, ids, context)

    def create(self, cr, uid, values, context=None):
        self.check(cr, uid, [], mode='write', context=context, values=values)
        if 'file_size' in values:
            del values['file_size']
        return super(ir_attachment, self).create(cr, uid, values, context)

    def action_get(self, cr, uid, context=None):
        return self.pool.get('ir.actions.act_window').for_xml_id(
            cr, uid, 'base', 'action_attachment', context=context)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|