2013-07-26 12:33:17 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
2014-03-04 13:31:17 +00:00
|
|
|
import hashlib
|
2013-11-08 13:26:15 +00:00
|
|
|
import inspect
|
2014-01-28 14:00:17 +00:00
|
|
|
import itertools
|
2013-11-04 11:10:21 +00:00
|
|
|
import logging
|
|
|
|
import math
|
2014-03-25 16:04:13 +00:00
|
|
|
import mimetypes
|
2014-01-28 14:00:17 +00:00
|
|
|
import re
|
2013-11-06 15:17:52 +00:00
|
|
|
import urlparse
|
2013-11-04 11:10:21 +00:00
|
|
|
|
|
|
|
import werkzeug
|
|
|
|
import werkzeug.exceptions
|
2014-03-04 11:07:16 +00:00
|
|
|
import werkzeug.utils
|
2013-11-04 11:10:21 +00:00
|
|
|
import werkzeug.wrappers
|
2014-01-28 14:00:17 +00:00
|
|
|
# optional python-slugify import (https://github.com/un33k/python-slugify)
|
|
|
|
try:
|
|
|
|
import slugify as slugify_lib
|
|
|
|
except ImportError:
|
|
|
|
slugify_lib = None
|
2013-07-26 12:33:17 +00:00
|
|
|
|
|
|
|
import openerp
|
2013-12-02 12:56:52 +00:00
|
|
|
from openerp.osv import orm, osv, fields
|
2013-11-04 11:10:21 +00:00
|
|
|
from openerp.tools.safe_eval import safe_eval
|
2014-02-19 10:30:32 +00:00
|
|
|
from openerp.addons.web.http import request
|
2013-11-08 13:26:15 +00:00
|
|
|
|
2013-08-14 15:11:22 +00:00
|
|
|
logger = logging.getLogger(__name__)
|
2013-07-26 12:33:17 +00:00
|
|
|
|
2014-01-28 20:14:17 +00:00
|
|
|
def url_for(path_or_uri, lang=None):
    """ Return a website-aware URL for ``path_or_uri``, inserting, replacing
    or stripping the language code segment as appropriate.

    Absolute URLs (with a scheme or netloc) are returned untouched; relative
    paths are resolved against the current request path.

    :param path_or_uri: str or unicode path/URI to localize
    :param lang: optional language code; when given, it is forced into the
                 URL even if it is the website's default language
    :return: unicode URL
    """
    # Work on utf-8 byte strings so urlparse/urljoin behave consistently
    # (Python 2: mixing unicode and str triggers implicit-decode errors).
    if isinstance(path_or_uri, unicode):
        path_or_uri = path_or_uri.encode('utf-8')
    current_path = request.httprequest.path
    if isinstance(current_path, unicode):
        current_path = current_path.encode('utf-8')
    location = path_or_uri.strip()
    # lang=None means "use the context language"; any explicit value
    # (including the default language) is forced into the URL.
    force_lang = lang is not None
    url = urlparse.urlparse(location)

    # Only rewrite local, relative locations; external URLs pass through.
    if request and not url.netloc and not url.scheme and (url.path or force_lang):
        location = urlparse.urljoin(current_path, location)

        lang = lang or request.context.get('lang')
        langs = [lg[0] for lg in request.website.get_languages()]

        # Skip language mangling for monolingual sites (unless forced) and
        # for routes not flagged as multilang.
        if (len(langs) > 1 or force_lang) and is_multilang_url(location, langs):
            ps = location.split('/')
            # ps[0] is '' (location starts with '/'); ps[1] is the first segment.
            if ps[1] in langs:
                # Replace the language only if we explicitly provide a language to url_for
                if force_lang:
                    ps[1] = lang
                # Remove the default language unless it's explicitly provided
                elif ps[1] == request.website.default_lang_code:
                    ps.pop(1)
            # Insert the context language or the provided language
            elif lang != request.website.default_lang_code or force_lang:
                ps.insert(1, lang)
            location = '/'.join(ps)

    return location.decode('utf-8')
|
2013-08-01 12:48:50 +00:00
|
|
|
|
2014-05-09 12:23:23 +00:00
|
|
|
def is_multilang_url(local_url, langs=None):
    """ Return True if ``local_url`` maps to a website controller that
    supports language-prefixed URLs (``multilang`` routing flag).

    :param local_url: local path, optionally with a query string, and
                      possibly already carrying a language prefix
    :param langs: optional list of installed language codes; fetched from
                  the current website when not provided
    :rtype: bool
    """
    if not langs:
        langs = [lg[0] for lg in request.website.get_languages()]
    spath = local_url.split('/')
    # if a language is already in the path, remove it
    if spath[1] in langs:
        spath.pop(1)
    local_url = '/'.join(spath)
    try:
        # Try to match an endpoint in werkzeug's routing table
        url = local_url.split('?')
        path = url[0]
        query_string = url[1] if len(url) > 1 else None
        router = request.httprequest.app.get_db_router(request.db).bind('')
        func = router.match(path, query_args=query_string)[0]
        # multilang defaults to True for website routes unless disabled.
        return func.routing.get('website', False) and func.routing.get('multilang', True)
    except Exception:
        # Any routing failure (404, redirect, method mismatch...) simply
        # means the URL is not a multilang website route.
        return False
|
|
|
|
|
2014-01-28 14:00:17 +00:00
|
|
|
def slugify(s, max_length=None):
    """ Turn ``s`` into a URL-safe slug, truncated to ``max_length``.

    Delegates to the optional python-slugify package when available;
    otherwise falls back to a simple whitespace-to-hyphen + ASCII filter.

    :param s: string to slugify
    :param max_length: optional maximum slug length (None = unlimited)
    :rtype: str
    """
    if slugify_lib:
        # Two distinct packages install under the name "slugify"; only
        # python-slugify understands max_length, so a TypeError means the
        # other one is installed and we fall back to the manual version.
        try:
            return slugify_lib.slugify(s, max_length=max_length)
        except TypeError:
            pass
    hyphenated = re.sub(r'\s+', '-', s)
    cleaned = re.sub(r'[^-_A-Za-z0-9]', '', hyphenated)
    return cleaned[:max_length]
|
|
|
|
|
2013-12-02 12:56:52 +00:00
|
|
|
def slug(value):
    """ Build a URL slug ``"<name>-<id>"`` for a record.

    :param value: either a browse_record, or an ``(id, name)`` pair such as
                  returned by ``name_search``
    :return: str slug; falls back to ``str(id)`` when the name slugifies
             to an empty string
    """
    if isinstance(value, orm.browse_record):
        # [(id, name)] = value.name_get()
        # Read _rec_name directly instead of calling name_get(): cheaper
        # and avoids overridden display logic in the slug.
        record_id, name = value.id, value[value._rec_name]
    else:
        # assume name_search result tuple
        record_id, name = value
    slugname = slugify(name or '')
    # Nothing printable survived slugification: use the bare id so the
    # URL still identifies the record.
    if not slugname:
        return str(record_id)
    return "%s-%d" % (slugname, record_id)
|
2013-12-02 12:56:52 +00:00
|
|
|
|
2013-08-12 13:43:12 +00:00
|
|
|
def urlplus(url, params):
    """ Append ``params`` to ``url`` as an encoded query string.

    :param url: base URL
    :param params: mapping of query parameters; falsy values leave the
                   URL without a query string
    :rtype: str
    """
    href = werkzeug.Href(url)
    return href(params or None)
|
2013-12-03 17:31:46 +00:00
|
|
|
|
2013-08-05 16:16:00 +00:00
|
|
|
class website(osv.osv):
    """ A website record: domain, languages, social accounts, public user
    and main menu, plus helpers used by the website controllers/QWeb
    (paging, page enumeration, kanban rendering...). """

    def _get_menu_website(self, cr, uid, ids, context=None):
        # IF a menu is changed, update all websites
        return self.search(cr, uid, [], context=context)

    def _get_menu(self, cr, uid, ids, name, arg, context=None):
        """ Function field getter for ``menu_id``: every website gets the
        first (lowest id) root menu. """
        root_domain = [('parent_id', '=', False)]
        menus = self.pool.get('website.menu').search(cr, uid, root_domain, order='id', context=context)
        menu = menus and menus[0] or False
        return dict( map(lambda x: (x, menu), ids) )

    _name = "website" # Avoid website.website convention for conciseness (for new api). Got a special authorization from xmo and rco
    _description = "Website"
    _columns = {
        'name': fields.char('Domain'),
        'company_id': fields.many2one('res.company', string="Company"),
        'language_ids': fields.many2many('res.lang', 'website_lang_rel', 'website_id', 'lang_id', 'Languages'),
        'default_lang_id': fields.many2one('res.lang', string="Default language"),
        'default_lang_code': fields.related('default_lang_id', 'code', type="char", string="Default language code", store=True),
        'social_twitter': fields.char('Twitter Account'),
        'social_facebook': fields.char('Facebook Account'),
        'social_github': fields.char('GitHub Account'),
        'social_linkedin': fields.char('LinkedIn Account'),
        'social_youtube': fields.char('Youtube Account'),
        'social_googleplus': fields.char('Google+ Account'),
        'google_analytics_key': fields.char('Google Analytics Key'),
        'user_id': fields.many2one('res.users', string='Public User'),
        'partner_id': fields.related('user_id','partner_id', type='many2one', relation='res.partner', string='Public Partner'),
        'menu_id': fields.function(_get_menu, relation='website.menu', type='many2one', string='Main Menu',
            store= {
                'website.menu': (_get_menu_website, ['sequence','parent_id','website_id'], 10)
            })
    }

    _defaults = {
        # NOTE(review): this default for company_id resolves the
        # 'base.public_user' xmlid, which is a res.users record, not a
        # res.company — looks like a copy/paste slip; confirm intent.
        'company_id': lambda self,cr,uid,c: self.pool['ir.model.data'].xmlid_to_res_id(cr, openerp.SUPERUSER_ID, 'base.public_user'),
    }

    # cf. Wizard hack in website_views.xml
    def noop(self, *args, **kwargs):
        pass

    def write(self, cr, uid, ids, vals, context=None):
        # Invalidate the language cache on any write: language_ids may
        # have changed and get_languages() is ormcached.
        self._get_languages.clear_cache(self)
        return super(website, self).write(cr, uid, ids, vals, context)

    def new_page(self, cr, uid, name, template='website.default_page', ispage=True, context=None):
        """ Create a new website page from ``template`` unless a page with
        the same slugified name already exists in the template's module.

        :param name: human-readable page name, slugified into the xmlid
        :param template: fully-qualified xmlid of the view to copy
        :param ispage: whether the copied view is flagged as a page
        :return: str xmlid of the (existing or newly created) page view
        """
        context = context or {}
        imd = self.pool.get('ir.model.data')
        view = self.pool.get('ir.ui.view')
        template_module, template_name = template.split('.')

        # completely arbitrary max_length
        page_name = slugify(name, max_length=50)
        page_xmlid = "%s.%s" % (template_module, page_name)

        try:
            # existing page
            imd.get_object_reference(cr, uid, template_module, page_name)
        except ValueError:
            # new page
            _, template_id = imd.get_object_reference(cr, uid, template_module, template_name)
            page_id = view.copy(cr, uid, template_id, context=context)
            page = view.browse(cr, uid, page_id, context=context)
            page.write({
                # The copied arch still references the template's own
                # xmlid (e.g. t-name); rewrite it to the new page's xmlid.
                'arch': page.arch.replace(template, page_xmlid),
                'name': page_name,
                'page': ispage,
            })
            # noupdate so module upgrades don't overwrite user pages.
            imd.create(cr, uid, {
                'name': page_name,
                'module': template_module,
                'model': 'ir.ui.view',
                'res_id': page_id,
                'noupdate': True
            }, context=context)
        return page_xmlid

    def page_for_name(self, cr, uid, ids, name, module='website', context=None):
        # whatever
        return '%s.%s' % (module, slugify(name, max_length=50))

    def page_exists(self, cr, uid, ids, name, module='website', context=None):
        """ Return the (model, res_id) reference for the page xmlid, or
        False if it cannot be resolved. """
        try:
            return self.pool["ir.model.data"].get_object_reference(cr, uid, module, name)
        except:
            return False

    @openerp.tools.ormcache(skiparg=3)
    def _get_languages(self, cr, uid, id, context=None):
        # Cached per website id; invalidated by write() above.
        website = self.browse(cr, uid, id)
        return [(lg.code, lg.name) for lg in website.language_ids]

    def get_languages(self, cr, uid, ids, context=None):
        """ Return [(code, name)] of the website's installed languages. """
        return self._get_languages(cr, uid, ids[0])

    def get_current_website(self, cr, uid, context=None):
        # TODO: Select website, currently hard coded
        return self.pool['website'].browse(cr, uid, 1, context=context)

    def is_publisher(self, cr, uid, ids, context=None):
        """ True when the current user may edit website content, proxied by
        write access on ir.ui.view (non-raising ACL check). """
        Access = self.pool['ir.model.access']
        is_website_publisher = Access.check(cr, uid, 'ir.ui.view', 'write', False, context)
        return is_website_publisher

    def get_template(self, cr, uid, ids, template, context=None):
        """ Resolve ``template`` (view id, 'xmlid' or bare name defaulting
        to the website module) into an ir.ui.view browse record. """
        if isinstance(template, (int, long)):
            view_id = template
        else:
            if '.' not in template:
                template = 'website.%s' % template
            module, xmlid = template.split('.', 1)
            model, view_id = request.registry["ir.model.data"].get_object_reference(cr, uid, module, xmlid)
        return self.pool["ir.ui.view"].browse(cr, uid, view_id, context=context)

    def _render(self, cr, uid, ids, template, values=None, context=None):
        # TODO: remove this. (just kept for backward api compatibility for saas-3)
        return self.pool['ir.ui.view'].render(cr, uid, template, values=values, context=context)

    def render(self, cr, uid, ids, template, values=None, status_code=None, context=None):
        # TODO: remove this. (just kept for backward api compatibility for saas-3)
        return request.render(template, values, uid=uid)

    def pager(self, cr, uid, ids, url, total, page=1, step=30, scope=5, url_args=None, context=None):
        """ Build the pager structure consumed by the website pager QWeb
        template.

        :param url: base URL of the paged view ('/page/N' is appended)
        :param total: total number of records
        :param page: requested page, clamped to [1, page_count]
        :param step: records per page
        :param scope: number of page links shown around the current page
        :param url_args: extra query-string parameters kept across pages
        :return: dict with page_count, offset and url/num dicts for the
                 current/first/previous/next/last pages plus the visible
                 page window
        """
        # Compute Pager
        page_count = int(math.ceil(float(total) / step))

        page = max(1, min(int(page), page_count))
        scope -= 1

        # Center the visible window on the current page, clamped to the
        # valid page range.
        pmin = max(page - int(math.floor(scope/2)), 1)
        pmax = min(pmin + scope, page_count)

        if pmax - pmin < scope:
            pmin = pmax - scope if pmax - scope > 0 else 1

        def get_url(page):
            # Page 1 keeps the bare URL (canonical, no '/page/1').
            _url = "%s/page/%s" % (url, page) if page > 1 else url
            if url_args:
                _url = "%s?%s" % (_url, werkzeug.url_encode(url_args))
            return _url

        return {
            "page_count": page_count,
            "offset": (page - 1) * step,
            "page": {
                'url': get_url(page),
                'num': page
            },
            "page_start": {
                'url': get_url(pmin),
                'num': pmin
            },
            "page_previous": {
                'url': get_url(max(pmin, page - 1)),
                'num': max(pmin, page - 1)
            },
            "page_next": {
                'url': get_url(min(pmax, page + 1)),
                'num': min(pmax, page + 1)
            },
            "page_end": {
                'url': get_url(pmax),
                'num': pmax
            },
            "pages": [
                {'url': get_url(page), 'num': page}
                for page in xrange(pmin, pmax+1)
            ]
        }

    def rule_is_enumerable(self, rule):
        """ Checks that it is possible to generate sensible GET queries for
        a given rule (if the endpoint matches its own requirements)

        :type rule: werkzeug.routing.Rule
        :rtype: bool
        """
        endpoint = rule.endpoint
        methods = rule.methods or ['GET']
        converters = rule._converters.values()
        # NOTE(review): routing.get('website') is tested twice in this
        # condition (3rd and last clauses) — the last one is redundant.
        if not ('GET' in methods
            and endpoint.routing['type'] == 'http'
            and endpoint.routing['auth'] in ('none', 'public')
            and endpoint.routing.get('website', False)
            and all(hasattr(converter, 'generate') for converter in converters)
            and endpoint.routing.get('website')):
            return False

        # dont't list routes without argument having no default value or converter
        spec = inspect.getargspec(endpoint.method.original_func)

        # remove self and arguments having a default value
        defaults_count = len(spec.defaults or [])
        args = spec.args[1:(-defaults_count or None)]

        # check that all args have a converter
        return all( (arg in rule._converters) for arg in args)

    def enumerate_pages(self, cr, uid, ids, query_string=None, context=None):
        """ Available pages in the website/CMS. This is mostly used for links
        generation and can be overridden by modules setting up new HTML
        controllers for dynamic pages (e.g. blog).

        By default, returns template views marked as pages.

        :param str query_string: a (user-provided) string, fetches pages
                                 matching the string
        :returns: a list of mappings with two keys: ``name`` is the displayable
                  name of the resource (page), ``url`` is the absolute URL
                  of the same.
        :rtype: list({name: str, url: str})
        """
        router = request.httprequest.app.get_db_router(request.db)
        # Force enumeration to be performed as public user
        uid = request.website.user_id.id
        url_list = []
        for rule in router.iter_rules():
            if not self.rule_is_enumerable(rule):
                continue

            converters = rule._converters or {}
            # ``values`` is the running cartesian product of every
            # converter's generated argument dicts.
            values = [{}]
            convitems = converters.items()
            # converters with a domain are processed after the other ones
            gd = lambda x: hasattr(x[1], 'domain') and (x[1].domain <> '[]')
            convitems.sort(lambda x, y: cmp(gd(x), gd(y)))
            for (name, converter) in convitems:
                newval = []
                for val in values:
                    for v in converter.generate(request.cr, uid, query=query_string, args=val, context=context):
                        newval.append( val.copy() )
                        # The converter yields its value under 'loc';
                        # rekey it under the rule's argument name.
                        v[name] = v['loc']
                        del v['loc']
                        newval[-1].update(v)
                values = newval

            for value in values:
                domain_part, url = rule.build(value, append_unknown=False)
                page = {'loc': url}
                # '__'-prefixed keys are converter metadata (e.g. lastmod)
                # exported on the page dict without the prefix.
                for key,val in value.items():
                    if key.startswith('__'):
                        page[key[2:]] = val
                if url in ('/sitemap.xml',):
                    continue
                if url in url_list:
                    continue
                url_list.append(url)
                if query_string and not self.page_matches(cr, uid, page, query_string, context=context):
                    continue
                yield page

    def search_pages(self, cr, uid, ids, needle=None, limit=None, context=None):
        """ Return at most ``limit`` pages whose name matches ``needle``. """
        return list(itertools.islice(
            self.enumerate_pages(cr, uid, ids, query_string=needle, context=context),
            limit))

    def page_matches(self, cr, uid, page, needle, context=None):
        """ Checks that a "page" matches a user-provide search string.

        The default implementation attempts to perform a non-contiguous
        substring match of the page's name.

        :param page: {'name': str, 'url': str}
        :param needle: str
        :rtype: bool

        NOTE(review): an empty ``needle`` raises StopIteration from the
        first next() call below; callers currently guard with a truthiness
        check before calling — confirm before relying on it elsewhere.
        """
        haystack = page['name'].lower()

        needle = iter(needle.lower())
        n = next(needle)
        end = object()

        for char in haystack:
            if char != n: continue

            n = next(needle, end)
            # found all characters of needle in haystack in order
            if n is end:
                return True

        return False

    def kanban(self, cr, uid, ids, model, domain, column, template, step=None, scope=None, orderby=None, context=None):
        """ Render a grouped kanban view of ``model`` records.

        Groups records by ``column`` (a many2one field), pages each group
        independently (per-group page numbers are round-tripped through the
        'kanban' query-string parameter as 'relation_id-page,...'), and
        renders the 'website.kanban_contain' template.
        """
        step = step and int(step) or 10
        scope = scope and int(scope) or 5
        orderby = orderby or "name"

        get_args = dict(request.httprequest.args or {})
        model_obj = self.pool[model]
        relation = model_obj._columns.get(column)._obj
        relation_obj = self.pool[relation]

        get_args.setdefault('kanban', "")
        kanban = get_args.pop('kanban')
        # Base URL each column appends its own page state to.
        kanban_url = "?%s&kanban=" % werkzeug.url_encode(get_args)

        # Parse 'relation_id-page' pairs from the kanban parameter.
        pages = {}
        for col in kanban.split(","):
            if col:
                col = col.split("-")
                pages[int(col[0])] = int(col[1])

        objects = []
        for group in model_obj.read_group(cr, uid, domain, ["id", column], groupby=column):
            obj = {}

            # browse column
            relation_id = group[column][0]
            obj['column_id'] = relation_obj.browse(cr, uid, relation_id)

            # Preserve the page state of every OTHER column in this
            # column's pagination links.
            obj['kanban_url'] = kanban_url
            for k, v in pages.items():
                if k != relation_id:
                    obj['kanban_url'] += "%s-%s" % (k, v)

            # pager
            number = model_obj.search(cr, uid, group['__domain'], count=True)
            obj['page_count'] = int(math.ceil(float(number) / step))
            obj['page'] = pages.get(relation_id) or 1
            if obj['page'] > obj['page_count']:
                obj['page'] = obj['page_count']
            offset = (obj['page']-1) * step
            obj['page_start'] = max(obj['page'] - int(math.floor((scope-1)/2)), 1)
            obj['page_end'] = min(obj['page_start'] + (scope-1), obj['page_count'])

            # view data
            obj['domain'] = group['__domain']
            obj['model'] = model
            obj['step'] = step
            obj['orderby'] = orderby

            # browse objects
            object_ids = model_obj.search(cr, uid, group['__domain'], limit=step, offset=offset, order=orderby)
            obj['object_ids'] = model_obj.browse(cr, uid, object_ids)

            objects.append(obj)

        values = {
            'objects': objects,
            'range': range,
            'template': template,
        }
        return request.website._render("website.kanban_contain", values)

    def kanban_col(self, cr, uid, ids, model, domain, page, template, step, orderby, context=None):
        """ Render one kanban column: ``template`` applied to each record of
        the page-``page`` slice of ``model`` matching ``domain`` (a string,
        safe_eval'ed). Returns the concatenated HTML. """
        html = ""
        model_obj = self.pool[model]
        domain = safe_eval(domain)
        step = int(step)
        offset = (int(page)-1) * step
        object_ids = model_obj.search(cr, uid, domain, limit=step, offset=offset, order=orderby)
        object_ids = model_obj.browse(cr, uid, object_ids)
        for object_id in object_ids:
            html += request.website._render(template, {'object_id': object_id})
        return html
|
2013-08-12 16:31:23 +00:00
|
|
|
|
2013-10-24 14:18:40 +00:00
|
|
|
class website_menu(osv.osv):
    """ Hierarchical website navigation menu (nested-set backed, ordered
    by sequence). """
    _name = "website.menu"
    _description = "Website Menu"
    _columns = {
        'name': fields.char('Menu', size=64, required=True, translate=True),
        'url': fields.char('Url', translate=True),
        'new_window': fields.boolean('New Window'),
        'sequence': fields.integer('Sequence'),
        # TODO: support multiwebsite once done for ir.ui.views
        'website_id': fields.many2one('website', 'Website'),
        'parent_id': fields.many2one('website.menu', 'Parent Menu', select=True, ondelete="cascade"),
        'child_id': fields.one2many('website.menu', 'parent_id', string='Child Menus'),
        # Nested-set bounds maintained by _parent_store.
        'parent_left': fields.integer('Parent Left', select=True),
        'parent_right': fields.integer('Parent Right', select=True),
    }

    def __defaults_sequence(self, cr, uid, context):
        # New menus default to the current maximum sequence so they are
        # appended at the end of the menu bar.
        menu = self.search_read(cr, uid, [(1,"=",1)], ["sequence"], limit=1, order="sequence DESC", context=context)
        return menu and menu[0]["sequence"] or 0

    _defaults = {
        'url': '',
        'sequence': __defaults_sequence,
        'new_window': False,
    }
    _parent_store = True
    _parent_order = 'sequence'
    _order = "sequence"

    # would be better to take a menu_id as argument
    def get_tree(self, cr, uid, website_id, context=None):
        """ Return the website's menu as a recursive dict tree:
        {id, name, url, new_window, sequence, parent_id, children: [...]}. """
        def make_tree(node):
            menu_node = dict(
                id=node.id,
                name=node.name,
                url=node.url,
                new_window=node.new_window,
                sequence=node.sequence,
                parent_id=node.parent_id.id,
                children=[],
            )
            for child in node.child_id:
                menu_node['children'].append(make_tree(child))
            return menu_node
        menu = self.pool.get('website').browse(cr, uid, website_id, context=context).menu_id
        return make_tree(menu)

    def save(self, cr, uid, website_id, data, context=None):
        """ Persist the client-side menu editor state.

        :param data: {'data': [menu dicts], 'to_delete': [ids]};
                     new menus carry a string placeholder id which is
                     replaced (also in their children's parent_id) once
                     the record is created
        """
        def replace_id(old_id, new_id):
            # Swap a client-side placeholder id for the real database id,
            # both on the menu itself and on children referencing it.
            for menu in data['data']:
                if menu['id'] == old_id:
                    menu['id'] = new_id
                if menu['parent_id'] == old_id:
                    menu['parent_id'] = new_id
        to_delete = data['to_delete']
        if to_delete:
            self.unlink(cr, uid, to_delete, context=context)
        # First pass: create new menus (string ids) so every menu has a
        # real id before the write pass below.
        for menu in data['data']:
            mid = menu['id']
            if isinstance(mid, str):
                new_id = self.create(cr, uid, {'name': menu['name']}, context=context)
                replace_id(mid, new_id)
        # Second pass: write the full values (name, url, parent, sequence...).
        for menu in data['data']:
            self.write(cr, uid, [menu['id']], menu, context=context)
        return True
|
|
|
|
|
2013-09-20 05:10:29 +00:00
|
|
|
class ir_attachment(osv.osv):
    """ Website extensions of ir.attachment: public URL, content checksum
    (used to deduplicate uploaded images), resized copy and mimetype. """
    _inherit = "ir.attachment"

    def _website_url_get(self, cr, uid, ids, name, arg, context=None):
        """ Function field getter: the attachment's own url when set,
        otherwise a /website/image URL serving its datas. """
        result = {}
        for attach in self.browse(cr, uid, ids, context=context):
            if attach.url:
                result[attach.id] = attach.url
            else:
                result[attach.id] = urlplus('/website/image', {
                    'model': 'ir.attachment',
                    'field': 'datas',
                    'id': attach.id
                })
        return result

    def _datas_checksum(self, cr, uid, ids, name, arg, context=None):
        # Function field getter for 'datas_checksum'.
        return dict(
            (attach['id'], self._compute_checksum(attach))
            for attach in self.read(
                cr, uid, ids, ['res_model', 'res_id', 'type', 'datas'],
                context=context)
        )

    def _compute_checksum(self, attachment_dict):
        """ SHA1 of the (base64) datas, but only for binary view-attached
        attachments without res_id/url — i.e. website-uploaded media
        eligible for deduplication. Returns None otherwise. """
        if attachment_dict.get('res_model') == 'ir.ui.view'\
                and not attachment_dict.get('res_id') and not attachment_dict.get('url')\
                and attachment_dict.get('type', 'binary') == 'binary'\
                and attachment_dict.get('datas'):
            return hashlib.new('sha1', attachment_dict['datas']).hexdigest()
        return None

    def _datas_big(self, cr, uid, ids, name, arg, context=None):
        """ Function field getter: datas resized to the standard 'big'
        image size; False for empty or non-image content. """
        result = dict.fromkeys(ids, False)
        # bin_size contexts only want sizes, never the (re-encoded) content.
        if context and context.get('bin_size'):
            return result

        for record in self.browse(cr, uid, ids, context=context):
            if not record.datas: continue
            try:
                result[record.id] = openerp.tools.image_resize_image_big(record.datas)
            except IOError: # apparently the error PIL.Image.open raises
                pass

        return result

    _columns = {
        'datas_checksum': fields.function(_datas_checksum, size=40,
            string="Datas checksum", type='char', store=True, select=True),
        'website_url': fields.function(_website_url_get, string="Attachment URL", type='char'),
        'datas_big': fields.function (_datas_big, type='binary', store=True,
            string="Resized file content"),
        'mimetype': fields.char('Mime Type', readonly=True),
    }

    def _add_mimetype_if_needed(self, values):
        # Mutates ``values`` in place: derive mimetype from the uploaded
        # filename when one is provided.
        if values.get('datas_fname'):
            values['mimetype'] = mimetypes.guess_type(values.get('datas_fname'))[0] or 'application/octet-stream'

    def create(self, cr, uid, values, context=None):
        """ Deduplicate website media on creation: if an attachment with
        the same datas checksum already exists, return its id instead of
        creating a duplicate. """
        chk = self._compute_checksum(values)
        if chk:
            match = self.search(cr, uid, [('datas_checksum', '=', chk)], context=context)
            if match:
                return match[0]
        self._add_mimetype_if_needed(values)
        return super(ir_attachment, self).create(
            cr, uid, values, context=context)

    def write(self, cr, uid, ids, values, context=None):
        self._add_mimetype_if_needed(values)
        return super(ir_attachment, self).write(cr, uid, ids, values, context=context)

    def try_remove(self, cr, uid, ids, context=None):
        """ Removes a web-based image attachment if it is used by no view
        (template)

        Returns a dict mapping attachments which would not be removed (if any)
        mapped to the views preventing their removal
        """
        Views = self.pool['ir.ui.view']
        attachments_to_remove = []
        # views blocking removal of the attachment
        removal_blocked_by = {}

        for attachment in self.browse(cr, uid, ids, context=context):
            # in-document URLs are html-escaped, a straight search will not
            # find them
            url = werkzeug.utils.escape(attachment.website_url)
            # Match the URL quoted either way inside view archs.
            ids = Views.search(cr, uid, ["|", ('arch', 'like', '"%s"' % url), ('arch', 'like', "'%s'" % url)], context=context)

            if ids:
                removal_blocked_by[attachment.id] = Views.read(
                    cr, uid, ids, ['name'], context=context)
            else:
                attachments_to_remove.append(attachment.id)
        if attachments_to_remove:
            self.unlink(cr, uid, attachments_to_remove, context=context)
        return removal_blocked_by
|
|
|
|
|
2013-08-12 16:31:23 +00:00
|
|
|
class res_partner(osv.osv):
    """ Partner extensions: Google Maps helpers built from the partner's
    address. """
    _inherit = "res.partner"

    def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
        """ Return a Google Static Maps image URL centered on the first
        partner's address.

        :param zoom: map zoom level
        :param width: image width in pixels
        :param height: image height in pixels
        :rtype: str
        """
        partner = self.browse(cr, uid, ids[0], context=context)
        params = {
            'center': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
            # Fixed: the Static Maps 'size' parameter is WIDTHxHEIGHT; the
            # previous code interpolated (height, width), which only worked
            # because the defaults are equal.
            'size': "%sx%s" % (width, height),
            'zoom': zoom,
            'sensor': 'false',
        }
        return urlplus('http://maps.googleapis.com/maps/api/staticmap' , params)

    def google_map_link(self, cr, uid, ids, zoom=8, context=None):
        """ Return a Google Maps link ('q' search URL) for the first
        partner's address.

        NOTE(review): the ``zoom`` parameter is accepted but the zoom is
        hard-coded to 10 below; kept as-is to preserve existing links.
        """
        partner = self.browse(cr, uid, ids[0], context=context)
        params = {
            'q': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
            'z': 10
        }
        return urlplus('https://maps.google.com/maps' , params)
|
2013-10-09 12:09:04 +00:00
|
|
|
|
2013-11-04 16:59:54 +00:00
|
|
|
class res_company(osv.osv):
    """ Company extensions: delegate the Google Maps helpers to the
    company's partner (read as superuser so public website visitors can
    see the map even without partner access). """
    _inherit = "res.company"

    def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
        # Returns None when the company has no partner.
        partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
        return partner and partner.google_map_img(zoom, width, height, context=context) or None

    def google_map_link(self, cr, uid, ids, zoom=8, context=None):
        # Returns None when the company has no partner.
        partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
        return partner and partner.google_map_link(zoom, context=context) or None
|
|
|
|
|
2014-01-14 09:36:03 +00:00
|
|
|
class base_language_install(osv.osv_memory):
    """ Extend the language-installation wizard so a new language can be
    attached to websites, then redirect back to the website in that
    language. """
    _inherit = "base.language.install"
    _columns = {
        'website_ids': fields.many2many('website', string='Websites to translate'),
    }

    def default_get(self, cr, uid, fields, context=None):
        """ Pre-select the website the wizard was launched from
        (context['params']['website_id'], set by the website client). """
        if context is None:
            context = {}
        defaults = super(base_language_install, self).default_get(cr, uid, fields, context)
        website_id = context.get('params', {}).get('website_id')
        if website_id:
            if 'website_ids' not in defaults:
                defaults['website_ids'] = []
            defaults['website_ids'].append(website_id)
        return defaults

    def lang_install(self, cr, uid, ids, context=None):
        """ Install the language, add it to the selected websites'
        language_ids, and — when the launcher provided a 'url_return'
        param — redirect there with '[lang]' substituted by the new
        language code. Otherwise return the standard wizard action. """
        if context is None:
            context = {}
        action = super(base_language_install, self).lang_install(cr, uid, ids, context)
        language_obj = self.browse(cr, uid, ids)[0]
        website_ids = [website.id for website in language_obj['website_ids']]
        lang_id = self.pool['res.lang'].search(cr, uid, [('code', '=', language_obj['lang'])])
        if website_ids and lang_id:
            # (4, id) = link the existing res.lang to each website.
            data = {'language_ids': [(4, lang_id[0])]}
            self.pool['website'].write(cr, uid, website_ids, data)
        params = context.get('params', {})
        if 'url_return' in params:
            return {
                'url': params['url_return'].replace('[lang]', language_obj['lang']),
                'type': 'ir.actions.act_url',
                'target': 'self'
            }
        return action
|
2013-10-18 09:28:20 +00:00
|
|
|
|
2014-01-29 05:14:03 +00:00
|
|
|
class website_seo_metadata(osv.Model):
    """ Mixin model adding translatable SEO meta fields (title,
    description, keywords) to models rendered as website pages. """
    _name = 'website.seo.metadata'
    _description = 'SEO metadata'

    _columns = {
        'website_meta_title': fields.char("Website meta title", translate=True),
        'website_meta_description': fields.text("Website meta description", translate=True),
        'website_meta_keywords': fields.char("Website meta keywords", translate=True),
    }
|
2014-01-19 17:46:42 +00:00
|
|
|
|
|
|
|
# vim:et:
|