bzr revid: rvalyi@gmail.com-20091217232137-y7gb1lvi3vyjbvtn
Raphaël Valyi 2009-12-17 21:21:37 -02:00
commit a638f8aaba
106 changed files with 30224 additions and 35036 deletions

View File

@ -3,3 +3,18 @@
.bzrignore
bin/addons/*
bin/filestore*
.Python
include
lib
bin/activate
bin/activate_this.py
bin/easy_install
bin/easy_install-2.6
bin/pip
bin/python
bin/python2.6
*.pyc
*.pyo
build/
bin/yolk
bin/pil*.py

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -7,14 +7,14 @@ msgstr ""
"Project-Id-Version: OpenERP Server 5.0.4\n"
"Report-Msgid-Bugs-To: support@openerp.com\n"
"POT-Creation-Date: 2009-12-09 10:13+0000\n"
"PO-Revision-Date: 2009-12-13 17:20+0000\n"
"PO-Revision-Date: 2009-12-14 06:10+0000\n"
"Last-Translator: Jordi Esteve - http://www.zikzakmedia.com "
"<jesteve@zikzakmedia.com>\n"
"Language-Team: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-14 04:35+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 04:59+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. module: base

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -14,7 +14,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-12 04:42+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:02+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. module: base

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-12 04:41+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:00+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. module: base

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-13 04:37+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:01+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. module: base

View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-14 04:35+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:02+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. module: base

View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-12 04:42+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. module: base

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-12 04:42+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. module: base

File diff suppressed because it is too large

View File

@ -43,13 +43,13 @@
</record>
<record id="action_res_bank_form" model="ir.actions.act_window">
<field name="name">Banks</field>
<field name="name">Bank List</field>
<field name="res_model">res.bank</field>
<field name="view_type">form</field>
<field name="view_mode">tree,form</field>
</record>
<menuitem action="action_res_bank_form" groups="group_extended" id="menu_action_res_bank_form" parent="menu_base_config" sequence="12"/>
<menuitem action="action_res_bank_form" groups="group_extended" id="menu_action_res_bank_form" parent="menu_base_config" sequence="40"/>
</data>
</openerp>

View File

@ -1,11 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<openerp>
<data>
<!--
Country
-->
<record id="view_country_tree" model="ir.ui.view">
<field name="name">res.country.tree</field>
<field name="model">res.country</field>
@ -17,7 +17,7 @@
</tree>
</field>
</record>
<record id="view_country_form" model="ir.ui.view">
<field name="name">res.country.form</field>
<field name="model">res.country</field>
@ -29,21 +29,21 @@
</form>
</field>
</record>
<record id="action_country" model="ir.actions.act_window">
<field name="name">Countries</field>
<field name="type">ir.actions.act_window</field>
<field name="res_model">res.country</field>
<field name="view_type">form</field>
</record>
<menuitem id="menu_localisation" name="Localisation" parent="menu_base_config"/>
<menuitem id="menu_localisation" name="Localisation" parent="menu_base_config" sequence="30"/>
<menuitem action="action_country" id="menu_country_partner" parent="menu_localisation"/>
<!--
State
-->
State
-->
<record id="view_country_state_tree" model="ir.ui.view">
<field name="name">res.country.state.tree</field>
<field name="model">res.country.state</field>
@ -56,7 +56,7 @@
</tree>
</field>
</record>
<record id="view_country_state_form" model="ir.ui.view">
<field name="name">res.country.state.form</field>
<field name="model">res.country.state</field>
@ -69,7 +69,7 @@
</form>
</field>
</record>
<record id="action_country_state" model="ir.actions.act_window">
<field name="name">Fed. States</field>
<field name="type">ir.actions.act_window</field>
@ -77,8 +77,8 @@
<field name="view_type">form</field>
<field name="view_id" ref="view_country_state_tree"/>
</record>
<menuitem action="action_country_state" id="menu_country_state_partner" parent="menu_localisation"/>
</data>
</openerp>

View File

@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<openerp>
<data>
<record id="res_partner_canal-view" model="ir.ui.view">
<field name="name">res.partner.canal.form</field>
<field name="model">res.partner.canal</field>
@ -19,8 +19,9 @@
<field name="view_type">form</field>
<field name="view_mode">tree,form</field>
</record>
<menuitem id="next_id_14" name="Partner Events" parent="base.menu_base_config"/><menuitem action="res_partner_canal-act" id="menu_res_partner_canal-act" parent="next_id_14"/>
<menuitem id="next_id_14" name="Partner Events" parent="base.menu_base_config_partner"/>
<menuitem action="res_partner_canal-act" id="menu_res_partner_canal-act" parent="next_id_14"/>
<record id="res_partner_event_type-view" model="ir.ui.view">
<field name="name">res.partner.event.type.form</field>
<field name="model">res.partner.event.type</field>
@ -33,7 +34,7 @@
</form>
</field>
</record>
<record id="res_partner_event_type_view_tree" model="ir.ui.view">
<field name="name">res.partner.event.type.tree</field>
<field name="model">res.partner.event.type</field>
@ -45,7 +46,7 @@
</tree>
</field>
</record>
<record id="res_partner_event_type-act" model="ir.actions.act_window">
<field name="name">Active Partner Events</field>
<field name="res_model">res.partner.event.type</field>
@ -54,7 +55,7 @@
<field name="context">{'active_test': False}</field>
</record>
<menuitem action="res_partner_event_type-act" id="menu_res_partner_event_type-act" parent="base.next_id_14"/>
<record id="res_partner_som_tree-view" model="ir.ui.view">
<field name="name">res.partner.som.tree</field>
<field name="model">res.partner.som</field>
@ -66,7 +67,7 @@
</tree>
</field>
</record>
<record id="res_partner_som-view" model="ir.ui.view">
<field name="name">res.partner.som.form</field>
<field name="model">res.partner.som</field>
@ -85,7 +86,7 @@
<field name="view_mode">tree,form</field>
</record>
<menuitem action="res_partner_som-act" id="menu_res_partner_som-act" parent="base.next_id_14"/>
<record id="res_partner_event-wopartner-view_form" model="ir.ui.view">
<field name="name">res.partner.event.form</field>
<field name="model">res.partner.event</field>
@ -110,7 +111,7 @@
</form>
</field>
</record>
<record id="res_partner_event-wopartner-view_tree" model="ir.ui.view">
<field name="name">res.partner.event.tree</field>
<field name="model">res.partner.event</field>

View File

@ -2,21 +2,23 @@
<openerp>
<data>
<menuitem icon="terp-partner" id="menu_base_partner" name="Partners" sequence="0"/>
<menuitem id="menu_base_config" name="Configuration" parent="menu_base_partner" sequence="1"
groups="group_system"/>
<menuitem id="menu_base_config_partner" name="Partners" parent="menu_base_config" sequence="10" />
<menuitem id="menu_base_config_contact" name="Contacts" parent="menu_base_config" sequence="20" />
<!--
================================
Function
================================
-->
================================
Function
================================
-->
<record id="view_partner_function_form" model="ir.ui.view">
<field name="name">res.partner.function.form</field>
<field name="model">res.partner.function</field>
<field name="type">form</field>
<field name="arch" type="xml">
<form string="Partner Functions">
<form string="Contact Functions">
<field name="name" select="1"/>
<field name="code" select="1"/>
</form>
@ -27,38 +29,38 @@
<field name="model">res.partner.function</field>
<field name="type">tree</field>
<field name="arch" type="xml">
<tree string="Partner Functions">
<tree string="Contact Functions">
<field name="code"/>
<field name="name"/>
</tree>
</field>
</record>
<record id="action_partner_function_form" model="ir.actions.act_window">
<field name="name">Partner Functions</field>
<field name="name">Contact Functions</field>
<field name="type">ir.actions.act_window</field>
<field name="res_model">res.partner.function</field>
<field name="view_type">form</field>
</record>
<menuitem action="action_partner_function_form" id="menu_partner_function_form" parent="base.menu_base_config"/>
<menuitem action="action_partner_function_form" id="menu_partner_function_form" parent="base.menu_base_config_contact" sequence="20"/>
<!--
=====================
Partner Address
=====================
-->
=====================
Partner Address
=====================
-->
<record id="view_res_partner_address_filter" model="ir.ui.view">
<field name="name">res.partner.address.select</field>
<field name="model">res.partner.address</field>
<field name="type">search</field>
<field name="arch" type="xml">
<search string="Search Contact">
<field name="name" select='1'/>
<field name="partner_id" select='1'/>
<field name="country_id" select='1'/>
</search>
</field>
</record>
<record id="view_res_partner_address_filter" model="ir.ui.view">
<field name="name">res.partner.address.select</field>
<field name="model">res.partner.address</field>
<field name="type">search</field>
<field name="arch" type="xml">
<search string="Search Contact">
<field name="name" select='1'/>
<field name="partner_id" select='1'/>
<field name="country_id" select='1'/>
</search>
</field>
</record>
<record id="view_partner_address_tree" model="ir.ui.view">
<field name="name">res.partner.address.tree</field>
@ -92,7 +94,7 @@
<field colspan="4" name="partner_id" select="1"/>
<newline/>
<field name="name" select="1"/>
<field domain="[('domain', '=', 'contact')]" name="title"/>
<field domain="[('domain', '=', 'contact')]" name="title" />
<field name="function"/>
<newline/>
<group string="Postal Address" colspan="2" col="2">
@ -110,7 +112,6 @@
<field name="mobile" select="2"/>
<field name="email" select="2" widget="email"/>
</group>
</form>
</field>
</record>
@ -136,10 +137,10 @@
<menuitem action="action_partner_address_form" id="menu_partner_address_form" parent="base.menu_base_partner"/>
<!--
=========================================
the short form used in the partner form
=========================================
-->
=========================================
the short form used in the partner form
=========================================
-->
<record id="view_partner_address_form2" model="ir.ui.view">
<field name="name">res.partner.address.form2</field>
<field name="model">res.partner.address</field>
@ -169,10 +170,10 @@
</record>
<!--
=======================
Partner Titles
=======================
-->
=======================
Partner Titles
=======================
-->
<record id="view_partner_title_form" model="ir.ui.view">
<field name="name">res.partner.title.form</field>
<field name="model">res.partner.title</field>
@ -185,6 +186,8 @@
</form>
</field>
</record>
<!--
<record id="action_partner_title" model="ir.actions.act_window">
<field name="name">Titles</field>
<field name="type">ir.actions.act_window</field>
@ -192,6 +195,7 @@
<field name="view_type">form</field>
</record>
<menuitem action="action_partner_title" id="menu_partner_title" parent="base.menu_base_config"/>
-->
<record id="action_partner_title_partner" model="ir.actions.act_window">
<field name="name">Partner Titles</field>
@ -200,7 +204,7 @@
<field name="view_type">form</field>
<field name="domain">[('domain','=','partner')]</field>
</record>
<menuitem action="action_partner_title_partner" id="menu_partner_title_partner" parent="menu_partner_title"/>
<menuitem action="action_partner_title_partner" id="menu_partner_title_partner" parent="menu_base_config_partner" sequence="10"/>
<record id="action_partner_title_contact" model="ir.actions.act_window">
<field name="name">Contact Titles</field>
@ -209,13 +213,13 @@
<field name="view_type">form</field>
<field name="domain">[('domain','=','contact')]</field>
</record>
<menuitem action="action_partner_title_contact" id="menu_partner_title_contact" parent="menu_partner_title"/>
<menuitem action="action_partner_title_contact" id="menu_partner_title_contact" parent="menu_base_config_contact" sequence="10"/>
<!--
=======================
Partner
=======================
-->
=======================
Partner
=======================
-->
<record id="view_partner_tree" model="ir.ui.view">
<field name="name">res.partner.tree</field>
<field name="model">res.partner</field>
@ -255,14 +259,14 @@
<page string="General">
<field colspan="4" mode="form,tree" name="address" nolabel="1" select="1" height="260">
<form string="Partner Contacts">
<group colspan="4" col="4">
<group colspan="4" col="6">
<field name="name" select="2"/>
<field domain="[('domain', '=', 'contact')]" name="title" string="Type"/>
<field domain="[('domain', '=', 'contact')]" name="title" size="0"/>
<field name="function"/>
<field name="type" select="2" colspan="4"/>
</group>
<newline/>
<group string="Postal Address" colspan="2" col="4">
<group colspan="2" col="4">
<separator string="Postal Address" colspan="4" col="4" />
<field name="street" select="2" colspan="4"/>
<field name="street2" colspan="4"/>
<field name="zip" select="2"/>
@ -270,13 +274,14 @@
<field name="country_id" select="2" completion="1"/>
<field name="state_id" select="2"/>
</group>
<group string="Communication" colspan="2" col="2">
<group colspan="2" col="2">
<separator string="Communication" colspan="2" col="2" />
<field name="type" select="2" string="Type" />
<field name="phone"/>
<field name="mobile"/>
<field name="fax"/>
<field name="email" select="2" widget="email"/>
</group>
</form>
<tree string="Partner Contacts">
<field name="name"/>
@ -478,10 +483,10 @@
</record>
<!--
======================
Company Architecture
======================
-->
======================
Company Architecture
======================
-->
<record id="view_partner_tree2" model="ir.ui.view">
<field name="name">res.partner.tree</field>
<field name="model">res.partner</field>
@ -512,10 +517,10 @@
</record>
<!--
======================
Categories
======================
-->
======================
Categories
======================
-->
<record id="view_partner_category_form" model="ir.ui.view">
<field name="name">res.partner.category.form</field>
<field name="model">res.partner.category</field>
@ -560,10 +565,10 @@
<field name="view_id" ref="view_partner_category_tree"/>
<field name="domain">[('parent_id','=',False)]</field>
</record>
<menuitem action="action_partner_category" id="menu_partner_category_main" parent="base.menu_partner_form" sequence="1"/>
<menuitem action="action_partner_category" id="menu_partner_category_main" parent="base.menu_base_partner" sequence="1"/>
<record id="action_partner_by_category" model="ir.actions.act_window">
<field name="name">Partner Categories</field>
<field name="name">Partner Categories</field>
<field name="res_model">res.partner</field>
<field name="view_type">form</field>
<field name="view_mode">tree,form</field>
@ -584,7 +589,7 @@
<field name="res_model">res.partner.category</field>
<field name="view_type">form</field>
</record>
<menuitem action="action_partner_category_form" id="menu_partner_category_form" parent="base.menu_base_config"/>
<menuitem action="action_partner_category_form" id="menu_partner_category_form" parent="base.menu_base_config_partner"/>
<act_window domain="[('partner_id', '=', active_id)]"
id="act_res_partner_event" name="Events"

View File

@ -1,7 +0,0 @@
# -*- coding: utf-8 -*-
# __init__.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@ -1,834 +0,0 @@
# -*- coding: utf-8 -*-
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
BOOLOP_SYMBOLS = {
And: 'and',
Or: 'or'
}
BINOP_SYMBOLS = {
Add: '+',
Sub: '-',
Mult: '*',
Div: '/',
FloorDiv: '//',
Mod: '%',
LShift: '<<',
RShift: '>>',
BitOr: '|',
BitAnd: '&',
BitXor: '^'
}
CMPOP_SYMBOLS = {
Eq: '==',
Gt: '>',
GtE: '>=',
In: 'in',
Is: 'is',
IsNot: 'is not',
Lt: '<',
LtE: '<=',
NotEq: '!=',
NotIn: 'not in'
}
UNARYOP_SYMBOLS = {
Invert: '~',
Not: 'not',
UAdd: '+',
USub: '-'
}
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
"""Parse an expression into an AST node."""
return compile(expr, filename, mode, PyCF_ONLY_AST)
def to_source(node, indent_with=' ' * 4):
"""
This function can convert a node tree back into python sourcecode. This
is useful for debugging purposes, especially if you're dealing with custom
asts not generated by python itself.
It could be that the sourcecode is evaluable when the AST itself is not
compilable / evaluable. The reason for this is that the AST contains some
more data than regular sourcecode does, which is dropped during
conversion.
Each level of indentation is replaced with `indent_with`. Per default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
"""
generator = SourceGenerator(indent_with)
generator.visit(node)
return ''.join(generator.result)
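# --- Editor's illustrative sketch (not part of the original file). It shows parse()
# --- and to_source() round-tripping a snippet; the names in the demo are arbitrary.
def _example_to_source():
    tree = parse("total = price * quantity")
    # SourceGenerator parenthesizes binary operations, so this returns
    # "total = (price * quantity)".
    return to_source(tree)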
def dump(node):
"""
A very verbose representation of the node passed. This is useful for
debugging purposes.
"""
def _format(node):
if isinstance(node, AST):
return '%s(%s)' % (node.__class__.__name__,
', '.join('%s=%s' % (a, _format(b))
for a, b in iter_fields(node)))
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy the source location hint (`lineno` and `col_offset`) from the
old to the new node if possible and return the new one.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
Some nodes require a line number and the column offset. Without that
information the compiler will abort the compilation. Because it can be
a dull task to add appropriate line numbers and column offsets when
adding new nodes this function can help. It copies the line number and
column offset of the parent node to the child nodes without this
information.
Unlike `copy_location` this works recursively and won't touch nodes that
already have location information.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
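# --- Editor's illustrative sketch (not part of the original file). A hand-built node
# --- has no lineno/col_offset, which compile() requires; fix_missing_locations()
# --- fills them in from the parent node.
def _example_fix_missing_locations():
    tree = parse("x = 1")
    tree.body.append(Expr(value=Str(s='appended without a location')))
    fix_missing_locations(tree)
    return compile(tree, '<synthesized>', 'exec')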
def increment_lineno(node, n=1):
"""
Increment the line numbers of all nodes by `n` if they have line number
attributes. This is useful to "move code" to a different location in a
file.
"""
for node in walk(node):
if 'lineno' in node._attributes:
node.lineno = getattr(node, 'lineno', 0) + n
def iter_fields(node):
"""Iterate over all fields of a node, only yielding existing fields."""
# CPython 2.5 compat
if not hasattr(node, '_fields') or not node._fields:
return
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def get_fields(node):
"""Like `iter_fiels` but returns a dict."""
return dict(iter_fields(node))
def iter_child_nodes(node):
"""Iterate over all child nodes or a node."""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_child_nodes(node):
"""Like `iter_child_nodes` but returns a list."""
return list(iter_child_nodes(node))
def get_compile_mode(node):
"""
Get the mode for `compile` of a given node. If the node is not a `mod`
node (`Expression`, `Module` etc.) a `TypeError` is thrown.
"""
if not isinstance(node, mod):
raise TypeError('expected mod node, got %r' % node.__class__.__name__)
return {
Expression: 'eval',
Interactive: 'single'
}.get(node.__class__, 'expr')
def get_docstring(node):
"""
Return the docstring for the given node or `None` if no docstring can be
found. If the node provided does not accept docstrings a `TypeError`
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Str):
return node.body[0].s
def walk(node):
"""
Iterate over all nodes. This is useful if you only want to modify nodes in
place and don't care about the context or the order the nodes are returned.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
class NodeVisitor(object):
"""
Walks the abstract syntax tree and calls visitor functions for every node
found. The visitor functions may return values which will be forwarded
by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def get_visitor(self, node):
"""
Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node)
return self.generic_visit(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
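# --- Editor's illustrative sketch (not part of the original file): a minimal visitor.
# --- Handlers are looked up as 'visit_' + the node class name; any other node falls
# --- back to generic_visit(), which simply recurses into the children.
class _NameCollector(NodeVisitor):
    """Collect every identifier referenced as a Name node.
    Usage: c = _NameCollector(); c.visit(parse("a + b * c")); c.names == set(['a', 'b', 'c'])
    """
    def __init__(self):
        self.names = set()
    def visit_Name(self, node):
        self.names.add(node.id)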
class NodeTransformer(NodeVisitor):
"""
Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
Here is an example transformer that rewrites all `foo` to `data['foo']`::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes
you must either transform the child nodes yourself or call the generic
visit function for the node first.
Nodes that were part of a collection of statements (that applies to
all statement nodes) may also return a list of nodes rather than just
a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
class SourceGenerator(NodeVisitor):
"""
This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with):
self.result = []
self.indent_with = indent_with
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, n=1):
self.new_lines = max(self.new_lines, n)
def body(self, statements):
self.newline()
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + node.vararg)
if node.kwarg is not None:
write_comma()
self.write('**' + node.kwarg)
def decorators(self, node):
for decorator in node.decorator_list:
self.newline()
self.write('@')
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline()
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline()
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline()
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
self.write(item)
def visit_Import(self, node):
self.newline()
for item in node.names:
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
self.newline()
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(n=2)
self.decorators(node)
self.newline()
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(n=3)
self.decorators(node)
self.newline()
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline()
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
self.newline()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
else:
self.newline()
self.write('else:')
self.body(else_)
break
def visit_For(self, node):
self.newline()
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline()
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_With(self, node):
self.newline()
self.write('with ')
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline()
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline()
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline()
self.write('del ')
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
def visit_TryExcept(self, node):
self.newline()
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline()
self.write('try:')
self.body(node.body)
self.newline()
self.write('finally:')
self.body(node.finalbody)
def visit_Global(self, node):
self.newline()
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline()
self.write('nonlocal ' + ', '.join(node.names))
def visit_Return(self, node):
self.newline()
self.write('return ')
self.visit(node.value)
def visit_Break(self, node):
self.newline()
self.write('break')
def visit_Continue(self, node):
self.newline()
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline()
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
write_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.write('(')
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
self.write(')')
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
for idx, item in enumerate(node.dims):
if idx:
self.write(', ')
self.visit(item)
def visit_Yield(self, node):
self.write('yield ')
self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('Ellipsis')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_excepthandler(self, node):
self.newline()
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
self.visit(node.name)
self.write(':')
self.body(node.body)

View File

@ -1,126 +0,0 @@
# -*- coding: utf-8 -*-
# ast.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, util
import re
class PythonCode(object):
"""represents information about a string containing Python code"""
def __init__(self, code, **exception_kwargs):
self.code = code
# represents all identifiers which are assigned to at some point in the code
self.declared_identifiers = util.Set()
# represents all identifiers which are referenced before their assignment, if any
self.undeclared_identifiers = util.Set()
# note that an identifier can be in both the undeclared and declared lists.
# using AST to parse instead of using code.co_varnames, code.co_names has several advantages:
# - we can locate an identifier as "undeclared" even if it's declared later in the same block of code
# - AST is less likely to break with version changes (for example, the behavior of co_names changed a little bit
# in python version 2.5)
if isinstance(code, basestring):
expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindIdentifiers(self, **exception_kwargs)
f.visit(expr)
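# --- Editor's illustrative sketch (not part of the original file). The identifier
# --- names in the demo are arbitrary.
def _example_python_code():
    info = PythonCode("total = price * quantity\n")
    # 'total' is assigned, so it is declared; 'price' and 'quantity' are only read,
    # so they are reported as undeclared (they must come from the enclosing context).
    return info.declared_identifiers, info.undeclared_identifiers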
class ArgumentList(object):
"""parses a fragment of code as a comma-separated list of expressions"""
def __init__(self, code, **exception_kwargs):
self.codeargs = []
self.args = []
self.declared_identifiers = util.Set()
self.undeclared_identifiers = util.Set()
if isinstance(code, basestring):
if re.match(r"\S", code) and not re.match(r",\s*$", code):
# if there's text and no trailing comma, ensure it's parsed
# as a tuple by adding a trailing comma
code += ","
expr = pyparser.parse(code, "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
f.visit(expr)
class PythonFragment(PythonCode):
"""extends PythonCode to provide identifier lookups in partial control statements
e.g.
for x in 5:
elif y==9:
except (MyException, e):
etc.
"""
def __init__(self, code, **exception_kwargs):
m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
if not m:
raise exceptions.CompileException("Fragment '%s' is not a partial control statement" % code, **exception_kwargs)
if m.group(3):
code = code[:m.start(3)]
(keyword, expr) = m.group(1,2)
if keyword in ['for','if', 'while']:
code = code + "pass"
elif keyword == 'try':
code = code + "pass\nexcept:pass"
elif keyword == 'elif' or keyword == 'else':
code = "if False:pass\n" + code + "pass"
elif keyword == 'except':
code = "try:pass\n" + code + "pass"
else:
raise exceptions.CompileException("Unsupported control keyword: '%s'" % keyword, **exception_kwargs)
super(PythonFragment, self).__init__(code, **exception_kwargs)
class FunctionDecl(object):
"""function declaration"""
def __init__(self, code, allow_kwargs=True, **exception_kwargs):
self.code = code
expr = pyparser.parse(code, "exec", **exception_kwargs)
f = pyparser.ParseFunc(self, **exception_kwargs)
f.visit(expr)
if not hasattr(self, 'funcname'):
raise exceptions.CompileException("Code '%s' is not a function declaration" % code, **exception_kwargs)
if not allow_kwargs and self.kwargs:
raise exceptions.CompileException("'**%s' keyword argument not allowed here" % self.argnames[-1], **exception_kwargs)
def get_argument_expressions(self, include_defaults=True):
"""return the argument declarations of this FunctionDecl as a printable list."""
namedecls = []
defaults = [d for d in self.defaults]
kwargs = self.kwargs
varargs = self.varargs
argnames = [f for f in self.argnames]
argnames.reverse()
for arg in argnames:
default = None
if kwargs:
arg = "**" + arg
kwargs = False
elif varargs:
arg = "*" + arg
varargs = False
else:
default = len(defaults) and defaults.pop() or None
if include_defaults and default:
namedecls.insert(0, "%s=%s" % (arg, pyparser.ExpressionGenerator(default).value()))
else:
namedecls.insert(0, arg)
return namedecls
class FunctionArgs(FunctionDecl):
"""the argument portion of a function declaration"""
def __init__(self, code, **kwargs):
super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, **kwargs)
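# --- Editor's illustrative sketch (not part of the original file). The argument names
# --- in the demo are arbitrary.
def _example_function_args():
    decl = FunctionArgs("ctx, limit=10, **extra")
    # get_argument_expressions() rebuilds the declarations, defaults included:
    # ['ctx', 'limit=10', '**extra']
    return decl.get_argument_expressions()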

View File

@ -1,56 +0,0 @@
# -*- coding: utf-8 -*-
from mako import exceptions
try:
from beaker import cache
cache = cache.CacheManager()
except ImportError:
cache = None
class Cache(object):
def __init__(self, id, starttime):
self.id = id
self.starttime = starttime
self.def_regions = {}
def put(self, key, value, **kwargs):
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
self._get_cache(defname, **kwargs).put_value(key, starttime=self.starttime, expiretime=expiretime)
def get(self, key, **kwargs):
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
return self._get_cache(defname, **kwargs).get_value(key, starttime=self.starttime, expiretime=expiretime, createfunc=createfunc)
def invalidate(self, key, **kwargs):
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
self._get_cache(defname, **kwargs).remove_value(key, starttime=self.starttime, expiretime=expiretime)
def invalidate_body(self):
self.invalidate('render_body', defname='render_body')
def invalidate_def(self, name):
self.invalidate('render_%s' % name, defname='render_%s' % name)
def invalidate_closure(self, name):
self.invalidate(name, defname=name)
def _get_cache(self, defname, type=None, **kw):
if not cache:
raise exceptions.RuntimeException("the Beaker package is required to use cache functionality.")
if type == 'memcached':
type = 'ext:memcached'
if not type:
(type, kw) = self.def_regions.get(defname, ('memory', {}))
else:
self.def_regions[defname] = (type, kw)
return cache.get_cache(self.id, type=type, **kw)

View File

@ -1,707 +0,0 @@
# -*- coding: utf-8 -*-
# codegen.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters
MAGIC_NUMBER = 5
def compile(node, uri, filename=None, default_filters=None, buffer_filters=None, imports=None, source_encoding=None, generate_unicode=True):
"""generate module source code given a parsetree node, uri, and optional source filename"""
buf = util.FastEncodingBuffer(unicode=generate_unicode)
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer, _CompileContext(uri, filename, default_filters, buffer_filters, imports, source_encoding, generate_unicode), node)
return buf.getvalue()
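# --- Editor's illustrative sketch (not part of the original file). It assumes the
# --- parsetree node comes from mako.lexer.Lexer, as mako.template.Template does; the
# --- template text and uri below are arbitrary.
def _example_compile():
    from mako.lexer import Lexer
    node = Lexer(u"hello ${name}", '<memory>').parse()
    # returns the generated module source for the template as a unicode string
    return compile(node, 'hello.txt')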
class _CompileContext(object):
def __init__(self, uri, filename, default_filters, buffer_filters, imports, source_encoding, generate_unicode):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_unicode = generate_unicode
class _GenerateRenderMethod(object):
"""a template visitor object which generates the full module source for a template."""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, parsetree.DefTag)
if self.in_def:
name = "render_" + node.name
args = node.function_decl.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(pagetag or node, name, args, buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
identifiers = property(lambda self:self.identifier_stack[-1])
def write_toplevel(self):
"""traverse a template structure for module-level directives and generate the
start of module-level code."""
inherit = []
namespaces = {}
module_code = []
encoding =[None]
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = util.Set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers()
module_identifiers.declared = module_ident
# module-level names, python code
if not self.compiler.generate_unicode and self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" % self.compiler.source_encoding)
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %s" % repr(MAGIC_NUMBER))
self.printer.writeline("_modified_time = %s" % repr(time.time()))
self.printer.writeline("_template_filename=%s" % repr(self.compiler.filename))
self.printer.writeline("_template_uri=%s" % repr(self.compiler.uri))
self.printer.writeline("_template_cache=cache.Cache(__name__, _modified_time)")
self.printer.writeline("_source_encoding=%s" % repr(self.compiler.source_encoding))
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(buf, source='', lineno=0, pos=0, filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = module_identifiers.topleveldefs.union(main_identifiers.topleveldefs)
[module_identifiers.declared.add(x) for x in ["UNDEFINED"]]
if impcode:
[module_identifiers.declared.add(x) for x in impcode.declared_identifiers]
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %s" % repr([n.name for n in main_identifiers.topleveldefs.values()]))
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return main_identifiers.topleveldefs.values()
def write_render_callable(self, node, name, args, buffered, filtered, cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(self.compiler.identifiers.branch(self.node))
if not self.in_def and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared)>0):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" % ','.join(["%s=%s" % (x, x) for x in self.identifiers.argument_declared]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(node, name, args, buffered, self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which is enclosed in <%! %> tags
in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" % (node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
None,None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in namespaces.values():
if node.attributes.has_key('import'):
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
class NSDefVisitor(object):
def visitDefTag(s, node):
self.write_inline_def(node, identifiers, nested=False)
export.append(node.name)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
callable_name = "make_namespace()"
else:
callable_name = "None"
self.printer.writeline("ns = runtime.Namespace(%s, context._clean_inheritance_tokens(), templateuri=%s, callables=%s, calling_uri=_template_uri, module=%s)" % (repr(node.name), node.parsed_attributes.get('file', 'None'), callable_name, node.parsed_attributes.get('module', 'None')))
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline("context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable definitions for defs and/or
name lookup within the function's context argument. the names declared are based on the
names that are referenced in the function body, which don't otherwise have any explicit
assignment operation. names that are assigned within the body are assumed to be
locally-scoped variables and are not separately declared.
for def callable definitions, if the def is a top-level callable then a
'stub' callable is generated which wraps the current Context into a closure. if the def
is not top-level, it is fully rendered as a local closure."""
# collection of all defs available to us in this scope
comp_idents = dict([(c.name, c) for c in identifiers.defs])
to_write = util.Set()
# write "context.get()" for all variables we are going to need that arent in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define right here
to_write = to_write.union(util.Set([c.name for c in identifiers.closuredefs.values()]))
# remove identifiers that are declared in the argument signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to. in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
# if a limiting set was sent, constrain to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.iteritems():
if ns.attributes.has_key('import'):
self.printer.writeline("_mako_get_namespace(context, %s)._populate(_import_ns, %s)" % (repr(ident), repr(re.split(r'\s*,\s*', ns.attributes['import']))))
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline("%s = _mako_get_namespace(context, %s)" % (ident, repr(ident)))
else:
if getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("%s = _import_ns.get(%s, context.get(%s, UNDEFINED))" % (ident, repr(ident), repr(ident)))
else:
self.printer.writeline("%s = context.get(%s, UNDEFINED)" % (ident, repr(ident)))
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.function_decl.funcname
namedecls = node.function_decl.get_argument_expressions()
nameargs = node.function_decl.get_argument_expressions(include_defaults=False)
if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context.locals_(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline("return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.function_decl.get_argument_expressions()
self.printer.writeline("def %s(%s):" % (node.name, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.name, namedecls, False, identifiers, inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached, callstack=True):
"""write the end section of a rendering function, either outermost or inline.
this takes into account if the rendering function was filtered, buffered, etc.
and closes the corresponding try: block if any, and writes code to retrieve captured content,
apply filters, send proper return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
# implementation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s, False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
)
def write_cache_decorator(self, node_or_pagetag, name, args, buffered, identifiers, inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name))
cacheargs = {}
for arg in (('cache_type', 'type'), ('cache_dir', 'data_dir'), ('cache_timeout', 'expiretime'), ('cache_url', 'url')):
val = node_or_pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
else:
if self.compiler.pagetag is not None:
val = self.compiler.pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [ '=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a for a in args]
self.write_variable_declares(identifiers, toplevel=toplevel, limit=node_or_pagetag.undeclared_identifiers())
if buffered:
s = "context.get('local').get_cached(%s, defname=%r, %screatefunc=lambda:__M_%s(%s))" % (cachekey, name, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ','.join(pass_args))
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
self.printer.writelines("return " + s,None)
else:
self.printer.writelines(
"__M_writer(context.get('local').get_cached(%s, defname=%r, %screatefunc=lambda:__M_%s(%s)))" % (cachekey, name, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ','.join(pass_args)),
"return ''",
None
)
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters present in the given
filter names, adjusting for the global 'default' filter aliases as needed."""
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
x = e
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
def visitExpression(self, node):
self.write_source_comment(node)
if len(node.escapes) or (self.compiler.pagetag is not None and len(self.compiler.pagetag.filter_args.args)) or len(self.compiler.default_filters):
s = self.create_filter_callable(node.escapes_code.args, "%s" % node.text, True)
self.printer.writeline("__M_writer(%s)" % s)
else:
self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
if node.isend:
self.printer.writeline(None)
else:
self.write_source_comment(node)
self.printer.writeline(node.text)
def visitText(self, node):
self.write_source_comment(node)
self.printer.writeline("__M_writer(%s)" % repr(node.content))
def visitTextTag(self, node):
filtered = len(node.filter_args.args) > 0
if filtered:
self.printer.writelines(
"__M_writer = context._push_writer()",
"try:",
)
for n in node.nodes:
n.accept_visitor(self)
if filtered:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()",
"__M_writer(%s)" % self.create_filter_callable(node.filter_args.args, "__M_buf.getvalue()", False),
None
)
def visitCode(self, node):
if not node.ismodule:
self.write_source_comment(node)
self.printer.write_indented_block(node.text)
if not self.in_def and len(self.identifiers.locally_assigned) > 0:
# if we are the "template" def, fudge locally declared/modified variables into the "__M_locals" dictionary,
# which is used for def calls within the same template, to simulate "enclosing scope"
self.printer.writeline('__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin()[__M_key]) for __M_key in [%s] if __M_key in __M_locals_builtin()]))' % ','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline("runtime._include_file(context, %s, _template_uri, %s)" % (node.parsed_attributes['file'], args))
else:
self.printer.writeline("runtime._include_file(context, %s, _template_uri)" % (node.parsed_attributes['file']))
def visitNamespaceTag(self, node):
pass
def visitDefTag(self, node):
pass
def visitCallNamespaceTag(self, node):
# TODO: we can put namespace-specific checks here, such
# as ensure the given namespace will be imported,
# pre-import the namespace, etc.
self.visitCallTag(node)
def visitCallTag(self, node):
self.printer.writeline("def ccall(caller):")
export = ['body']
callable_identifiers = self.identifiers.branch(node, nested=True)
body_identifiers = callable_identifiers.branch(node, nested=False)
# we want the 'caller' passed to ccall to be used for the body() function,
# but for other non-body() <%def>s within <%call> we want the current caller off the call stack (if any)
body_identifiers.add_declared('caller')
self.identifier_stack.append(body_identifiers)
class DefVisitor(object):
def visitDefTag(s, node):
self.write_inline_def(node, callable_identifiers, nested=False)
export.append(node.name)
# remove defs that are within the <%call> from the "closuredefs" defined
# in the body, so they don't render twice
if node.name in body_identifiers.closuredefs:
del body_identifiers.closuredefs[node.name]
vis = DefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.identifier_stack.pop()
bodyargs = node.body_decl.get_argument_expressions()
self.printer.writeline("def body(%s):" % ','.join(bodyargs))
# TODO: figure out best way to specify buffering/nonbuffering (at call time would be better)
buffered = False
if buffered:
self.printer.writelines(
"context._push_buffer()",
"try:"
)
self.write_variable_declares(body_identifiers)
self.identifier_stack.append(body_identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, False, False, callstack=False)
self.printer.writelines(
None,
"return [%s]" % (','.join(export)),
None
)
self.printer.writelines(
# get local reference to current caller, if any
"caller = context.caller_stack._get_caller()",
# push on caller for nested call
"context.caller_stack.nextcaller = runtime.Namespace('caller', context, callables=ccall(caller))",
"try:")
self.write_source_comment(node)
self.printer.writelines(
"__M_writer(%s)" % self.create_filter_callable([], node.expression, True),
"finally:",
"context.caller_stack.nextcaller = None",
None
)
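# Illustrative sketch, not part of this change: the template construct that
# visitCallTag/visitCallNamespaceTag above compile -- a def invoked with a body,
# which the callee reaches through ``caller.body()``.  The template text is an
# example only and assumes the 'mako' package is importable.
from mako.template import Template

t = Template("""
<%def name="box()">
<div class="box">${caller.body()}</div>
</%def>
<%self:box>
hello from the body
</%self:box>
""")
print t.render()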
class _Identifiers(object):
"""tracks the status of identifier names as template code is rendered."""
def __init__(self, node=None, parent=None, nested=False):
if parent is not None:
# things that have already been declared in an enclosing namespace (i.e. names we can just use)
self.declared = util.Set(parent.declared).union([c.name for c in parent.closuredefs.values()]).union(parent.locally_declared).union(parent.argument_declared)
# if these identifiers correspond to a "nested" scope, it means whatever the
# parent identifiers had as undeclared will have been declared by that parent,
# and therefore we have them in our scope.
if nested:
self.declared = self.declared.union(parent.undeclared)
# top level defs that are available
self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
else:
self.declared = util.Set()
self.topleveldefs = util.SetLikeDict()
# things within this level that are referenced before they are declared (e.g. assigned to)
self.undeclared = util.Set()
# things that are declared locally. some of these things could be in the "undeclared"
# list as well if they are referenced before declared
self.locally_declared = util.Set()
# assignments made in explicit python blocks. these will be propagated to
# the context of local def calls.
self.locally_assigned = util.Set()
# things that are declared in the argument signature of the def callable
self.argument_declared = util.Set()
# closure defs that are defined in this level
self.closuredefs = util.SetLikeDict()
self.node = node
if node is not None:
node.accept_visitor(self)
def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with this Identifiers as the parent."""
return _Identifiers(node, self, **kwargs)
defs = property(lambda self:util.Set(self.topleveldefs.union(self.closuredefs).values()))
def __repr__(self):
return "Identifiers(declared=%s, locally_declared=%s, undeclared=%s, topleveldefs=%s, closuredefs=%s, argumenetdeclared=%s)" % (repr(list(self.declared)), repr(list(self.locally_declared)), repr(list(self.undeclared)), repr([c.name for c in self.topleveldefs.values()]), repr([c.name for c in self.closuredefs.values()]), repr(self.argument_declared))
def check_declared(self, node):
"""update the state of this Identifiers with the undeclared and declared identifiers of the given node."""
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.locally_declared.add(ident)
def add_declared(self, ident):
self.declared.add(ident)
if ident in self.undeclared:
self.undeclared.remove(ident)
def visitExpression(self, node):
self.check_declared(node)
def visitControlLine(self, node):
self.check_declared(node)
def visitCode(self, node):
if not node.ismodule:
self.check_declared(node)
self.locally_assigned = self.locally_assigned.union(node.declared_identifiers())
def visitDefTag(self, node):
if node.is_root():
self.topleveldefs[node.name] = node
elif node is not self.node:
self.closuredefs[node.name] = node
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
# visit defs only one level deep
if node is self.node:
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitIncludeTag(self, node):
self.check_declared(node)
def visitPageTag(self, node):
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
self.check_declared(node)
def visitCallNamespaceTag(self, node):
self.visitCallTag(node)
def visitCallTag(self, node):
if node is self.node:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
else:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
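# Illustrative sketch, not part of this change: one way to see what the code
# generator and the _Identifiers scoping above actually produce, using Mako's
# public Template API (template text is an example only).
from mako.template import Template

t = Template("hello ${name}")   # 'name' is undeclared, so it is fetched from the context
print t.code                    # generated module source, incl. name = context.get('name', UNDEFINED)
print t.render(name="world")    # -> 'hello world'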

View File

@@ -1,256 +0,0 @@
# -*- coding: utf-8 -*-
# exceptions.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""exception classes"""
import traceback, sys, re
from mako import util
class MakoException(Exception):
pass
class RuntimeException(MakoException):
pass
def _format_filepos(lineno, pos, filename):
if filename is None:
return " at line: %d char: %d" % (lineno, pos)
else:
return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
class CompileException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
self.lineno = lineno
self.pos = pos
self.filename = filename
self.source = source
class SyntaxException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
self.lineno = lineno
self.pos = pos
self.filename = filename
self.source = source
class TemplateLookupException(MakoException):
pass
class TopLevelLookupException(TemplateLookupException):
pass
class RichTraceback(object):
"""pulls the current exception from the sys traceback and extracts Mako-specific
template information.
Usage:
RichTraceback()
Properties:
error - the exception instance.
source - source code of the file where the error occurred. if the error occurred within a compiled template,
this is the template source.
lineno - line number where the error occurred. if the error occurred within a compiled template, the line number
is adjusted to that of the template source
records - a list of 8-tuples containing the original python traceback elements, plus the
filename, line number, source line, and full template source for any traceline that maps
back to its originating source template (those fields are None otherwise).
reverse_records - the list of records in reverse
traceback - a list of 4-tuples, in the same format as a regular python traceback, with template-corresponding
traceback records replacing the originals
reverse_traceback - the traceback list in reverse
"""
def __init__(self, traceback=None):
(self.source, self.lineno) = ("", 0)
(t, self.error, self.records) = self._init(traceback)
if self.error is None:
self.error = t
if isinstance(self.error, CompileException) or isinstance(self.error, SyntaxException):
import mako.template
self.source = self.error.source
self.lineno = self.error.lineno
self._has_source = True
self.reverse_records = [r for r in self.records]
self.reverse_records.reverse()
def _get_reformatted_records(self, records):
for rec in records:
if rec[6] is not None:
yield (rec[4], rec[5], rec[2], rec[6])
else:
yield tuple(rec[0:4])
traceback = property(lambda self:self._get_reformatted_records(self.records), doc="""
return a list of 4-tuple traceback records (i.e. normal python format)
with template-corresponding lines remapped to the originating template
""")
reverse_traceback = property(lambda self:self._get_reformatted_records(self.reverse_records), doc="""
return the same data as traceback, except in reverse order
""")
def _init(self, trcback):
"""format a traceback from sys.exc_info() into 7-item tuples, containing
the regular four traceback tuple items, plus the original template
filename, the line number adjusted relative to the template source, and
code line from that line number of the template."""
import mako.template
mods = {}
if not trcback:
(type, value, trcback) = sys.exc_info()
rawrecords = traceback.extract_tb(trcback)
new_trcback = []
for filename, lineno, function, line in rawrecords:
try:
(line_map, template_lines) = mods[filename]
except KeyError:
try:
info = mako.template._get_module_info(filename)
module_source = info.code
template_source = info.source
template_filename = info.template_filename or filename
except KeyError:
new_trcback.append((filename, lineno, function, line, None, None, None, None))
continue
template_ln = module_ln = 1
line_map = {}
for line in module_source.split("\n"):
match = re.match(r'\s*# SOURCE LINE (\d+)', line)
if match:
template_ln = int(match.group(1))
else:
template_ln += 1
line_map[module_ln] = template_ln
module_ln += 1
template_lines = [line for line in template_source.split("\n")]
mods[filename] = (line_map, template_lines)
template_ln = line_map[lineno]
if template_ln <= len(template_lines):
template_line = template_lines[template_ln - 1]
else:
template_line = None
new_trcback.append((filename, lineno, function, line, template_filename, template_ln, template_line, template_source))
if not self.source:
for l in range(len(new_trcback)-1, 0, -1):
if new_trcback[l][5]:
self.source = new_trcback[l][7]
self.lineno = new_trcback[l][5]
break
else:
try:
# A normal .py file (not a Template)
fp = open(new_trcback[-1][0])
encoding = util.parse_encoding(fp)
fp.seek(0)
self.source = fp.read()
fp.close()
if encoding:
self.source = self.source.decode(encoding)
except IOError:
self.source = ''
self.lineno = new_trcback[-1][1]
return (type, value, new_trcback)
def text_error_template(lookup=None):
"""provides a template that renders a stack trace in a similar format to the Python interpreter,
substituting source template filenames, line numbers and code for that of the originating
source template, as applicable."""
import mako.template
return mako.template.Template(r"""
<%page args="traceback=None"/>
<%!
from mako.exceptions import RichTraceback
%>\
<%
tback = RichTraceback(traceback=traceback)
%>\
Traceback (most recent call last):
% for (filename, lineno, function, line) in tback.traceback:
File "${filename}", line ${lineno}, in ${function or '?'}
${line | unicode.strip}
% endfor
${str(tback.error.__class__.__name__)}: ${str(tback.error)}
""")
def html_error_template():
"""provides a template that renders a stack trace in an HTML format, providing an excerpt of
code as well as substituting source template filenames, line numbers and code
for that of the originating source template, as applicable.
the template's default encoding_errors value is 'htmlentityreplace'. the template has
two options:
with the full option disabled, only a section of an HTML document is returned.
with the css option disabled, the default stylesheet won't be included."""
import mako.template
return mako.template.Template(r"""
<%!
from mako.exceptions import RichTraceback
%>
<%page args="full=True, css=True, traceback=None"/>
% if full:
<html>
<head>
<title>Mako Runtime Error</title>
% endif
% if css:
<style>
body { font-family:verdana; margin:10px 30px 10px 30px;}
.stacktrace { margin:5px 5px 5px 5px; }
.highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
.nonhighlight { padding:0px; background-color:#DFDFDF; }
.sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; }
.sampleline { padding:0px 10px 0px 10px; }
.sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
.location { font-size:80%; }
</style>
% endif
% if full:
</head>
<body>
% endif
<h2>Error !</h2>
<%
tback = RichTraceback(traceback=traceback)
src = tback.source
line = tback.lineno
if src:
lines = src.split('\n')
else:
lines = None
%>
<h3>${str(tback.error.__class__.__name__)}: ${str(tback.error)}</h3>
% if lines:
<div class="sample">
<div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
% if index + 1 == line:
<div class="highlight">${index + 1} ${lines[index] | h}</div>
% else:
<div class="sampleline">${index + 1} ${lines[index] | h}</div>
% endif
% endfor
</div>
</div>
% endif
<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
<div class="location">${filename}, line ${lineno}:</div>
<div class="sourceline">${line | h}</div>
% endfor
</div>
% if full:
</body>
</html>
% endif
""", output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace')

View File

@@ -1,20 +0,0 @@
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

View File

@@ -1,58 +0,0 @@
# -*- coding: utf-8 -*-
"""adds autohandler functionality to Mako templates.
requires that the TemplateLookup class is used with templates.
usage:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context)}"/>
or with custom autohandler filename:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context, name='somefilename')}"/>
"""
import posixpath, os, re
def autohandler(template, context, name='autohandler'):
lookup = context.lookup
_template_uri = template.module._template_uri
if not lookup.filesystem_checks:
try:
return lookup._uri_cache[(autohandler, _template_uri, name)]
except KeyError:
pass
tokens = re.findall(r'([^/]+)', posixpath.dirname(_template_uri)) + [name]
while len(tokens):
path = '/' + '/'.join(tokens)
if path != _template_uri and _file_exists(lookup, path):
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault((autohandler, _template_uri, name), path)
else:
return path
if len(tokens) == 1:
break
tokens[-2:] = [name]
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault((autohandler, _template_uri, name), None)
else:
return None
def _file_exists(lookup, path):
psub = re.sub(r'^/', '',path)
for d in lookup.directories:
if os.path.exists(d + '/' + psub):
return True
else:
return False

View File

@@ -1,119 +0,0 @@
# -*- coding: utf-8 -*-
"""gettext message extraction via Babel: http://babel.edgewall.org/"""
from StringIO import StringIO
from babel.messages.extract import extract_python
from mako import lexer, parsetree
def extract(fileobj, keywords, comment_tags, options):
"""Extract messages from Mako templates.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
encoding = options.get('input_encoding', options.get('encoding', None))
template_node = lexer.Lexer(fileobj.read(),
input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(),
keywords, comment_tags, options):
yield extracted
def extract_nodes(nodes, keywords, comment_tags, options):
"""Extract messages from Mako's lexer node objects
:param nodes: an iterable of Mako parsetree.Node objects to extract from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
translator_comments = []
in_translator_comments = False
for node in nodes:
child_nodes = None
if in_translator_comments and isinstance(node, parsetree.Text) and \
not node.content.strip():
# Ignore whitespace within translator comments
continue
if isinstance(node, parsetree.Comment):
value = node.text.strip()
if in_translator_comments:
translator_comments.extend(_split_comment(node.lineno, value))
continue
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.extend(_split_comment(node.lineno,
value))
continue
if isinstance(node, parsetree.DefTag):
code = node.function_decl.code
child_nodes = node.nodes
elif isinstance(node, parsetree.CallTag):
code = node.code.code
child_nodes = node.nodes
elif isinstance(node, parsetree.PageTag):
code = node.body_decl.code
elif isinstance(node, parsetree.ControlLine):
if node.isend:
translator_comments = []
in_translator_comments = False
continue
code = node.text
elif isinstance(node, parsetree.Code):
# <% and <%! blocks would provide their own translator comments
translator_comments = []
in_translator_comments = False
code = node.code.code
elif isinstance(node, parsetree.Expression):
code = node.code.code
else:
translator_comments = []
in_translator_comments = False
continue
# Comments don't apply unless they immediately precede the message
if translator_comments and \
translator_comments[-1][0] < node.lineno - 1:
translator_comments = []
else:
translator_comments = \
[comment[1] for comment in translator_comments]
if isinstance(code, unicode):
code = code.encode('ascii', 'backslashreplace')
code = StringIO(code)
for lineno, funcname, messages, python_translator_comments \
in extract_python(code, keywords, comment_tags, options):
yield (node.lineno + (lineno - 1), funcname, messages,
translator_comments + python_translator_comments)
translator_comments = []
in_translator_comments = False
if child_nodes:
for extracted in extract_nodes(child_nodes, keywords, comment_tags,
options):
yield extracted
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of comment line
numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())]
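# Illustrative sketch, not part of this change: driving the extractor above by
# hand.  Babel normally invokes it through an extraction-method mapping; the
# template snippet and keyword list are examples only.
from StringIO import StringIO
from mako.ext.babelplugin import extract

src = StringIO('<p>${_(u"Hello, world")}</p>\n')
for lineno, funcname, message, comments in extract(src, ['_'], [], {}):
    print lineno, funcname, message, comments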

View File

@@ -1,21 +0,0 @@
# -*- coding: utf-8 -*-
"""preprocessing functions, used with the 'preprocessor' argument on Template, TemplateLookup"""
import re
def convert_comments(text):
"""preprocess old style comments.
example:
from mako.ext.preprocessors import convert_comments
t = Template(..., preprocessor=convert_comments)"""
return re.sub(r'(?<=\n)\s*#[^#]', "##", text)
# TODO
def create_tag(callable):
"""given a callable, extract the *args and **kwargs, and produce a preprocessor
that will parse for <%<funcname> <args>> and convert to an appropriate <%call> statement.
this allows any custom tag to be created which looks like a pure Mako-style tag."""
raise NotImplementedError("Future functionality....")

View File

@@ -1,102 +0,0 @@
# -*- coding: utf-8 -*-
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexers.web import \
HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
include, using, this
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Literal
from pygments.util import html_doctype_matches, looks_like_xml
class MakoLexer(RegexLexer):
name = 'Mako'
aliases = ['mako']
filenames = ['*.mao']
tokens = {
'root': [
(r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)',
bygroups(Text, Comment.Preproc, Keyword, Other)),
(r'(\s*)(\%)([^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
(r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)),
(r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc),
(r'(<%)([\w\.\:]+)', bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w\.\:]+)(>)', bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
(r'(<%(?:!?))(.*?)(%>)(?s)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(\$\{)(.*?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=%|\#\#) | # an eval or comment line
(?=\#\*) | # multiline comment
(?=</?%) | # a python block
# call start or end
(?=\$\{) | # a substitution
(?<=\n)(?=\s*%) |
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)
''', bygroups(Other, Operator)),
(r'\s+', Text),
],
'ondeftags': [
(r'<%', Comment.Preproc),
(r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
include('tag'),
],
'tag': [
(r'((?:\w+)\s*=)\s*(".*?")',
bygroups(Name.Attribute, String)),
(r'/?\s*>', Comment.Preproc, '#pop'),
(r'\s+', Text),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class MakoHtmlLexer(DelegatingLexer):
name = 'HTML+Mako'
aliases = ['html+mako']
def __init__(self, **options):
super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
**options)
class MakoXmlLexer(DelegatingLexer):
name = 'XML+Mako'
aliases = ['xml+mako']
def __init__(self, **options):
super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
**options)
class MakoJavascriptLexer(DelegatingLexer):
name = 'JavaScript+Mako'
aliases = ['js+mako', 'javascript+mako']
def __init__(self, **options):
super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
name = 'CSS+Mako'
aliases = ['css+mako']
def __init__(self, **options):
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
**options)
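# Illustrative sketch, not part of this change: the lexers above plug into the
# standard Pygments highlighting pipeline.  Assumes Pygments is installed; the
# template snippet is an example only.
from pygments import highlight
from pygments.formatters import HtmlFormatter

source = '<ul>\n% for item in items:\n  <li>${item | h}</li>\n% endfor\n</ul>\n'
print highlight(source, MakoHtmlLexer(), HtmlFormatter())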

View File

@@ -1,51 +0,0 @@
# -*- coding: utf-8 -*-
import re, inspect
from mako.lookup import TemplateLookup
from mako.template import Template
class TGPlugin(object):
"""TurboGears compatible Template Plugin."""
def __init__(self, extra_vars_func=None, options=None, extension='mak'):
self.extra_vars_func = extra_vars_func
self.extension = extension
if not options:
options = {}
# Pull the options out and initialize the lookup
lookup_options = {}
for k, v in options.iteritems():
if k.startswith('mako.'):
lookup_options[k[5:]] = v
elif k in ['directories', 'filesystem_checks', 'module_directory']:
lookup_options[k] = v
self.lookup = TemplateLookup(**lookup_options)
self.tmpl_options = {}
# transfer lookup args to template args, based on those available
# in getargspec
for kw in inspect.getargspec(Template.__init__)[0]:
if kw in lookup_options:
self.tmpl_options[kw] = lookup_options[kw]
def load_template(self, templatename, template_string=None):
"""Loads a template from a file or a string"""
if template_string is not None:
return Template(template_string, **self.tmpl_options)
# Translate TG dot notation to normal / template path
if '/' not in templatename:
templatename = '/' + templatename.replace('.', '/') + '.' + self.extension
# Lookup template
return self.lookup.get_template(templatename)
def render(self, info, format="html", fragment=False, template=None):
if isinstance(template, basestring):
template = self.load_template(template)
# Load extra vars func if provided
if self.extra_vars_func:
info.update(self.extra_vars_func())
return template.render(**info)
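# Illustrative sketch, not part of this change: how a TurboGears-style caller
# might use the plugin above.  The directory layout and dotted template name
# are examples only.
plugin = TGPlugin(options={'mako.directories': ['templates']})
# the dotted name is translated to /myapp/templates/index.mak by load_template()
print plugin.render({'title': 'Home'}, template='myapp.templates.index')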

View File

@@ -1,171 +0,0 @@
# -*- coding: utf-8 -*-
# filters.py
# Copyright (C) 2006, 2007, 2008 Geoffrey T. Dairiki <dairiki@dairiki.org> and Michael Bayer <mike_mp@zzzcomputing.com>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, cgi, urllib, htmlentitydefs, codecs
from StringIO import StringIO
xml_escapes = {
'&' : '&amp;',
'>' : '&gt;',
'<' : '&lt;',
'"' : '&#34;', # also &quot; in html-only
"'" : '&#39;' # also &apos; in html-only
}
# XXX: &quot; is valid in HTML and XML
# &apos; is not valid HTML, but is valid XML
def html_escape(string):
return cgi.escape(string, True)
def xml_escape(string):
return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
def url_escape(string):
# convert into a list of octets
string = string.encode("utf8")
return urllib.quote_plus(string)
def url_unescape(string):
text = urllib.unquote_plus(string)
if not is_ascii_str(text):
text = text.decode("utf8")
return text
def trim(string):
return string.strip()
class Decode(object):
def __getattr__(self, key):
def decode(x):
if isinstance(x, unicode):
return x
elif not isinstance(x, str):
return unicode(str(x), encoding=key)
else:
return unicode(x, encoding=key)
return decode
decode = Decode()
_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z')
def is_ascii_str(text):
return isinstance(text, str) and _ASCII_re.match(text)
################################################################
class XMLEntityEscaper(object):
def __init__(self, codepoint2name, name2codepoint):
self.codepoint2entity = dict([(c, u'&%s;' % n)
for c,n in codepoint2name.iteritems()])
self.name2codepoint = name2codepoint
def escape_entities(self, text):
"""Replace characters with their character entity references.
Only characters corresponding to a named entity are replaced.
"""
return unicode(text).translate(self.codepoint2entity)
def __escape(self, m):
codepoint = ord(m.group())
try:
return self.codepoint2entity[codepoint]
except (KeyError, IndexError):
return '&#x%X;' % codepoint
__escapable = re.compile(r'["&<>]|[^\x00-\x7f]')
def escape(self, text):
"""Replace characters with their character references.
Replace characters by their named entity references.
Non-ASCII characters, if they do not have a named entity reference,
are replaced by numerical character references.
The return value is guaranteed to be ASCII.
"""
return self.__escapable.sub(self.__escape, unicode(text)
).encode('ascii')
# XXX: This regexp will not match all valid XML entity names__.
# (It punts on details involving CombiningChars and Extenders.)
#
# .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef
__characterrefs = re.compile(r'''& (?:
\#(\d+)
| \#x([\da-f]+)
| ( (?!\d) [:\w] [-.:\w]+ )
) ;''',
re.X | re.UNICODE)
def __unescape(self, m):
dval, hval, name = m.groups()
if dval:
codepoint = int(dval)
elif hval:
codepoint = int(hval, 16)
else:
codepoint = self.name2codepoint.get(name, 0xfffd)
# U+FFFD = "REPLACEMENT CHARACTER"
if codepoint < 128:
return chr(codepoint)
return unichr(codepoint)
def unescape(self, text):
"""Unescape character references.
All character references (both entity references and numerical
character references) are unescaped.
"""
return self.__characterrefs.sub(self.__unescape, text)
_html_entities_escaper = XMLEntityEscaper(htmlentitydefs.codepoint2name,
htmlentitydefs.name2codepoint)
html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape
def htmlentityreplace_errors(ex):
"""An encoding error handler.
This python `codecs`_ error handler replaces unencodable
characters with HTML entities, or, if no HTML entity exists for
the character, XML character references.
>>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
'The cost was &euro;12.'
"""
if isinstance(ex, UnicodeEncodeError):
# Handle encoding errors
bad_text = ex.object[ex.start:ex.end]
text = _html_entities_escaper.escape(bad_text)
return (unicode(text), ex.end)
raise ex
codecs.register_error('htmlentityreplace', htmlentityreplace_errors)
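# Illustrative sketch, not part of this change: the codecs error handler
# registered above in action (same idea as the doctest in its docstring).
# Importing the module registers the handler.
from mako import filters  # noqa: registers 'htmlentityreplace'
print u'caf\xe9 \u2014 3 < 5'.encode('ascii', 'htmlentityreplace')
# -> 'caf&eacute; &mdash; 3 < 5'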
# TODO: options to make this dynamic per-compilation will be added in a later release
DEFAULT_ESCAPES = {
'x':'filters.xml_escape',
'h':'filters.html_escape',
'u':'filters.url_escape',
'trim':'filters.trim',
'entity':'filters.html_entities_escape',
'unicode':'unicode',
'decode':'decode',
'str':'str',
'n':'n'
}
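# Illustrative sketch, not part of this change: what the alias table above means
# for an expression such as ``${value | h, trim}``.  The code generator chains
# the filters innermost-first and, unless 'n' is given, prepends the default
# 'unicode' filter; hand-expanded this is roughly:
from mako import filters

value = "  <b>bold</b>  "
result = filters.trim(filters.html_escape(unicode(value)))
# -> u'&lt;b&gt;bold&lt;/b&gt;'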

View File

@@ -1,329 +0,0 @@
# -*- coding: utf-8 -*-
# lexer.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees."""
import re, codecs
from mako import parsetree, exceptions
from mako.pygen import adjust_whitespace
_regexp_cache = {}
class Lexer(object):
def __init__(self, text, filename=None, disable_unicode=False, input_encoding=None, preprocessor=None):
self.text = text
self.filename = filename
self.template = parsetree.TemplateNode(self.filename)
self.matched_lineno = 1
self.matched_charpos = 0
self.lineno = 1
self.match_position = 0
self.tag = []
self.control_line = []
self.disable_unicode = disable_unicode
self.encoding = input_encoding
if preprocessor is None:
self.preprocessor = []
elif not hasattr(preprocessor, '__iter__'):
self.preprocessor = [preprocessor]
else:
self.preprocessor = preprocessor
exception_kwargs = property(lambda self:{'source':self.text, 'lineno':self.matched_lineno, 'pos':self.matched_charpos, 'filename':self.filename})
def match(self, regexp, flags=None):
"""match the given regular expression string and flags to the current text position.
if a match occurs, update the current text and line position."""
mp = self.match_position
try:
reg = _regexp_cache[(regexp, flags)]
except KeyError:
if flags:
reg = re.compile(regexp, flags)
else:
reg = re.compile(regexp)
_regexp_cache[(regexp, flags)] = reg
match = reg.match(self.text, self.match_position)
if match:
(start, end) = match.span()
if end == start:
self.match_position = end + 1
else:
self.match_position = end
self.matched_lineno = self.lineno
lines = re.findall(r"\n", self.text[mp:self.match_position])
cp = mp - 1
while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'):
cp -= 1
self.matched_charpos = mp - cp
self.lineno += len(lines)
#print "MATCHED:", match.group(0), "LINE START:", self.matched_lineno, "LINE END:", self.lineno
#print "MATCH:", regexp, "\n", self.text[mp : mp + 15], (match and "TRUE" or "FALSE")
return match
def parse_until_text(self, *text):
startpos = self.match_position
while True:
match = self.match(r'#.*\n')
if match:
continue
match = self.match(r'(\"\"\"|\'\'\'|\"|\')')
if match:
m = self.match(r'.*?%s' % match.group(1), re.S)
if not m:
raise exceptions.SyntaxException("Unmatched '%s'" % match.group(1), **self.exception_kwargs)
else:
match = self.match(r'(%s)' % r'|'.join(text))
if match:
return (self.text[startpos:self.match_position-len(match.group(1))], match.group(1))
else:
match = self.match(r".*?(?=\"|\'|#|%s)" % r'|'.join(text), re.S)
if not match:
raise exceptions.SyntaxException("Expected: %s" % ','.join(text), **self.exception_kwargs)
def append_node(self, nodecls, *args, **kwargs):
kwargs.setdefault('source', self.text)
kwargs.setdefault('lineno', self.matched_lineno)
kwargs.setdefault('pos', self.matched_charpos)
kwargs['filename'] = self.filename
node = nodecls(*args, **kwargs)
if len(self.tag):
self.tag[-1].nodes.append(node)
else:
self.template.nodes.append(node)
if isinstance(node, parsetree.Tag):
if len(self.tag):
node.parent = self.tag[-1]
self.tag.append(node)
elif isinstance(node, parsetree.ControlLine):
if node.isend:
self.control_line.pop()
elif node.is_primary:
self.control_line.append(node)
elif len(self.control_line) and not self.control_line[-1].is_ternary(node.keyword):
raise exceptions.SyntaxException("Keyword '%s' not a legal ternary for keyword '%s'" % (node.keyword, self.control_line[-1].keyword), **self.exception_kwargs)
def escape_code(self, text):
if not self.disable_unicode and self.encoding:
return text.encode('ascii', 'backslashreplace')
else:
return text
def parse(self):
for preproc in self.preprocessor:
self.text = preproc(self.text)
if not isinstance(self.text, unicode) and self.text.startswith(codecs.BOM_UTF8):
self.text = self.text[len(codecs.BOM_UTF8):]
parsed_encoding = 'utf-8'
me = self.match_encoding()
if me is not None and me != 'utf-8':
raise exceptions.CompileException("Found utf-8 BOM in file, with conflicting magic encoding comment of '%s'" % me, self.text.decode('utf-8', 'ignore'), 0, 0, self.filename)
else:
parsed_encoding = self.match_encoding()
if parsed_encoding:
self.encoding = parsed_encoding
if not self.disable_unicode and not isinstance(self.text, unicode):
if self.encoding:
try:
self.text = self.text.decode(self.encoding)
except UnicodeDecodeError, e:
raise exceptions.CompileException("Unicode decode operation of encoding '%s' failed" % self.encoding, self.text.decode('utf-8', 'ignore'), 0, 0, self.filename)
else:
try:
self.text = self.text.decode()
except UnicodeDecodeError, e:
raise exceptions.CompileException("Could not read template using encoding of 'ascii'. Did you forget a magic encoding comment?", self.text.decode('utf-8', 'ignore'), 0, 0, self.filename)
self.textlength = len(self.text)
while (True):
if self.match_position > self.textlength:
break
if self.match_end():
break
if self.match_expression():
continue
if self.match_control_line():
continue
if self.match_comment():
continue
if self.match_tag_start():
continue
if self.match_tag_end():
continue
if self.match_python_block():
continue
if self.match_text():
continue
if self.match_position > self.textlength:
break
raise exceptions.CompileException("assertion failed")
if len(self.tag):
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % self.tag[-1].keyword, **self.exception_kwargs)
if len(self.control_line):
raise exceptions.SyntaxException("Unterminated control keyword: '%s'" % self.control_line[-1].keyword, self.text, self.control_line[-1].lineno, self.control_line[-1].pos, self.filename)
return self.template
def match_encoding(self):
match = self.match(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')
if match:
return match.group(1)
else:
return None
def match_tag_start(self):
match = self.match(r'''
\<% # opening tag
([\w\.\:]+) # keyword
((?:\s+\w+|=|".*?"|'.*?')*) # attrname, = sign, string expression
\s* # more whitespace
(/)?> # closing
''',
re.I | re.S | re.X)
if match:
(keyword, attr, isend) = (match.group(1).lower(), match.group(2), match.group(3))
self.keyword = keyword
attributes = {}
if attr:
for att in re.findall(r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
(key, val1, val2) = att
text = val1 or val2
text = text.replace('\r\n', '\n')
attributes[key] = self.escape_code(text)
self.append_node(parsetree.Tag, keyword, attributes)
if isend:
self.tag.pop()
else:
if keyword == 'text':
match = self.match(r'(.*?)(?=\</%text>)', re.S)
if not match:
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % self.tag[-1].keyword, **self.exception_kwargs)
self.append_node(parsetree.Text, match.group(1))
return self.match_tag_end()
return True
else:
return False
def match_tag_end(self):
match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
if match:
if not len(self.tag):
raise exceptions.SyntaxException("Closing tag without opening tag: </%%%s>" % match.group(1), **self.exception_kwargs)
elif self.tag[-1].keyword != match.group(1):
raise exceptions.SyntaxException("Closing tag </%%%s> does not match tag: <%%%s>" % (match.group(1), self.tag[-1].keyword), **self.exception_kwargs)
self.tag.pop()
return True
else:
return False
def match_end(self):
match = self.match(r'\Z', re.S)
if match:
string = match.group()
if string:
return string
else:
return True
else:
return False
def match_text(self):
match = self.match(r"""
(.*?) # anything, followed by:
(
(?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based comment preceded by a consumed \n and whitespace
|
(?=\${) # an expression
|
(?=\#\*) # multiline comment
|
(?=</?[%&]) # a substitution or block or call start or end
# - don't consume
|
(\\\r?\n) # an escaped newline - throw away
|
\Z # end of string
)""", re.X | re.S)
if match:
text = match.group(1)
self.append_node(parsetree.Text, text)
return True
else:
return False
def match_python_block(self):
match = self.match(r"<%(!)?")
if match:
(line, pos) = (self.matched_lineno, self.matched_charpos)
(text, end) = self.parse_until_text(r'%>')
text = adjust_whitespace(text) + "\n" # the trailing newline helps compiler.parse() not complain about indentation
self.append_node(parsetree.Code, self.escape_code(text), match.group(1)=='!', lineno=line, pos=pos)
return True
else:
return False
def match_expression(self):
match = self.match(r"\${")
if match:
(line, pos) = (self.matched_lineno, self.matched_charpos)
(text, end) = self.parse_until_text(r'\|', r'}')
if end == '|':
(escapes, end) = self.parse_until_text(r'}')
else:
escapes = ""
text = text.replace('\r\n', '\n')
self.append_node(parsetree.Expression, self.escape_code(text), escapes.strip(), lineno=line, pos=pos)
return True
else:
return False
def match_control_line(self):
match = self.match(r"(?<=^)[\t ]*(%|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)", re.M)
if match:
operator = match.group(1)
text = match.group(2)
if operator == '%':
m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
if not m2:
raise exceptions.SyntaxException("Invalid control line: '%s'" % text, **self.exception_kwargs)
(isend, keyword) = m2.group(1, 2)
isend = (isend is not None)
if isend:
if not len(self.control_line):
raise exceptions.SyntaxException("No starting keyword '%s' for '%s'" % (keyword, text), **self.exception_kwargs)
elif self.control_line[-1].keyword != keyword:
raise exceptions.SyntaxException("Keyword '%s' doesn't match keyword '%s'" % (text, self.control_line[-1].keyword), **self.exception_kwargs)
self.append_node(parsetree.ControlLine, keyword, isend, self.escape_code(text))
else:
self.append_node(parsetree.Comment, text)
return True
else:
return False
def match_comment(self):
"""matches the multiline version of a comment"""
match = self.match(r"<%doc>(.*?)</%doc>", re.S)
if match:
self.append_node(parsetree.Comment, match.group(1))
return True
else:
return False
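# Illustrative sketch, not part of this change: feeding a small template through
# the Lexer above and walking the resulting parse tree (node classes live in
# mako.parsetree).  The template text is an example only.
from mako.lexer import Lexer

tree = Lexer("% for x in values:\n  ${x | h}\n% endfor\n").parse()
for node in tree.get_children():
    print node          # ControlLine / Text / Expression reprs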

View File

@@ -1,152 +0,0 @@
# -*- coding: utf-8 -*-
# lookup.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import os, stat, posixpath, re
from mako import exceptions, util
from mako.template import Template
try:
import threading
except:
import dummy_threading as threading
class TemplateCollection(object):
def has_template(self, uri):
try:
self.get_template(uri)
return True
except exceptions.TemplateLookupException, e:
return False
def get_template(self, uri, relativeto=None):
raise NotImplementedError()
def filename_to_uri(self, uri, filename):
"""convert the given filename to a uri relative to this TemplateCollection."""
return uri
def adjust_uri(self, uri, filename):
"""adjust the given uri based on the calling filename.
when this method is called from the runtime, the 'filename' parameter
is taken directly to the 'filename' attribute of the calling
template. Therefore a custom TemplateCollection subclass can place any string
identifier desired in the "filename" parameter of the Template objects it constructs
and have them come back here."""
return uri
class TemplateLookup(TemplateCollection):
def __init__(self, directories=None, module_directory=None, filesystem_checks=True, collection_size=-1, format_exceptions=False,
error_handler=None, disable_unicode=False, output_encoding=None, encoding_errors='strict', cache_type=None, cache_dir=None, cache_url=None,
cache_enabled=True, modulename_callable=None, default_filters=None, buffer_filters=[], imports=None, input_encoding=None, preprocessor=None):
if isinstance(directories, basestring):
directories = [directories]
self.directories = [posixpath.normpath(d) for d in directories or []]
self.module_directory = module_directory
self.modulename_callable = modulename_callable
self.filesystem_checks = filesystem_checks
self.collection_size = collection_size
self.template_args = {
'format_exceptions':format_exceptions,
'error_handler':error_handler,
'disable_unicode':disable_unicode,
'output_encoding':output_encoding,
'encoding_errors':encoding_errors,
'input_encoding':input_encoding,
'module_directory':module_directory,
'cache_type':cache_type,
'cache_dir':cache_dir or module_directory,
'cache_url':cache_url,
'cache_enabled':cache_enabled,
'default_filters':default_filters,
'buffer_filters':buffer_filters,
'imports':imports,
'preprocessor':preprocessor}
if collection_size == -1:
self.__collection = {}
self._uri_cache = {}
else:
self.__collection = util.LRUCache(collection_size)
self._uri_cache = util.LRUCache(collection_size)
self._mutex = threading.Lock()
def get_template(self, uri):
try:
if self.filesystem_checks:
return self.__check(uri, self.__collection[uri])
else:
return self.__collection[uri]
except KeyError:
u = re.sub(r'^\/+', '', uri)
for dir in self.directories:
srcfile = posixpath.normpath(posixpath.join(dir, u))
if os.path.exists(srcfile):
return self.__load(srcfile, uri)
else:
raise exceptions.TopLevelLookupException("Cant locate template for uri '%s'" % uri)
def adjust_uri(self, uri, relativeto):
"""adjust the given uri based on the calling filename."""
if uri[0] != '/':
if relativeto is not None:
return posixpath.join(posixpath.dirname(relativeto), uri)
else:
return '/' + uri
else:
return uri
def filename_to_uri(self, filename):
try:
return self._uri_cache[filename]
except KeyError:
value = self.__relativeize(filename)
self._uri_cache[filename] = value
return value
def __relativeize(self, filename):
"""return the portion of a filename that is 'relative' to the directories in this lookup."""
filename = posixpath.normpath(filename)
for dir in self.directories:
if filename[0:len(dir)] == dir:
return filename[len(dir):]
else:
return None
def __load(self, filename, uri):
self._mutex.acquire()
try:
try:
# try returning from collection one more time in case concurrent thread already loaded
return self.__collection[uri]
except KeyError:
pass
try:
self.__collection[uri] = Template(uri=uri, filename=posixpath.normpath(filename), lookup=self, module_filename=(self.modulename_callable is not None and self.modulename_callable(filename, uri) or None), **self.template_args)
return self.__collection[uri]
except:
self.__collection.pop(uri, None)
raise
finally:
self._mutex.release()
def __check(self, uri, template):
if template.filename is None:
return template
if not os.path.exists(template.filename):
self.__collection.pop(uri, None)
raise exceptions.TemplateLookupException("Cant locate template for uri '%s'" % uri)
elif template.module._modified_time < os.stat(template.filename)[stat.ST_MTIME]:
self.__collection.pop(uri, None)
return self.__load(template.filename, uri)
else:
return template
def put_string(self, uri, text):
self.__collection[uri] = Template(text, lookup=self, uri=uri, **self.template_args)
def put_template(self, uri, template):
self.__collection[uri] = template
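# Illustrative sketch, not part of this change: typical use of TemplateLookup as
# defined above.  Paths are examples only.
from mako.lookup import TemplateLookup

lookup = TemplateLookup(directories=['/path/to/templates'],
                        module_directory='/tmp/mako_modules',
                        output_encoding='utf-8')
lookup.put_string("/hello.html", "hello, ${name}!")   # register an in-memory template
print lookup.get_template("/hello.html").render(name="mako")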

View File

@@ -1,422 +0,0 @@
# -*- coding: utf-8 -*-
# parsetree.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines the parse tree components for Mako templates."""
from mako import exceptions, ast, util, filters
import re
class Node(object):
"""base class for a Node in the parse tree."""
def __init__(self, source, lineno, pos, filename):
self.source = source
self.lineno = lineno
self.pos = pos
self.filename = filename
def exception_kwargs(self):
return {'source':self.source, 'lineno':self.lineno, 'pos':self.pos, 'filename':self.filename}
exception_kwargs = property(exception_kwargs)
def get_children(self):
return []
def accept_visitor(self, visitor):
def traverse(node):
for n in node.get_children():
n.accept_visitor(visitor)
method = getattr(visitor, "visit" + self.__class__.__name__, traverse)
method(self)
class TemplateNode(Node):
"""a 'container' node that stores the overall collection of nodes."""
def __init__(self, filename):
super(TemplateNode, self).__init__('', 0, 0, filename)
self.nodes = []
self.page_attributes = {}
def get_children(self):
return self.nodes
def __repr__(self):
return "TemplateNode(%s, %r)" % (util.sorted_dict_repr(self.page_attributes), self.nodes)
class ControlLine(Node):
"""defines a control line, a line-oriented python line or end tag.
e.g.::
% if foo:
(markup)
% endif
"""
def __init__(self, keyword, isend, text, **kwargs):
super(ControlLine, self).__init__(**kwargs)
self.text = text
self.keyword = keyword
self.isend = isend
self.is_primary = keyword in ['for','if', 'while', 'try']
if self.isend:
self._declared_identifiers = []
self._undeclared_identifiers = []
else:
code = ast.PythonFragment(text, **self.exception_kwargs)
self._declared_identifiers = code.declared_identifiers
self._undeclared_identifiers = code.undeclared_identifiers
def declared_identifiers(self):
return self._declared_identifiers
def undeclared_identifiers(self):
return self._undeclared_identifiers
def is_ternary(self, keyword):
"""return true if the given keyword is a ternary keyword for this ControlLine"""
return keyword in {
'if':util.Set(['else', 'elif']),
'try':util.Set(['except', 'finally']),
'for':util.Set(['else'])
}.get(self.keyword, [])
def __repr__(self):
return "ControlLine(%r, %r, %r, %r)" % (
self.keyword,
self.text,
self.isend,
(self.lineno, self.pos)
)
class Text(Node):
"""defines plain text in the template."""
def __init__(self, content, **kwargs):
super(Text, self).__init__(**kwargs)
self.content = content
def __repr__(self):
return "Text(%r, %r)" % (self.content, (self.lineno, self.pos))
class Code(Node):
"""defines a Python code block, either inline or module level.
e.g.::
inline:
<%
x = 12
%>
module level:
<%!
import logger
%>
"""
def __init__(self, text, ismodule, **kwargs):
super(Code, self).__init__(**kwargs)
self.text = text
self.ismodule = ismodule
self.code = ast.PythonCode(text, **self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers
def undeclared_identifiers(self):
return self.code.undeclared_identifiers
def __repr__(self):
return "Code(%r, %r, %r)" % (
self.text,
self.ismodule,
(self.lineno, self.pos)
)
class Comment(Node):
"""defines a comment line.
# this is a comment
"""
def __init__(self, text, **kwargs):
super(Comment, self).__init__(**kwargs)
self.text = text
def __repr__(self):
return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
class Expression(Node):
"""defines an inline expression.
${x+y}
"""
def __init__(self, text, escapes, **kwargs):
super(Expression, self).__init__(**kwargs)
self.text = text
self.escapes = escapes
self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs)
self.code = ast.PythonCode(text, **self.exception_kwargs)
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
# TODO: make the "filter" shortcut list configurable at parse/gen time
return self.code.undeclared_identifiers.union(
self.escapes_code.undeclared_identifiers.difference(
util.Set(filters.DEFAULT_ESCAPES.keys())
)
)
def __repr__(self):
return "Expression(%r, %r, %r)" % (
self.text,
self.escapes_code.args,
(self.lineno, self.pos)
)
class _TagMeta(type):
"""metaclass to allow Tag to produce a subclass according to its keyword"""
_classmap = {}
def __init__(cls, clsname, bases, dict):
if cls.__keyword__ is not None:
cls._classmap[cls.__keyword__] = cls
super(_TagMeta, cls).__init__(clsname, bases, dict)
def __call__(cls, keyword, attributes, **kwargs):
if ":" in keyword:
ns, defname = keyword.split(':')
return type.__call__(CallNamespaceTag, ns, defname, attributes, **kwargs)
try:
cls = _TagMeta._classmap[keyword]
except KeyError:
raise exceptions.CompileException(
"No such tag: '%s'" % keyword,
source=kwargs['source'],
lineno=kwargs['lineno'],
pos=kwargs['pos'],
filename=kwargs['filename']
)
return type.__call__(cls, keyword, attributes, **kwargs)
class Tag(Node):
"""abstract base class for tags.
<%sometag/>
<%someothertag>
stuff
</%someothertag>
"""
__metaclass__ = _TagMeta
__keyword__ = None
def __init__(self, keyword, attributes, expressions, nonexpressions, required, **kwargs):
"""construct a new Tag instance.
this constructor is not called directly; it is only called by subclasses.
keyword - the tag keyword
attributes - raw dictionary of attribute key/value pairs
expressions - a util.Set of identifiers that are legal attributes, which can also contain embedded expressions
nonexpressions - a util.Set of identifiers that are legal attributes, which cannot contain embedded expressions
**kwargs - other arguments passed to the Node superclass (lineno, pos)
"""
super(Tag, self).__init__(**kwargs)
self.keyword = keyword
self.attributes = attributes
self._parse_attributes(expressions, nonexpressions)
missing = [r for r in required if r not in self.parsed_attributes]
if len(missing):
raise exceptions.CompileException(
"Missing attribute(s): %s" % ",".join([repr(m) for m in missing]),
**self.exception_kwargs)
self.parent = None
self.nodes = []
def is_root(self):
return self.parent is None
def get_children(self):
return self.nodes
def _parse_attributes(self, expressions, nonexpressions):
undeclared_identifiers = util.Set()
self.parsed_attributes = {}
for key in self.attributes:
if key in expressions:
expr = []
for x in re.split(r'(\${.+?})', self.attributes[key]):
m = re.match(r'^\${(.+?)}$', x)
if m:
code = ast.PythonCode(m.group(1), **self.exception_kwargs)
undeclared_identifiers = undeclared_identifiers.union(code.undeclared_identifiers)
expr.append("(%s)" % m.group(1))
else:
if x:
expr.append(repr(x))
self.parsed_attributes[key] = " + ".join(expr) or repr('')
elif key in nonexpressions:
if re.search(r'\${.+?}', self.attributes[key]):
raise exceptions.CompileException(
"Attribute '%s' in tag '%s' does not allow embedded expressions" % (key, self.keyword),
**self.exception_kwargs)
self.parsed_attributes[key] = repr(self.attributes[key])
else:
raise exceptions.CompileException("Invalid attribute for tag '%s': '%s'" %(self.keyword, key), **self.exception_kwargs)
self.expression_undeclared_identifiers = undeclared_identifiers
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
return self.expression_undeclared_identifiers
def __repr__(self):
return "%s(%r, %s, %r, %r)" % (self.__class__.__name__,
self.keyword,
util.sorted_dict_repr(self.attributes),
(self.lineno, self.pos),
[repr(x) for x in self.nodes]
)
class IncludeTag(Tag):
__keyword__ = 'include'
def __init__(self, keyword, attributes, **kwargs):
super(IncludeTag, self).__init__(keyword, attributes, ('file', 'import', 'args'), (), ('file',), **kwargs)
self.page_args = ast.PythonCode("__DUMMY(%s)" % attributes.get('args', ''), **self.exception_kwargs)
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
identifiers = self.page_args.undeclared_identifiers.difference(util.Set(["__DUMMY"]))
return identifiers.union(super(IncludeTag, self).undeclared_identifiers())
class NamespaceTag(Tag):
__keyword__ = 'namespace'
def __init__(self, keyword, attributes, **kwargs):
super(NamespaceTag, self).__init__(keyword, attributes, (), ('name','inheritable','file','import','module'), (), **kwargs)
self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self))))
if not 'name' in attributes and not 'import' in attributes:
raise exceptions.CompileException("'name' and/or 'import' attributes are required for <%namespace>", **self.exception_kwargs)
def declared_identifiers(self):
return []
class TextTag(Tag):
__keyword__ = 'text'
def __init__(self, keyword, attributes, **kwargs):
super(TextTag, self).__init__(keyword, attributes, (), ('filter',), (), **kwargs)
self.filter_args = ast.ArgumentList(attributes.get('filter', ''), **self.exception_kwargs)
class DefTag(Tag):
__keyword__ = 'def'
def __init__(self, keyword, attributes, **kwargs):
super(DefTag, self).__init__(
keyword,
attributes,
('buffered', 'cached', 'cache_key', 'cache_timeout', 'cache_type', 'cache_dir', 'cache_url'),
('name','filter'),
('name',),
**kwargs)
name = attributes['name']
if re.match(r'^[\w_]+$',name):
raise exceptions.CompileException("Missing parenthesis in %def", **self.exception_kwargs)
self.function_decl = ast.FunctionDecl("def " + name + ":pass", **self.exception_kwargs)
self.name = self.function_decl.funcname
self.filter_args = ast.ArgumentList(attributes.get('filter', ''), **self.exception_kwargs)
def declared_identifiers(self):
return self.function_decl.argnames
def undeclared_identifiers(self):
res = []
for c in self.function_decl.defaults:
res += list(ast.PythonCode(c, **self.exception_kwargs).undeclared_identifiers)
return res + list(self.filter_args.undeclared_identifiers.difference(util.Set(filters.DEFAULT_ESCAPES.keys())))
class CallTag(Tag):
__keyword__ = 'call'
def __init__(self, keyword, attributes, **kwargs):
super(CallTag, self).__init__(keyword, attributes, ('args',), ('expr',), ('expr',), **kwargs)
self.expression = attributes['expr']
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''), **self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.argnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers
class CallNamespaceTag(Tag):
def __init__(self, namespace, defname, attributes, **kwargs):
super(CallNamespaceTag, self).__init__(
namespace + ":" + defname,
attributes,
tuple(attributes.keys()) + ('args', ),
(),
(),
**kwargs)
self.expression = "%s.%s(%s)" % (namespace, defname, ",".join(["%s=%s" % (k, v) for k, v in self.parsed_attributes.iteritems() if k != 'args']))
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''), **self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.argnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers
class InheritTag(Tag):
__keyword__ = 'inherit'
def __init__(self, keyword, attributes, **kwargs):
super(InheritTag, self).__init__(keyword, attributes, ('file',), (), ('file',), **kwargs)
class PageTag(Tag):
__keyword__ = 'page'
def __init__(self, keyword, attributes, **kwargs):
super(PageTag, self).__init__(
keyword,
attributes,
('cached', 'cache_key', 'cache_timeout', 'cache_type', 'cache_dir', 'cache_url', 'args', 'expression_filter'),
(),
(),
**kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''), **self.exception_kwargs)
self.filter_args = ast.ArgumentList(attributes.get('expression_filter', ''), **self.exception_kwargs)
def declared_identifiers(self):
return self.body_decl.argnames
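The _TagMeta metaclass above registers each Tag subclass under its __keyword__ and routes Tag(keyword, ...) calls to the registered class. A minimal, self-contained sketch of that dispatch pattern (illustrative names only, not part of Mako):

class _NodeMeta(type):
    _classmap = {}
    def __init__(cls, name, bases, d):
        if cls.__keyword__ is not None:
            _NodeMeta._classmap[cls.__keyword__] = cls
        super(_NodeMeta, cls).__init__(name, bases, d)
    def __call__(cls, keyword, **kw):
        # dispatch construction to the subclass registered for this keyword
        return type.__call__(_NodeMeta._classmap[keyword], keyword, **kw)

class BaseNode(object):
    __metaclass__ = _NodeMeta
    __keyword__ = None
    def __init__(self, keyword, **kw):
        self.keyword = keyword

class IncludeNode(BaseNode):
    __keyword__ = 'include'

node = BaseNode('include')
assert isinstance(node, IncludeNode)   # the metaclass picked the subclass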

View File

@@ -1,268 +0,0 @@
# -*- coding: utf-8 -*-
# pygen.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for generating and formatting literal Python code."""
import re, string
from StringIO import StringIO
class PythonPrinter(object):
def __init__(self, stream):
# indentation counter
self.indent = 0
# a stack storing information about why we incremented
# the indentation counter, to help us determine if we
# should decrement it
self.indent_detail = []
# the string of whitespace multiplied by the indent
# counter to produce a line
self.indentstring = " "
# the stream we are writing to
self.stream = stream
# a list of lines that represents a buffered "block" of code,
# which can be later printed relative to an indent level
self.line_buffer = []
self.in_indent_lines = False
self._reset_multi_line_flags()
def write(self, text):
self.stream.write(text)
def write_indented_block(self, block):
"""print a line or lines of python which already contain indentation.
The indentation of the total block of lines will be adjusted to that of
the current indent level."""
self.in_indent_lines = False
for l in re.split(r'\r?\n', block):
self.line_buffer.append(l)
def writelines(self, *lines):
"""print a series of lines of python."""
for line in lines:
self.writeline(line)
def writeline(self, line):
"""print a line of python, indenting it according to the current indent level.
this also adjusts the indentation counter according to the content of the line."""
if not self.in_indent_lines:
self._flush_adjusted_lines()
self.in_indent_lines = True
decreased_indent = False
if (line is None or
re.match(r"^\s*#",line) or
re.match(r"^\s*$", line)
):
hastext = False
else:
hastext = True
is_comment = line and len(line) and line[0] == '#'
# see if this line should decrease the indentation level
if (not decreased_indent and
not is_comment and
(not hastext or self._is_unindentor(line))
):
if self.indent > 0:
self.indent -=1
# if the indent_detail stack is empty, the user
# probably put extra closures - the resulting
# module won't compile.
if len(self.indent_detail) == 0:
raise Exception("Too many whitespace closures")
self.indent_detail.pop()
if line is None:
return
# write the line
self.stream.write(self._indent_line(line) + "\n")
# see if this line should increase the indentation level.
# note that a line can both decrease (before printing) and
# then increase (after printing) the indentation level.
if re.search(r":[ \t]*(?:#.*)?$", line):
# increment indentation count, and also
# keep track of what the keyword was that indented us,
# if it is a python compound statement keyword
# where we might have to look for an "unindent" keyword
match = re.match(r"^\s*(if|try|elif|while|for)", line)
if match:
# it's a "compound" keyword, so we will check for "unindentors"
indentor = match.group(1)
self.indent +=1
self.indent_detail.append(indentor)
else:
indentor = None
# it's not a "compound" keyword, but let's also
# test for valid Python keywords that might be indenting us,
# else assume it's a non-indenting line
m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line)
if m2:
self.indent += 1
self.indent_detail.append(indentor)
def close(self):
"""close this printer, flushing any remaining lines."""
self._flush_adjusted_lines()
def _is_unindentor(self, line):
"""return true if the given line is an 'unindentor', relative to the last 'indent' event received."""
# no indentation detail has been pushed on; return False
if len(self.indent_detail) == 0:
return False
indentor = self.indent_detail[-1]
# the last indent keyword we grabbed is not a
# compound statement keyword; return False
if indentor is None:
return False
# if the current line doesn't have one of the "unindentor" keywords,
# return False
match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
if not match:
return False
# whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this
# is probably good enough
return True
# should we decide that it's not good enough, here's
# more stuff to check.
#keyword = match.group(1)
# match the original indent keyword
#for crit in [
# (r'if|elif', r'else|elif'),
# (r'try', r'except|finally|else'),
# (r'while|for', r'else'),
#]:
# if re.match(crit[0], indentor) and re.match(crit[1], keyword): return True
#return False
def _indent_line(self, line, stripspace = ''):
"""indent the given line according to the current indent level.
stripspace is a string of space that will be truncated from the start of the line
before indenting."""
return re.sub(r"^%s" % stripspace, self.indentstring * self.indent, line)
def _reset_multi_line_flags(self):
"""reset the flags which would indicate we are in a backslashed or triple-quoted section."""
(self.backslashed, self.triplequoted) = (False, False)
def _in_multi_line(self, line):
"""return true if the given line is part of a multi-line block, via backslash or triple-quote."""
# we are only looking for explicitly joined lines here,
# not implicit ones (i.e. brackets, braces etc.). this is just
# to guard against the possibility of modifying the space inside
# of a literal multiline string with unfortunately placed whitespace
current_state = (self.backslashed or self.triplequoted)
if re.search(r"\\$", line):
self.backslashed = True
else:
self.backslashed = False
triples = len(re.findall(r"\"\"\"|\'\'\'", line))
if triples == 1 or triples % 2 != 0:
self.triplequoted = not self.triplequoted
return current_state
def _flush_adjusted_lines(self):
stripspace = None
self._reset_multi_line_flags()
for entry in self.line_buffer:
if self._in_multi_line(entry):
self.stream.write(entry + "\n")
else:
entry = string.expandtabs(entry)
if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
stripspace = re.match(r"^([ \t]*)", entry).group(1)
self.stream.write(self._indent_line(entry, stripspace) + "\n")
self.line_buffer = []
self._reset_multi_line_flags()
def adjust_whitespace(text):
"""remove the left-whitespace margin of a block of Python code."""
state = [False, False]
(backslashed, triplequoted) = (0, 1)
def in_multi_line(line):
start_state = (state[backslashed] or state[triplequoted])
if re.search(r"\\$", line):
state[backslashed] = True
else:
state[backslashed] = False
def match(reg, t):
m = re.match(reg, t)
if m:
return m, t[len(m.group(0)):]
else:
return None, t
while line:
if state[triplequoted]:
m, line = match(r"%s" % state[triplequoted], line)
if m:
state[triplequoted] = False
else:
m, line = match(r".*?(?=%s|$)" % state[triplequoted], line)
else:
m, line = match(r'#', line)
if m:
return start_state
m, line = match(r"\"\"\"|\'\'\'", line)
if m:
state[triplequoted] = m.group(0)
continue
m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
return start_state
def _indent_line(line, stripspace = ''):
return re.sub(r"^%s" % stripspace, '', line)
lines = []
stripspace = None
for line in re.split(r'\r?\n', text):
if in_multi_line(line):
lines.append(line)
else:
line = string.expandtabs(line)
if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
stripspace = re.match(r"^([ \t]*)", line).group(1)
lines.append(_indent_line(line, stripspace))
return "\n".join(lines)
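A brief usage sketch of the two helpers above (assuming this module is importable as mako.pygen, as it is in the tree being removed): writeline() indents after ':'-terminated lines, writeline(None) closes a block, and adjust_whitespace() strips a common left margin.

from StringIO import StringIO
from mako.pygen import PythonPrinter, adjust_whitespace

buf = StringIO()
printer = PythonPrinter(buf)
printer.writeline("def render(context):")   # ends with ':', so the indent level goes up
printer.writeline("return 42")              # emitted one level deep
printer.writeline(None)                     # None pops the indent level back down
printer.close()
# buf.getvalue() == "def render(context):\n    return 42\n"

print adjust_whitespace("    x = 1\n    if x:\n        y = 2\n")
# x = 1
# if x:
#     y = 2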

View File

@@ -1,372 +0,0 @@
# -*- coding: utf-8 -*-
# ast.py
# Copyright (C) Mako developers
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via _ast on Python 2.5 and above; otherwise the compiler
module is used.
"""
from StringIO import StringIO
from mako import exceptions, util
# words that cannot be assigned to (notably smaller than the total keys in __builtins__)
reserved = util.Set(['True', 'False', 'None'])
try:
import _ast
util.restore__ast(_ast)
import _ast_util
except ImportError:
_ast = None
from compiler import parse as compiler_parse
from compiler import visitor
def parse(code, mode='exec', **exception_kwargs):
"""Parse an expression into AST"""
try:
if _ast:
return _ast_util.parse(code, '<unknown>', mode)
else:
return compiler_parse(code, mode)
except Exception, e:
raise exceptions.SyntaxException("(%s) %s (%s)" % (e.__class__.__name__, str(e), repr(code[0:50])), **exception_kwargs)
if _ast:
class FindIdentifiers(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.in_assign_targets = False
self.local_ident_stack = {}
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
def visit_ClassDef(self, node):
self._add_declared(node.name)
def visit_Assign(self, node):
# flip around the visiting of Assign so the expression gets evaluated first,
# in the case of a clause like "x=x+5" (x is undeclared)
self.visit(node.value)
in_a = self.in_assign_targets
self.in_assign_targets = True
for n in node.targets:
self.visit(n)
self.in_assign_targets = in_a
def visit_FunctionDef(self, node):
self._add_declared(node.name)
# push function state onto stack. don't log any
# more identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared".
# track argument names in each function header so they aren't counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.args.args:
if arg.id in self.local_ident_stack:
saved[arg.id] = True
else:
self.local_ident_stack[arg.id] = True
for n in node.body:
self.visit(n)
self.in_function = inf
for arg in node.args.args:
if arg.id not in saved:
del self.local_ident_stack[arg.id]
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
self._add_declared(node.id)
if node.id not in reserved and node.id not in self.listener.declared_identifiers and node.id not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.id)
def visit_Import(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
self._add_declared(name.name.split('.')[0])
def visit_ImportFrom(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
if name.name == '*':
raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **self.exception_kwargs)
self._add_declared(name.name)
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = self.listener.declared_identifiers.union(p.declared_identifiers)
self.listener.undeclared_identifiers = self.listener.undeclared_identifiers.union(p.undeclared_identifiers)
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
argnames = [arg.id for arg in node.args.args]
if node.args.vararg:
argnames.append(node.args.vararg)
if node.args.kwarg:
argnames.append(node.args.kwarg)
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(' ' * 4)
self.generator.visit(astnode)
def value(self):
return ''.join(self.generator.result)
else:
class FindIdentifiers(object):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.local_ident_stack = {}
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
def visitClass(self, node, *args):
self._add_declared(node.name)
def visitAssName(self, node, *args):
self._add_declared(node.name)
def visitAssign(self, node, *args):
# flip around the visiting of Assign so the expression gets evaluated first,
# in the case of a clause like "x=x+5" (x is undeclared)
self.visit(node.expr, *args)
for n in node.nodes:
self.visit(n, *args)
def visitFunction(self,node, *args):
self._add_declared(node.name)
# push function state onto stack. don't log any
# more identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared".
# track argument names in each function header so they aren't counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.argnames:
if arg in self.local_ident_stack:
saved[arg] = True
else:
self.local_ident_stack[arg] = True
for n in node.getChildNodes():
self.visit(n, *args)
self.in_function = inf
for arg in node.argnames:
if arg not in saved:
del self.local_ident_stack[arg]
def visitFor(self, node, *args):
# flip around visit
self.visit(node.list, *args)
self.visit(node.assign, *args)
self.visit(node.body, *args)
def visitName(self, node, *args):
if node.name not in reserved and node.name not in self.listener.declared_identifiers and node.name not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.name)
def visitImport(self, node, *args):
for (mod, alias) in node.names:
if alias is not None:
self._add_declared(alias)
else:
self._add_declared(mod.split('.')[0])
def visitFrom(self, node, *args):
for (mod, alias) in node.names:
if alias is not None:
self._add_declared(alias)
else:
if mod == '*':
raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **self.exception_kwargs)
self._add_declared(mod)
def visit(self, expr):
visitor.walk(expr, self) #, walker=walker())
class FindTuple(object):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visitTuple(self, node, *args):
for n in node.nodes:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = self.listener.declared_identifiers.union(p.declared_identifiers)
self.listener.undeclared_identifiers = self.listener.undeclared_identifiers.union(p.undeclared_identifiers)
def visit(self, expr):
visitor.walk(expr, self) #, walker=walker())
class ParseFunc(object):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visitFunction(self, node, *args):
self.listener.funcname = node.name
self.listener.argnames = node.argnames
self.listener.defaults = node.defaults
self.listener.varargs = node.varargs
self.listener.kwargs = node.kwargs
def visit(self, expr):
visitor.walk(expr, self)
class ExpressionGenerator(object):
"""given an AST node, generates an equivalent literal Python expression."""
def __init__(self, astnode):
self.buf = StringIO()
visitor.walk(astnode, self) #, walker=walker())
def value(self):
return self.buf.getvalue()
def operator(self, op, node, *args):
self.buf.write("(")
self.visit(node.left, *args)
self.buf.write(" %s " % op)
self.visit(node.right, *args)
self.buf.write(")")
def booleanop(self, op, node, *args):
self.visit(node.nodes[0])
for n in node.nodes[1:]:
self.buf.write(" " + op + " ")
self.visit(n, *args)
def visitConst(self, node, *args):
self.buf.write(repr(node.value))
def visitAssName(self, node, *args):
# TODO: figure out OP_ASSIGN, other OP_s
self.buf.write(node.name)
def visitName(self, node, *args):
self.buf.write(node.name)
def visitMul(self, node, *args):
self.operator("*", node, *args)
def visitAnd(self, node, *args):
self.booleanop("and", node, *args)
def visitOr(self, node, *args):
self.booleanop("or", node, *args)
def visitBitand(self, node, *args):
self.booleanop("&", node, *args)
def visitBitor(self, node, *args):
self.booleanop("|", node, *args)
def visitBitxor(self, node, *args):
self.booleanop("^", node, *args)
def visitAdd(self, node, *args):
self.operator("+", node, *args)
def visitGetattr(self, node, *args):
self.visit(node.expr, *args)
self.buf.write(".%s" % node.attrname)
def visitSub(self, node, *args):
self.operator("-", node, *args)
def visitNot(self, node, *args):
self.buf.write("not ")
self.visit(node.expr)
def visitDiv(self, node, *args):
self.operator("/", node, *args)
def visitFloorDiv(self, node, *args):
self.operator("//", node, *args)
def visitSubscript(self, node, *args):
self.visit(node.expr)
self.buf.write("[")
[self.visit(x) for x in node.subs]
self.buf.write("]")
def visitUnarySub(self, node, *args):
self.buf.write("-")
self.visit(node.expr)
def visitUnaryAdd(self, node, *args):
self.buf.write("-")
self.visit(node.expr)
def visitSlice(self, node, *args):
self.visit(node.expr)
self.buf.write("[")
if node.lower is not None:
self.visit(node.lower)
self.buf.write(":")
if node.upper is not None:
self.visit(node.upper)
self.buf.write("]")
def visitDict(self, node):
self.buf.write("{")
c = node.getChildren()
for i in range(0, len(c), 2):
self.visit(c[i])
self.buf.write(": ")
self.visit(c[i+1])
if i<len(c) -2:
self.buf.write(", ")
self.buf.write("}")
def visitTuple(self, node):
self.buf.write("(")
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i<len(c) - 1:
self.buf.write(", ")
self.buf.write(")")
def visitList(self, node):
self.buf.write("[")
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i<len(c) - 1:
self.buf.write(", ")
self.buf.write("]")
def visitListComp(self, node):
self.buf.write("[")
self.visit(node.expr)
self.buf.write(" ")
for n in node.quals:
self.visit(n)
self.buf.write("]")
def visitListCompFor(self, node):
self.buf.write(" for ")
self.visit(node.assign)
self.buf.write(" in ")
self.visit(node.list)
for n in node.ifs:
self.visit(n)
def visitListCompIf(self, node):
self.buf.write(" if ")
self.visit(node.test)
def visitCompare(self, node):
self.visit(node.expr)
for tup in node.ops:
self.buf.write(tup[0])
self.visit(tup[1])
def visitCallFunc(self, node, *args):
self.visit(node.node)
self.buf.write("(")
if len(node.args):
self.visit(node.args[0])
for a in node.args[1:]:
self.buf.write(", ")
self.visit(a)
self.buf.write(")")
class walker(visitor.ASTVisitor):
def dispatch(self, node, *args):
print "Node:", str(node)
#print "dir:", dir(node)
return visitor.ASTVisitor.dispatch(self, node, *args)
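The FindIdentifiers visitors above split names into declared and undeclared sets, visiting the right-hand side of an assignment before its targets so that "x = x + 5" records x as undeclared. The same idea expressed against the modern ast module, as a standalone illustration (not Mako code):

import ast

class IdentifierFinder(ast.NodeVisitor):
    def __init__(self):
        self.declared = set()
        self.undeclared = set()
    def visit_Assign(self, node):
        self.visit(node.value)              # evaluate the RHS first, as Mako does
        for target in node.targets:
            self.visit(target)
    def visit_Name(self, node):
        if isinstance(node.ctx, ast.Store):
            self.declared.add(node.id)
        elif node.id not in self.declared:
            self.undeclared.add(node.id)

finder = IdentifierFinder()
finder.visit(ast.parse("x = x + 5"))
# finder.declared == set(['x']), finder.undeclared == set(['x'])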

View File

@@ -1,398 +0,0 @@
# -*- coding: utf-8 -*-
# runtime.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides runtime services for templates, including Context, Namespace, and various helper functions."""
from mako import exceptions, util
import __builtin__, inspect, sys
class Context(object):
"""provides runtime namespace, output buffer, and various callstacks for templates."""
def __init__(self, buffer, **data):
self._buffer_stack = [buffer]
self._orig = data # original data, minus the builtins
self._data = __builtin__.__dict__.copy() # the context data which includes builtins
self._data.update(data)
self._kwargs = data.copy()
self._with_template = None
self.namespaces = {}
# "capture" function which proxies to the generic "capture" function
self._data['capture'] = lambda x, *args, **kwargs: capture(self, x, *args, **kwargs)
# "caller" stack used by def calls with content
self.caller_stack = self._data['caller'] = CallerStack()
lookup = property(lambda self:self._with_template.lookup)
kwargs = property(lambda self:self._kwargs.copy())
def push_caller(self, caller):
self.caller_stack.append(caller)
def pop_caller(self):
del self.caller_stack[-1]
def keys(self):
return self._data.keys()
def __getitem__(self, key):
return self._data[key]
def _push_writer(self):
"""push a capturing buffer onto this Context and return the new Writer function."""
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write
def _push_buffer(self):
"""push a capturing buffer onto this Context."""
self._push_writer()
def _pop_buffer(self):
"""pop the most recent capturing buffer from this Context."""
return self._buffer_stack.pop()
def get(self, key, default=None):
return self._data.get(key, default)
def write(self, string):
"""write a string to this Context's underlying output buffer."""
self._buffer_stack[-1].write(string)
def writer(self):
"""return the current writer function"""
return self._buffer_stack[-1].write
def _copy(self):
c = Context.__new__(Context)
c._buffer_stack = self._buffer_stack
c._data = self._data.copy()
c._orig = self._orig
c._kwargs = self._kwargs
c._with_template = self._with_template
c.namespaces = self.namespaces
c.caller_stack = self.caller_stack
return c
def locals_(self, d):
"""create a new Context with a copy of this Context's current state, updated with the given dictionary."""
if len(d) == 0:
return self
c = self._copy()
c._data.update(d)
return c
def _clean_inheritance_tokens(self):
"""create a new copy of this Context with tokens related to inheritance state removed."""
c = self._copy()
x = c._data
x.pop('self', None)
x.pop('parent', None)
x.pop('next', None)
return c
class CallerStack(list):
def __init__(self):
self.nextcaller = None
def __nonzero__(self):
return self._get_caller() and True or False
def _get_caller(self):
return self[-1]
def __getattr__(self, key):
return getattr(self._get_caller(), key)
def _push_frame(self):
self.append(self.nextcaller or None)
self.nextcaller = None
def _pop_frame(self):
self.nextcaller = self.pop()
class Undefined(object):
"""represents an undefined value in a template."""
def __str__(self):
raise NameError("Undefined")
def __nonzero__(self):
return False
UNDEFINED = Undefined()
class _NSAttr(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
ns = self.__parent
while ns:
if hasattr(ns.module, key):
return getattr(ns.module, key)
else:
ns = ns.inherits
raise AttributeError(key)
class Namespace(object):
"""provides access to collections of rendering methods, which can be local, from other templates, or from imported modules"""
def __init__(self, name, context, module=None, template=None, templateuri=None, callables=None, inherits=None, populate_self=True, calling_uri=None):
self.name = name
if module is not None:
mod = __import__(module)
for token in module.split('.')[1:]:
mod = getattr(mod, token)
self._module = mod
else:
self._module = None
if templateuri is not None:
self.template = _lookup_template(context, templateuri, calling_uri)
self._templateuri = self.template.module._template_uri
else:
self.template = template
if self.template is not None:
self._templateuri = self.template.module._template_uri
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.func_name, c) for c in callables])
else:
self.callables = None
if populate_self and self.template is not None:
(lclcallable, lclcontext) = _populate_self_namespace(context, self.template, self_ns=self)
module = property(lambda s:s._module or s.template.module)
filename = property(lambda s:s._module and s._module.__file__ or s.template.filename)
uri = property(lambda s:s.template.uri)
def attr(self):
if not hasattr(self, '_attr'):
self._attr = _NSAttr(self)
return self._attr
attr = property(attr)
def get_namespace(self, uri):
"""return a namespace corresponding to the given template uri.
if a relative uri, it is adjusted to that of the template of this namespace"""
key = (self, uri)
if self.context.namespaces.has_key(key):
return self.context.namespaces[key]
else:
ns = Namespace(uri, self.context._copy(), templateuri=uri, calling_uri=self._templateuri)
self.context.namespaces[key] = ns
return ns
def get_template(self, uri):
return _lookup_template(self.context, uri, self._templateuri)
def get_cached(self, key, **kwargs):
if self.template:
if not self.template.cache_enabled:
createfunc = kwargs.get('createfunc', None)
if createfunc:
return createfunc()
else:
return None
if self.template.cache_dir:
kwargs.setdefault('data_dir', self.template.cache_dir)
if self.template.cache_type:
kwargs.setdefault('type', self.template.cache_type)
if self.template.cache_url:
kwargs.setdefault('url', self.template.cache_url)
return self.cache.get(key, **kwargs)
def cache(self):
return self.template.cache
cache = property(cache)
def include_file(self, uri, **kwargs):
"""include a file at the given uri"""
_include_file(self.context, uri, self._templateuri, **kwargs)
def _populate(self, d, l):
for ident in l:
if ident == '*':
for (k, v) in self._get_star():
d[k] = v
else:
d[ident] = getattr(self, ident)
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
if self.template:
def get(key):
callable_ = self.template.get_def(key).callable_
return lambda *args, **kwargs:callable_(self.context, *args, **kwargs)
for k in self.template.module._exports:
yield (k, get(k))
if self._module:
def get(key):
callable_ = getattr(self._module, key)
return lambda *args, **kwargs:callable_(self.context, *args, **kwargs)
for k in dir(self._module):
if k[0] != '_':
yield (k, get(k))
def __getattr__(self, key):
if self.callables and key in self.callables:
return self.callables[key]
if self.template and self.template.has_def(key):
callable_ = self.template.get_def(key).callable_
return lambda *args, **kwargs:callable_(self.context, *args, **kwargs)
if self._module and hasattr(self._module, key):
callable_ = getattr(self._module, key)
return lambda *args, **kwargs:callable_(self.context, *args, **kwargs)
if self.inherits is not None:
return getattr(self.inherits, key)
raise exceptions.RuntimeException("Namespace '%s' has no member '%s'" % (self.name, key))
def supports_caller(func):
"""apply a caller_stack compatibility decorator to a plain Python function."""
def wrap_stackframe(context, *args, **kwargs):
context.caller_stack._push_frame()
try:
return func(context, *args, **kwargs)
finally:
context.caller_stack._pop_frame()
return wrap_stackframe
def capture(context, callable_, *args, **kwargs):
"""execute the given template def, capturing the output into a buffer."""
if not callable(callable_):
raise exceptions.RuntimeException("capture() function expects a callable as its argument (i.e. capture(func, *args, **kwargs))")
context._push_buffer()
try:
callable_(*args, **kwargs)
finally:
buf = context._pop_buffer()
return buf.getvalue()
def _include_file(context, uri, calling_uri, **kwargs):
"""locate the template from the given uri and include it in the current output."""
template = _lookup_template(context, uri, calling_uri)
(callable_, ctx) = _populate_self_namespace(context._clean_inheritance_tokens(), template)
callable_(ctx, **_kwargs_for_callable(callable_, context._orig, **kwargs))
def _inherit_from(context, uri, calling_uri):
"""called by the _inherit method in template modules to set up the inheritance chain at the start
of a template's execution."""
if uri is None:
return None
template = _lookup_template(context, uri, calling_uri)
self_ns = context['self']
ih = self_ns
while ih.inherits is not None:
ih = ih.inherits
lclcontext = context.locals_({'next':ih})
ih.inherits = Namespace("self:%s" % template.uri, lclcontext, template = template, populate_self=False)
context._data['parent'] = lclcontext._data['local'] = ih.inherits
callable_ = getattr(template.module, '_mako_inherit', None)
if callable_ is not None:
ret = callable_(template, lclcontext)
if ret:
return ret
gen_ns = getattr(template.module, '_mako_generate_namespaces', None)
if gen_ns is not None:
gen_ns(context)
return (template.callable_, lclcontext)
def _lookup_template(context, uri, relativeto):
lookup = context._with_template.lookup
if lookup is None:
raise exceptions.TemplateLookupException("Template '%s' has no TemplateLookup associated" % context._with_template.uri)
uri = lookup.adjust_uri(uri, relativeto)
try:
return lookup.get_template(uri)
except exceptions.TopLevelLookupException, e:
raise exceptions.TemplateLookupException(str(e))
def _populate_self_namespace(context, template, self_ns=None):
if self_ns is None:
self_ns = Namespace('self:%s' % template.uri, context, template=template, populate_self=False)
context._data['self'] = context._data['local'] = self_ns
if hasattr(template.module, '_mako_inherit'):
ret = template.module._mako_inherit(template, context)
if ret:
return ret
return (template.callable_, context)
def _render(template, callable_, args, data, as_unicode=False):
"""create a Context and return the string output of the given template and template callable."""
if as_unicode:
buf = util.FastEncodingBuffer(unicode=True)
elif template.output_encoding:
buf = util.FastEncodingBuffer(unicode=as_unicode, encoding=template.output_encoding, errors=template.encoding_errors)
else:
buf = util.StringIO()
context = Context(buf, **data)
context._with_template = template
_render_context(template, callable_, context, *args, **_kwargs_for_callable(callable_, data))
return context._pop_buffer().getvalue()
def _kwargs_for_callable(callable_, data, **kwargs):
argspec = inspect.getargspec(callable_)
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _render_context(tmpl, callable_, context, *args, **kwargs):
import mako.template as template
# create polymorphic 'self' namespace for this template with possibly updated context
if not isinstance(tmpl, template.DefTemplate):
# if main render method, call from the base of the inheritance stack
(inherit, lclcontext) = _populate_self_namespace(context, tmpl)
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
else:
# otherwise, call the actual rendering method specified
(inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
_exec_template(callable_, context, args=args, kwargs=kwargs)
def _exec_template(callable_, context, args=None, kwargs=None):
"""execute a rendering callable given the callable, a Context, and optional explicit arguments
the contextual Template will be located if it exists, and the error handling options specified
on that Template will be interpreted here.
"""
template = context._with_template
if template is not None and (template.format_exceptions or template.error_handler):
error = None
try:
callable_(context, *args, **kwargs)
except Exception, e:
error = e
except:
e = sys.exc_info()[0]
error = e
if error:
if template.error_handler:
result = template.error_handler(context, error)
if not result:
raise error
else:
error_template = exceptions.html_error_template()
context._buffer_stack[:] = [util.FastEncodingBuffer(error_template.output_encoding, error_template.encoding_errors)]
context._with_template = error_template
error_template.render_context(context, error=error)
else:
callable_(context, *args, **kwargs)
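A short usage sketch of Context as defined above (assuming the mako package in this tree is importable); rendering into an explicit Context is how output is directed at an existing buffer:

from StringIO import StringIO
from mako.template import Template
from mako.runtime import Context

buf = StringIO()
ctx = Context(buf, name="world")
Template("hello, ${name}!").render_context(ctx)
assert buf.getvalue() == "hello, world!"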

View File

@@ -1,277 +0,0 @@
# -*- coding: utf-8 -*-
# template.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Template class, a facade for parsing, generating and executing template strings,
as well as template runtime operations."""
from mako.lexer import Lexer
from mako import codegen
from mako import runtime, util, exceptions
import imp, os, re, shutil, stat, sys, tempfile, time, types, weakref
class Template(object):
"""a compiled template"""
def __init__(self, text=None, filename=None, uri=None, format_exceptions=False, error_handler=None,
lookup=None, output_encoding=None, encoding_errors='strict', module_directory=None, cache_type=None,
cache_dir=None, cache_url=None, module_filename=None, input_encoding=None, disable_unicode=False, default_filters=None,
buffer_filters=[], imports=None, preprocessor=None, cache_enabled=True):
"""construct a new Template instance using either literal template text, or a previously loaded template module
text - textual template source, or None if a module is to be provided
uri - the uri of this template, or some identifying string. defaults to the
full filename given, or "memory:(hex id of this Template)" if no filename
filename - filename of the source template, if any
format_exceptions - catch exceptions and format them into an error display template
"""
if uri:
self.module_id = re.sub(r'\W', "_", uri)
self.uri = uri
elif filename:
self.module_id = re.sub(r'\W', "_", filename)
self.uri = filename
else:
self.module_id = "memory:" + hex(id(self))
self.uri = self.module_id
self.input_encoding = input_encoding
self.output_encoding = output_encoding
self.encoding_errors = encoding_errors
self.disable_unicode = disable_unicode
if default_filters is None:
if self.disable_unicode:
self.default_filters = ['str']
else:
self.default_filters = ['unicode']
else:
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.preprocessor = preprocessor
# if plain text, compile code in memory only
if text is not None:
(code, module) = _compile_text(self, text, filename)
self._code = code
self._source = text
ModuleInfo(module, None, self, filename, code, text)
elif filename is not None:
# if template filename and a module directory, load
# a filesystem-based module file, generating if needed
if module_filename is not None:
path = module_filename
elif module_directory is not None:
u = self.uri
if u[0] == '/':
u = u[1:]
path = os.path.abspath(os.path.join(module_directory.replace('/', os.path.sep), u + ".py"))
else:
path = None
if path is not None:
util.verify_directory(os.path.dirname(path))
filemtime = os.stat(filename)[stat.ST_MTIME]
if not os.path.exists(path) or os.stat(path)[stat.ST_MTIME] < filemtime:
_compile_module_file(self, file(filename).read(), filename, path)
module = imp.load_source(self.module_id, path, file(path))
del sys.modules[self.module_id]
if module._magic_number != codegen.MAGIC_NUMBER:
_compile_module_file(self, file(filename).read(), filename, path)
module = imp.load_source(self.module_id, path, file(path))
del sys.modules[self.module_id]
ModuleInfo(module, path, self, filename, None, None)
else:
# template filename and no module directory, compile code
# in memory
(code, module) = _compile_text(self, file(filename).read(), filename)
self._source = None
self._code = code
ModuleInfo(module, None, self, filename, code, None)
else:
raise exceptions.RuntimeException("Template requires text or filename")
self.module = module
self.filename = filename
self.callable_ = self.module.render_body
self.format_exceptions = format_exceptions
self.error_handler = error_handler
self.lookup = lookup
self.cache_type = cache_type
self.cache_dir = cache_dir
self.cache_url = cache_url
self.cache_enabled = cache_enabled
def source(self):
"""return the template source code for this Template."""
return _get_module_info_from_callable(self.callable_).source
source = property(source)
def code(self):
"""return the module source code for this Template"""
return _get_module_info_from_callable(self.callable_).code
code = property(code)
def cache(self):
return self.module._template_cache
cache = property(cache)
def render(self, *args, **data):
"""render the output of this template as a string.
if the template specifies an output encoding, the string will be encoded accordingly, else the output
is raw (raw output uses cStringIO and can't handle multibyte characters).
a Context object is created corresponding to the given data. Arguments that are explicitly
declared by this template's internal rendering method are also pulled from the given *args, **data
members."""
return runtime._render(self, self.callable_, args, data)
def render_unicode(self, *args, **data):
"""render the output of this template as a unicode object."""
return runtime._render(self, self.callable_, args, data, as_unicode=True)
def render_context(self, context, *args, **kwargs):
"""render this Template with the given context.
the data is written to the context's buffer."""
if getattr(context, '_with_template', None) is None:
context._with_template = self
runtime._render_context(self, self.callable_, context, *args, **kwargs)
def has_def(self, name):
return hasattr(self.module, "render_%s" % name)
def get_def(self, name):
"""return a def of this template as an individual Template of its own."""
return DefTemplate(self, getattr(self.module, "render_%s" % name))
class ModuleTemplate(Template):
"""A Template which is constructed given an existing Python module.
e.g.::
t = Template("this is a template")
f = file("mymodule.py", "w")
f.write(t.code)
f.close()
import mymodule
t = ModuleTemplate(mymodule)
print t.render()
"""
def __init__(self, module,
module_filename=None,
template=None, template_filename=None,
module_source=None, template_source=None,
output_encoding=None, encoding_errors='strict', disable_unicode=False, format_exceptions=False,
error_handler=None, lookup=None, cache_type=None, cache_dir=None, cache_url=None, cache_enabled=True
):
self.module_id = re.sub(r'\W', "_", module._template_uri)
self.uri = module._template_uri
self.input_encoding = module._source_encoding
self.output_encoding = output_encoding
self.encoding_errors = encoding_errors
self.disable_unicode = disable_unicode
self.module = module
self.filename = template_filename
ModuleInfo(module, module_filename, self, template_filename, module_source, template_source)
self.callable_ = self.module.render_body
self.format_exceptions = format_exceptions
self.error_handler = error_handler
self.lookup = lookup
self.cache_type = cache_type
self.cache_dir = cache_dir
self.cache_url = cache_url
self.cache_enabled = cache_enabled
class DefTemplate(Template):
"""a Template which represents a callable def in a parent template."""
def __init__(self, parent, callable_):
self.parent = parent
self.callable_ = callable_
self.output_encoding = parent.output_encoding
self.encoding_errors = parent.encoding_errors
self.format_exceptions = parent.format_exceptions
self.error_handler = parent.error_handler
self.lookup = parent.lookup
def get_def(self, name):
return self.parent.get_def(name)
class ModuleInfo(object):
"""stores information about a module currently loaded into memory,
provides reverse lookups of template source, module source code based on
a module's identifier."""
_modules = weakref.WeakValueDictionary()
def __init__(self, module, module_filename, template, template_filename, module_source, template_source):
self.module = module
self.module_filename = module_filename
self.template_filename = template_filename
self.module_source = module_source
self.template_source = template_source
self._modules[module.__name__] = template._mmarker = self
if module_filename:
self._modules[module_filename] = self
def _get_code(self):
if self.module_source is not None:
return self.module_source
else:
return file(self.module_filename).read()
code = property(_get_code)
def _get_source(self):
if self.template_source is not None:
if self.module._source_encoding and not isinstance(self.template_source, unicode):
return self.template_source.decode(self.module._source_encoding)
else:
return self.template_source
else:
if self.module._source_encoding:
return file(self.template_filename).read().decode(self.module._source_encoding)
else:
return file(self.template_filename).read()
source = property(_get_source)
def _compile_text(template, text, filename):
identifier = template.module_id
lexer = Lexer(text, filename, disable_unicode=template.disable_unicode, input_encoding=template.input_encoding, preprocessor=template.preprocessor)
node = lexer.parse()
source = codegen.compile(node, template.uri, filename, default_filters=template.default_filters, buffer_filters=template.buffer_filters, imports=template.imports, source_encoding=lexer.encoding, generate_unicode=not template.disable_unicode)
#print source
cid = identifier
if isinstance(cid, unicode):
cid = cid.encode()
module = types.ModuleType(cid)
code = compile(source, cid, 'exec')
exec code in module.__dict__, module.__dict__
return (source, module)
def _compile_module_file(template, text, filename, outputpath):
identifier = template.module_id
lexer = Lexer(text, filename, disable_unicode=template.disable_unicode, input_encoding=template.input_encoding, preprocessor=template.preprocessor)
node = lexer.parse()
source = codegen.compile(node, template.uri, filename, default_filters=template.default_filters, buffer_filters=template.buffer_filters, imports=template.imports, source_encoding=lexer.encoding, generate_unicode=not template.disable_unicode)
(dest, name) = tempfile.mkstemp()
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
def _get_module_info_from_callable(callable_):
return _get_module_info(callable_.func_globals['__name__'])
def _get_module_info(filename):
return ModuleInfo._modules[filename]
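A usage sketch for the Template facade above, again assuming the package is importable; render() drives the module-level render_body(), and get_def() wraps a single def as a DefTemplate of its own:

from mako.template import Template

t = Template("<%def name='greet(name)'>hello, ${name}</%def>${greet('mako')}")
print t.render()                          # -> hello, mako
print t.get_def('greet').render('web')   # -> hello, web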

View File

@@ -1,267 +0,0 @@
# -*- coding: utf-8 -*-
# util.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
try:
Set = set
except:
import sets
Set = sets.Set
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
import codecs, re, weakref, os, time
try:
import threading
import thread
except ImportError:
import dummy_threading as threading
import dummy_thread as thread
if sys.platform.startswith('win') or sys.platform.startswith('java'):
time_func = time.clock
else:
time_func = time.time
def verify_directory(dir):
"""create and/or verify a filesystem directory."""
tries = 0
while not os.path.exists(dir):
try:
tries += 1
os.makedirs(dir, 0750)
except:
if tries > 5:
raise
class SetLikeDict(dict):
"""a dictionary that has some setlike methods on it"""
def union(self, other):
"""produce a 'union' of this dict and another (at the key level).
values in the second dict take precedence over that of the first"""
x = SetLikeDict(**self)
x.update(other)
return x
class FastEncodingBuffer(object):
"""a very rudimentary buffer that is faster than StringIO, but doesn't crash on unicode data like cStringIO."""
def __init__(self, encoding=None, errors='strict', unicode=False):
self.data = []
self.encoding = encoding
if unicode:
self.delim = u''
else:
self.delim = ''
self.unicode = unicode
self.errors = errors
self.write = self.data.append
def getvalue(self):
if self.encoding:
return self.delim.join(self.data).encode(self.encoding, self.errors)
else:
return self.delim.join(self.data)
class LRUCache(dict):
"""A dictionary-like object that stores a limited number of items, discarding
lesser used items periodically.
this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based
paradigm so that synchronization is not really needed. the size management
is inexact.
"""
class _Item(object):
def __init__(self, key, value):
self.key = key
self.value = value
self.timestamp = time_func()
def __repr__(self):
return repr(self.value)
def __init__(self, capacity, threshold=.5):
self.capacity = capacity
self.threshold = threshold
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item.timestamp = time_func()
return item.value
def values(self):
return [i.value for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = self._Item(key, value)
dict.__setitem__(self, key, item)
else:
item.value = value
self._manage_size()
def _manage_size(self):
while len(self) > self.capacity + self.capacity * self.threshold:
bytime = dict.values(self)
bytime.sort(lambda a, b: cmp(b.timestamp, a.timestamp))
for item in bytime[self.capacity:]:
try:
del self[item.key]
except KeyError:
# if we couldn't find a key, most likely some other thread broke in
# on us. loop around and try again
break
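A behavioural sketch of the LRUCache above (assuming mako.util is importable): nothing is pruned until the size passes capacity * (1 + threshold), after which roughly the capacity most recently touched items survive.

from mako.util import LRUCache

cache = LRUCache(2, threshold=.5)   # prune once len(cache) > 2 + 2 * .5 == 3
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3                      # still only 3 items, nothing pruned yet
cache['a']                          # a read refreshes the item's timestamp
cache['d'] = 4                      # 4 > 3, so _manage_size() trims the cache
print len(cache)                    # 2 -- only the freshest entries remain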
# Regexp to match python magic encoding line
_PYTHON_MAGIC_COMMENT_re = re.compile(
r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)',
re.VERBOSE)
def parse_encoding(fp):
"""Deduce the encoding of a source file from magic comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object.
"""
pos = fp.tell()
fp.seek(0)
try:
line1 = fp.readline()
has_bom = line1.startswith(codecs.BOM_UTF8)
if has_bom:
line1 = line1[len(codecs.BOM_UTF8):]
m = _PYTHON_MAGIC_COMMENT_re.match(line1)
if not m:
try:
import parser
parser.suite(line1)
except (ImportError, SyntaxError):
# Either it's a real syntax error, in which case the source
# is not valid python source, or line2 is a continuation of
# line1, in which case we don't want to scan line2 for a magic
# comment.
pass
else:
line2 = fp.readline()
m = _PYTHON_MAGIC_COMMENT_re.match(line2)
if has_bom:
if m:
raise SyntaxError, \
"python refuses to compile code with both a UTF8" \
" byte-order-mark and a magic encoding comment"
return 'utf_8'
elif m:
return m.group(1)
else:
return None
finally:
fp.seek(pos)
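For example (a minimal sketch, assuming mako.util is importable), parse_encoding() reads the magic comment from the first line and restores the file position afterwards:

from StringIO import StringIO
from mako.util import parse_encoding

src = StringIO("# -*- coding: utf-8 -*-\nx = 1\n")
print parse_encoding(src)   # -> utf-8
print src.tell()            # -> 0, the read position is untouched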
def sorted_dict_repr(d):
"""repr() a dictionary with the keys in order.
Used by the lexer unit test to compare parse trees based on strings.
"""
keys = d.keys()
keys.sort()
return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in keys]) + "}"
def restore__ast(_ast):
"""Attempt to restore the required classes to the _ast module if it
appears to be missing them
"""
if hasattr(_ast, 'AST'):
return
_ast.PyCF_ONLY_AST = 2 << 9
m = compile("""\
def foo(): pass
class Bar(object): pass
if False: pass
baz = 'mako'
1 + 2 - 3 * 4 / 5
6 // 7 % 8 << 9 >> 10
11 & 12 ^ 13 | 14
15 and 16 or 17
-baz + (not +18) - ~17
baz and 'foo' or 'bar'
(mako is baz == baz) is not baz != mako
mako > baz < mako >= baz <= mako
mako in baz not in mako""", '<unknown>', 'exec', _ast.PyCF_ONLY_AST)
_ast.Module = type(m)
for cls in _ast.Module.__mro__:
if cls.__name__ == 'mod':
_ast.mod = cls
elif cls.__name__ == 'AST':
_ast.AST = cls
_ast.FunctionDef = type(m.body[0])
_ast.ClassDef = type(m.body[1])
_ast.If = type(m.body[2])
_ast.Name = type(m.body[3].targets[0])
_ast.Store = type(m.body[3].targets[0].ctx)
_ast.Str = type(m.body[3].value)
_ast.Sub = type(m.body[4].value.op)
_ast.Add = type(m.body[4].value.left.op)
_ast.Div = type(m.body[4].value.right.op)
_ast.Mult = type(m.body[4].value.right.left.op)
_ast.RShift = type(m.body[5].value.op)
_ast.LShift = type(m.body[5].value.left.op)
_ast.Mod = type(m.body[5].value.left.left.op)
_ast.FloorDiv = type(m.body[5].value.left.left.left.op)
_ast.BitOr = type(m.body[6].value.op)
_ast.BitXor = type(m.body[6].value.left.op)
_ast.BitAnd = type(m.body[6].value.left.left.op)
_ast.Or = type(m.body[7].value.op)
_ast.And = type(m.body[7].value.values[0].op)
_ast.Invert = type(m.body[8].value.right.op)
_ast.Not = type(m.body[8].value.left.right.op)
_ast.UAdd = type(m.body[8].value.left.right.operand.op)
_ast.USub = type(m.body[8].value.left.left.op)
_ast.Or = type(m.body[9].value.op)
_ast.And = type(m.body[9].value.values[0].op)
_ast.IsNot = type(m.body[10].value.ops[0])
_ast.NotEq = type(m.body[10].value.ops[1])
_ast.Is = type(m.body[10].value.left.ops[0])
_ast.Eq = type(m.body[10].value.left.ops[1])
_ast.Gt = type(m.body[11].value.ops[0])
_ast.Lt = type(m.body[11].value.ops[1])
_ast.GtE = type(m.body[11].value.ops[2])
_ast.LtE = type(m.body[11].value.ops[3])
_ast.In = type(m.body[12].value.ops[0])
_ast.NotIn = type(m.body[12].value.ops[1])

View File

@@ -203,4 +203,3 @@ while True:
time.sleep(60)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@@ -1205,6 +1205,26 @@ class orm_template(object):
return arch
def __get_default_search_view(self, cr, uid, context={}):
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
view = self.fields_view_get(cr, uid, False, 'form', context)
root = etree.fromstring(encode(view['arch']))
res = etree.XML("<search string='%s'></search>" % root.get("string", ""))
node = etree.Element("group")
res.append(node)
fields = root.xpath("//field[@select=1]")
for field in fields:
node.append(field)
return etree.tostring(res, encoding="utf-8").replace('\t', '')
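The new __get_default_search_view above lifts every field marked select=1 out of the default form view and wraps it in a search/group skeleton. A standalone sketch of the transformation with a hypothetical form arch (field names are illustrative only):

from lxml import etree

form_arch = """<form string="Partner">
    <field name="name" select="1"/>
    <field name="ref"/>
    <field name="city" select="1"/>
</form>"""

root = etree.fromstring(form_arch)
res = etree.XML("<search string='%s'></search>" % root.get("string", ""))
group = etree.Element("group")
res.append(group)
for field in root.xpath("//field[@select=1]"):
    group.append(field)
print etree.tostring(res)
# <search string="Partner"><group><field name="name" select="1"/><field name="city" select="1"/></group></search>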
#
# if view_id, view_type is not required
#
@@ -1257,7 +1277,7 @@ class orm_template(object):
if pos == 'replace':
parent = node.getparent()
if parent is None:
src = copy.deepcopy(node2.getchildren()[0])
src = copy.deepcopy(node2[0])
else:
for child in node2:
node.addprevious(child)
@@ -1323,19 +1343,10 @@ class orm_template(object):
inherit_id IS NULL
ORDER BY priority''', (self._name, view_type))
sql_res = cr.fetchone()
if not sql_res and view_type == 'search':
cr.execute('''SELECT
arch,name,field_parent,id,type,inherit_id
FROM
ir_ui_view
WHERE
model=%s AND
type=%s AND
inherit_id IS NULL
ORDER BY priority''', (self._name, 'form'))
sql_res = cr.fetchone()
if not sql_res:
break
ok = sql_res[5]
view_id = ok or sql_res[3]
model = False
@@ -1361,6 +1372,7 @@ class orm_template(object):
result['name'] = sql_res[1]
result['field_parent'] = sql_res[2] or False
else:
# otherwise, build some kind of default view
if view_type == 'form':
res = self.fields_get(cr, user, context=context)
@@ -1372,6 +1384,7 @@ class orm_template(object):
if res[x]['type'] == 'text':
xml += "<newline/>"
xml += "</form>"
elif view_type == 'tree':
_rec_name = self._rec_name
if _rec_name not in self._columns:
@@ -1379,8 +1392,13 @@ class orm_template(object):
xml = '<?xml version="1.0" encoding="utf-8"?>' \
'<tree string="%s"><field name="%s"/></tree>' \
% (self._description, self._rec_name)
elif view_type == 'calendar':
xml = self.__get_default_calendar_view()
elif view_type == 'search':
xml = self.__get_default_search_view(cr, user, context)
else:
xml = '<?xml version="1.0"?>' # what happens here, graph case?
# raise except_orm(_('Invalid Architecture!'),_("There is no view of type '%s' defined for the structure!") % view_type)

View File

@@ -171,9 +171,12 @@ class osv_pool(netsvc.Service):
res.append(klass.createInstance(self, module, cr))
return res
class osv_base(object):
def __init__(self, pool, cr):
pool.add(self._name, self)
self.pool = pool
super(osv_base, self).__init__(cr)
class osv_memory(orm.orm_memory):
#__metaclass__ = inheritor
def __new__(cls):
module = str(cls)[6:]
module = module[:len(module)-1]
@ -186,92 +189,70 @@ class osv_memory(orm.orm_memory):
module_list.append(cls._module)
return None
class osv_memory(osv_base, orm.orm_memory):
#
# Goal: try to apply inheritance at the instantiation level and
# put objects in the pool var
#
def createInstance(cls, pool, module, cr):
name = hasattr(cls, '_name') and cls._name or cls._inherit
parent_name = hasattr(cls, '_inherit') and cls._inherit
if parent_name:
parent_class = pool.get(parent_name).__class__
assert pool.get(parent_name), "parent class %s does not exist in module %s !" % (parent_name, module)
nattr = {}
for s in ('_columns', '_defaults'):
new = copy.copy(getattr(pool.get(parent_name), s))
if hasattr(new, 'update'):
new.update(cls.__dict__.get(s, {}))
else:
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
name = hasattr(cls, '_name') and cls._name or cls._inherit
cls = type(name, (cls, parent_class), nattr)
parent_names = hasattr(cls, '_inherit') and cls._inherit
if parent_names:
for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
parent_class = pool.get(parent_name).__class__
assert pool.get(parent_name), "parent class %s does not exist in module %s !" % (parent_name, module)
nattr = {}
for s in ('_columns', '_defaults'):
new = copy.copy(getattr(pool.get(parent_name), s))
if hasattr(new, 'update'):
new.update(cls.__dict__.get(s, {}))
else:
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
name = hasattr(cls, '_name') and cls._name or cls._inherit
cls = type(name, (cls, parent_class), nattr)
obj = object.__new__(cls)
obj.__init__(pool, cr)
return obj
createInstance = classmethod(createInstance)
def __init__(self, pool, cr):
pool.add(self._name, self)
self.pool = pool
orm.orm_memory.__init__(self, cr)
class osv(orm.orm):
#__metaclass__ = inheritor
def __new__(cls):
module = str(cls)[6:]
module = module[:len(module)-1]
module = module.split('.')[0][2:]
if not hasattr(cls, '_module'):
cls._module = module
module_class_list.setdefault(cls._module, []).append(cls)
class_pool[cls._name] = cls
if module not in module_list:
module_list.append(cls._module)
return None
class osv(osv_base, orm.orm):
#
# Goal: try to apply inheritance at the instantiation level and
# put objects in the pool var
#
def createInstance(cls, pool, module, cr):
parent_name = hasattr(cls, '_inherit') and cls._inherit
if parent_name:
parent_class = pool.get(parent_name).__class__
assert pool.get(parent_name), "parent class %s does not exist in module %s !" % (parent_name, module)
nattr = {}
for s in ('_columns', '_defaults', '_inherits', '_constraints', '_sql_constraints'):
new = copy.copy(getattr(pool.get(parent_name), s))
if hasattr(new, 'update'):
new.update(cls.__dict__.get(s, {}))
else:
if s=='_constraints':
for c in cls.__dict__.get(s, []):
exist = False
for c2 in range(len(new)):
if new[c2][2]==c[2]:
new[c2] = c
exist = True
break
if not exist:
new.append(c)
parent_names = hasattr(cls, '_inherit') and cls._inherit
if parent_names:
for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
parent_class = pool.get(parent_name).__class__
assert pool.get(parent_name), "parent class %s does not exist in module %s !" % (parent_name, module)
nattr = {}
for s in ('_columns', '_defaults', '_inherits', '_constraints', '_sql_constraints'):
new = copy.copy(getattr(pool.get(parent_name), s))
if hasattr(new, 'update'):
new.update(cls.__dict__.get(s, {}))
else:
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
name = hasattr(cls, '_name') and cls._name or cls._inherit
cls = type(name, (cls, parent_class), nattr)
if s=='_constraints':
for c in cls.__dict__.get(s, []):
exist = False
for c2 in range(len(new)):
if new[c2][2]==c[2]:
new[c2] = c
exist = True
break
if not exist:
new.append(c)
else:
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
name = hasattr(cls, '_name') and cls._name or cls._inherit
cls = type(name, (cls, parent_class), nattr)
obj = object.__new__(cls)
obj.__init__(pool, cr)
return obj
createInstance = classmethod(createInstance)
def __init__(self, pool, cr):
pool.add(self._name, self)
self.pool = pool
orm.orm.__init__(self, cr)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
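createInstance above merges _columns/_defaults (and, for osv, the constraint lists) from each parent named in _inherit, then builds a new class on the fly with type(). A toy sketch of that merge step, using illustrative classes rather than real pool objects:

    import copy

    class parent_model(object):
        _name = 'res.partner'
        _columns = {'name': 'char', 'ref': 'char'}

    class child_model(object):
        _inherit = 'res.partner'
        _columns = {'ref': 'char(64)', 'active': 'boolean'}

    def merge(child, parent):
        # copy the parent's columns, then let the child override/extend them
        columns = copy.copy(parent._columns)
        columns.update(child.__dict__.get('_columns', {}))
        name = getattr(child, '_name', None) or child._inherit
        return type(name, (child, parent), {'_columns': columns})

    merged = merge(child_model, parent_model)
    print(sorted(merged._columns))   # ['active', 'name', 'ref']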

View File

@ -32,12 +32,10 @@ from osv.osv import except_osv
from osv.orm import browse_null
from osv.orm import browse_record_list
import pooler
from xml.dom import minidom
import libxml2
import libxslt
from pychart import *
import misc
import cStringIO
from lxml import etree
class external_pdf(render.render):
def __init__(self, pdf):
@ -286,17 +284,13 @@ class report_custom(report_int):
if report['print_orientation']=='landscape':
pageSize=[pageSize[1],pageSize[0]]
impl = minidom.getDOMImplementation()
new_doc = impl.createDocument(None, "report", None)
new_doc = etree.Element('report')
# build header
config = new_doc.createElement("config")
config = etree.SubElement(new_doc, 'config')
def _append_node(name, text):
n = new_doc.createElement(name)
t = new_doc.createTextNode(text)
n.appendChild(t)
config.appendChild(n)
n = etree.SubElement(config, name)
n.text = text
_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('PageFormat', '%s' % report['print_format'])
@ -316,45 +310,33 @@ class report_custom(report_int):
_append_node('report-header', '%s' % (report['title'],))
_append_node('report-footer', '%s' % (report['footer'],))
new_doc.childNodes[0].appendChild(config)
header = new_doc.createElement("header")
header = etree.SubElement(new_doc, 'header')
for f in fields:
field = new_doc.createElement("field")
field_txt = new_doc.createTextNode('%s' % (f['name'],))
field.appendChild(field_txt)
header.appendChild(field)
new_doc.childNodes[0].appendChild(header)
field = etree.SubElement(header, 'field')
field.text = f['name']
lines = new_doc.createElement("lines")
lines = etree.SubElement(new_doc, 'lines')
level.reverse()
for line in results:
shift = level.pop()
node_line = new_doc.createElement("row")
node_line = etree.SubElement(lines, 'row')
prefix = '+'
for f in range(len(fields)):
col = new_doc.createElement("col")
col = etree.SubElement(node_line, 'col')
if f == 0:
col.setAttribute('para','yes')
col.setAttribute('tree','yes')
col.setAttribute('space',str(3*shift)+'mm')
col.attrib.update(para='yes',
tree='yes',
space=str(3*shift)+'mm')
if line[f] != None:
txt = new_doc.createTextNode(prefix+str(line[f]) or '')
col.text = prefix+str(line[f]) or ''
else:
txt = new_doc.createTextNode('/')
col.appendChild(txt)
node_line.appendChild(col)
col.text = '/'
prefix = ''
lines.appendChild(node_line)
new_doc.childNodes[0].appendChild(lines)
styledoc = libxml2.parseFile(os.path.join(tools.config['root_path'],'addons/base/report/custom_new.xsl'))
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseDoc(new_doc.toxml())
rml_obj = style.applyStylesheet(doc, None)
rml = style.saveResultToString(rml_obj)
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = render.rml(rml)
self.obj.render()
@ -591,17 +573,12 @@ class report_custom(report_int):
if report['print_orientation']=='landscape':
pageSize=[pageSize[1],pageSize[0]]
impl = minidom.getDOMImplementation()
new_doc = impl.createDocument(None, "report", None)
# build header
config = new_doc.createElement("config")
new_doc = etree.Element('report')
config = etree.SubElement(new_doc, 'config')
def _append_node(name, text):
n = new_doc.createElement(name)
t = new_doc.createTextNode(text)
n.appendChild(t)
config.appendChild(n)
n = etree.SubElement(config, name)
n.text = text
_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
@ -621,40 +598,25 @@ class report_custom(report_int):
_append_node('report-header', '%s' % (report['title'],))
_append_node('report-footer', '%s' % (report['footer'],))
new_doc.childNodes[0].appendChild(config)
header = new_doc.createElement("header")
header = etree.SubElement(new_doc, 'header')
for f in fields:
field = new_doc.createElement("field")
field_txt = new_doc.createTextNode('%s' % (f['name'],))
field.appendChild(field_txt)
header.appendChild(field)
new_doc.childNodes[0].appendChild(header)
field = etree.SubElement(header, 'field')
field.text = f['name']
lines = new_doc.createElement("lines")
lines = etree.SubElement(new_doc, 'lines')
for line in results:
node_line = new_doc.createElement("row")
node_line = etree.SubElement(lines, 'row')
for f in range(len(fields)):
col = new_doc.createElement("col")
col.setAttribute('tree','no')
col = etree.SubElement(node_line, 'col', tree='no')
if line[f] != None:
txt = new_doc.createTextNode(str(line[f] or ''))
col.text = line[f] or ''
else:
txt = new_doc.createTextNode('/')
col.appendChild(txt)
node_line.appendChild(col)
lines.appendChild(node_line)
new_doc.childNodes[0].appendChild(lines)
col.text = '/'
# file('/tmp/terp.xml','w+').write(new_doc.toxml())
styledoc = libxml2.parseFile(os.path.join(tools.config['root_path'],'addons/base/report/custom_new.xsl'))
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseDoc(new_doc.toxml())
rml_obj = style.applyStylesheet(doc, None)
rml = style.saveResultToString(rml_obj)
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = render.rml(rml)
self.obj.render()
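The hunks above drop minidom/libxml2/libxslt in favour of lxml.etree: nodes are built with Element/SubElement and the XSL transform becomes an etree.XSLT callable. A minimal sketch of that call, assuming a trivial inline stylesheet instead of custom_new.xsl:

    from lxml import etree

    xsl = etree.XML(
        '<xsl:stylesheet version="1.0" '
        'xmlns:xsl="http://www.w3.org/1999/XSL/Transform">'
        '<xsl:template match="/report">'
        '<document><xsl:value-of select="config/date"/></document>'
        '</xsl:template>'
        '</xsl:stylesheet>')

    report = etree.Element('report')
    config = etree.SubElement(report, 'config')
    etree.SubElement(config, 'date').text = '17/12/2009'

    transform = etree.XSLT(xsl)
    print(etree.tostring(transform(report)))   # <document>17/12/2009</document>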

View File

@ -22,8 +22,6 @@
import os
import re
import libxml2
import libxslt
from lxml import etree
import netsvc
import pooler
@ -146,48 +144,34 @@ class report_rml(report_int):
if not self.xsl:
return xml
# load XSL (parse it to the XML level)
styledoc = libxml2.parseDoc(tools.file_open(self.xsl).read())
xsl_path, tail = os.path.split(self.xsl)
for child in styledoc.children:
if child.name == 'import':
if child.hasProp('href'):
imp_file = child.prop('href')
_x, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
child.setProp('href', urllib.quote(str(imp_file)))
stylesheet = etree.parse(tools.file_open(self.xsl))
xsl_path, _ = os.path.split(self.xsl)
for import_child in stylesheet.findall('./import'):
if 'href' in import_child.attrib:
imp_file = import_child.get('href')
_, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
import_child.set('href', urllib.quote(str(imp_file)))
#TODO: get all the translation in one query. That means we have to:
# * build a list of items to translate,
# * issue the query to translate them,
# * (re)build/update the stylesheet with the translated items
# translate the XSL stylesheet
def look_down(child, lang):
while child is not None:
if (child.type == "element") and child.hasProp('t'):
#FIXME: use cursor
res = service.execute(cr.dbname, uid, 'ir.translation',
'_get_source', self.name2, 'xsl', lang, child.content)
if res:
child.setContent(res.encode('utf-8'))
look_down(child.children, lang)
child = child.next
def translate(doc, lang):
for node in doc.xpath('//*[@t]'):
translation = service.execute(
cr.dbname, uid, 'ir.translation', '_get_source',
self.name2, 'xsl', lang, node.text)
if translation:
node.text = translation
if context.get('lang', False):
look_down(styledoc.children, context['lang'])
translate(stylesheet, context['lang'])
# parse XSL
style = libxslt.parseStylesheetDoc(styledoc)
# load XML (data)
doc = libxml2.parseMemory(xml,len(xml))
# create RML (apply XSL to XML data)
result = style.applyStylesheet(doc, None)
# save result to string
xml = style.saveResultToString(result)
transform = etree.XSLT(stylesheet)
xml = etree.tostring(
transform(etree.fromstring(xml)))
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
return xml
def create_pdf(self, rml, localcontext = None, logo=None, title=None):
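The rewritten translate() above finds every element carrying a t attribute with a single XPath query instead of walking siblings recursively. A small illustration of that query, with a hard-coded dict standing in for the ir.translation._get_source lookup:

    from lxml import etree

    stylesheet = etree.XML(
        '<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform">'
        '<xsl:text t="1">Date</xsl:text>'
        '</xsl:stylesheet>')

    translations = {'Date': 'Datum'}           # stand-in for the DB lookup
    for node in stylesheet.xpath('//*[@t]'):
        node.text = translations.get(node.text, node.text)
    print(etree.tostring(stylesheet))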

View File

@ -121,8 +121,7 @@ class document(object):
#TODO: test this
if value == '' and 'default' in attrs:
value = attrs['default']
el = etree.Element(node.tag)
parent.append(el)
el = etree.SubElement(parent, node.tag)
el.text = tounicode(value)
#TODO: test this
for key, value in attrs.iteritems():
@ -154,31 +153,27 @@ class document(object):
fp.write(dt)
i = str(len(self.bin_datas))
self.bin_datas[i] = fp
el = etree.Element(node.tag)
el = etree.SubElement(parent, node.tag)
el.text = i
parent.append(el)
elif attrs['type']=='data':
#TODO: test this
txt = self.datas.get('form', {}).get(attrs['name'], '')
el = etree.Element(node.tag)
el = etree.SubElement(parent, node.tag)
el.text = txt
parent.append(el)
elif attrs['type']=='function':
if attrs['name'] in self.func:
txt = self.func[attrs['name']](node)
else:
txt = print_fnc.print_fnc(attrs['name'], node)
el = etree.Element(node.tag)
el = etree.SubElement(parent, node.tag)
el.text = txt
parent.append(el)
elif attrs['type']=='eval':
value = self.eval(browser, attrs['expr'])
el = etree.Element(node.tag)
el = etree.SubElement(parent, node.tag)
el.text = str(value)
parent.append(el)
elif attrs['type']=='fields':
fields = attrs['name'].split(',')
@ -196,8 +191,7 @@ class document(object):
v_list = [vals[k] for k in keys]
for v in v_list:
el = etree.Element(node.tag)
parent.append(el)
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld, el, v)
@ -231,8 +225,7 @@ class document(object):
def parse_result_tree(node, parent, datas):
if not node.tag == etree.Comment:
el = etree.Element(node.tag)
parent.append(el)
el = etree.SubElement(parent, node.tag)
atr = self.node_attrs_get(node)
if 'value' in atr:
if not isinstance(datas[atr['value']], (str, unicode)):
@ -256,8 +249,7 @@ class document(object):
else:
v_list = value
for v in v_list:
el = etree.Element(node.tag)
parent.append(el)
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld, el, v)
else:
@ -266,8 +258,7 @@ class document(object):
if node.tag == parent.tag:
el = parent
else:
el = etree.Element(node.tag)
parent.append(el)
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld,el, browser)
def xml_get(self):
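Most of the changes above replace the two-step Element()+append() idiom with a single SubElement() call; the resulting tree is identical. For instance:

    from lxml import etree

    parent = etree.Element('report')

    el = etree.Element('line')              # old style: create, then attach
    parent.append(el)

    el = etree.SubElement(parent, 'line')   # new style: created already attached

    print(etree.tostring(parent))           # <report><line/><line/></report>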

View File

@ -25,8 +25,6 @@ import tools
from report import render
from lxml import etree
import libxml2
import libxslt
import time, os
@ -77,21 +75,18 @@ class report_printscreen_list(report_int):
def _create_table(self, uid, ids, fields, fields_order, results, context, title=''):
pageSize=[297.0,210.0]
impl = minidom.getDOMImplementation()
new_doc = etree.Element("report")
config = etree.Element("config")
config = etree.SubElement(new_doc, 'config')
# build header
def _append_node(name, text):
n = etree.Element(name)
n = etree.SubElement(config, name)
n.text = text
config.append(n)
_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
_append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
_append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
_append_node('report-header', title)
l = []
@ -110,17 +105,15 @@ class report_printscreen_list(report_int):
s = fields[fields_order[pos]].get('size', 56) / 28 + 1
l[pos] = strmax * s / t
_append_node('tableSize', ','.join(map(str,l)) )
new_doc.append(config)
header=etree.Element("header")
header = etree.SubElement(new_doc, 'header')
for f in fields_order:
field = etree.Element("field")
field = etree.SubElement(header, 'field')
field.text = fields[f]['string'] or ''
header.append(field)
new_doc.append(header)
lines = etree.Element("lines")
lines = etree.SubElement(new_doc, 'lines')
for line in results:
node_line = etree.Element("row")
node_line = etree.SubElement(lines, 'row')
for f in fields_order:
if fields[f]['type']=='many2one' and line[f]:
line[f] = line[f][1]
@ -129,21 +122,17 @@ class report_printscreen_list(report_int):
if fields[f]['type'] == 'float':
precision=(('digits' in fields[f]) and fields[f]['digits'][1]) or 2
line[f]=round(line[f],precision)
col = etree.Element("col")
col.set('tree','no')
col = etree.SubElement(node_line, 'col', tree='no')
if line[f] != None:
col.text = tools.ustr(line[f] or '')
else:
col.text = '/'
node_line.append(col)
lines.append(node_line)
new_doc.append(node_line)
styledoc = libxml2.parseFile(os.path.join(tools.config['root_path'],'addons/base/report/custom_new.xsl'))
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseDoc(new_doc.toxml())
rml_obj = style.applyStylesheet(doc, None)
rml = style.saveResultToString(rml_obj)
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = render.rml(rml, self.title)
self.obj.render()
return True

View File

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
##############################################################################
#
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
@ -15,7 +15,7 @@
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
@ -24,8 +24,6 @@ import pooler
import tools
from lxml import etree
from report import render
import libxml2
import libxslt
import locale
import time, os
@ -37,14 +35,9 @@ class report_printscreen_list(report_int):
def _parse_node(self, root_node):
result = []
for node in root_node.getchildren():
for node in root_node:
if node.tag == 'field':
attrsa = node.attrib
attrs = {}
if not attrsa is None:
for key,val in attrsa.items():
attrs[key] = val
result.append(attrs['name'])
result.append(node.get('name'))
else:
result.extend(self._parse_node(node))
return result
@ -53,7 +46,7 @@ class report_printscreen_list(report_int):
try:
dom = etree.XML(view.encode('utf-8'))
except:
dom = etree.XML(view)
dom = etree.XML(view)
return self._parse_node(dom)
def create(self, cr, uid, ids, datas, context=None):
@ -76,44 +69,42 @@ class report_printscreen_list(report_int):
fields_order = self._parse_string(result['arch'])
rows = model.read(cr, uid, datas['ids'], result['fields'].keys(), context)
ids2 = [x['id'] for x in rows] # getting the ids from read result
if datas['ids'] != ids2: # sorted ids were not taken into consideration for print screen
rows_new = []
for id in datas['ids']:
element = [elem for elem in rows if elem['id']==id]
rows_new.append(element[0])
rows = rows_new
res = self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model_desc)
return (self.obj.get(), 'pdf')
def _create_table(self, uid, ids, fields, fields_order, results, context, title=''):
pageSize=[297.0, 210.0]
new_doc = etree.Element("report")
config = etree.Element("config")
# build header
new_doc = etree.Element("report")
config = etree.SubElement(new_doc, 'config')
def _append_node(name, text):
n = etree.Element(name)
n = etree.SubElement(config, name)
n.text = text
config.append(n)
#_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('date', time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
_append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
_append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
_append_node('report-header', title)
l = []
t = 0
rowcount=0;
strmax = (pageSize[0]-40) * 2.8346
temp = []
count = len(fields_order)
for i in range(0,count):
for i in range(0, count):
temp.append(0)
ince = -1;
@ -135,24 +126,20 @@ class report_printscreen_list(report_int):
l[pos] = strmax * s / t
_append_node('tableSize', ','.join(map(str,l)) )
new_doc.append(config)
header=etree.Element("header")
header = etree.SubElement(new_doc, 'header')
for f in fields_order:
field = etree.Element("field")
field = etree.SubElement(header, 'field')
field.text = tools.ustr(fields[f]['string'] or '')
header.append(field)
new_doc.append(header)
lines = etree.Element("lines")
lines = etree.SubElement(new_doc, 'lines')
tsum = []
count = len(fields_order)
for i in range(0,count):
tsum.append(0)
for line in results:
node_line = etree.Element("row")
node_line = etree.SubElement(lines, 'row')
count = -1
for f in fields_order:
float_flag = 0
@ -181,41 +168,32 @@ class report_printscreen_list(report_int):
d1 = datetime.strptime(line[f],'%y-%m-%d')
new_d1 = d1.strftime(format)
line[f] = new_d1
if fields[f]['type'] == 'time' and line[f]:
format = str(locale.nl_langinfo(locale.T_FMT))
d1 = datetime.strptime(line[f], '%H:%M:%S')
new_d1 = d1.strftime(format)
line[f] = new_d1
if fields[f]['type'] == 'datetime' and line[f]:
format = str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))+' '+str(locale.nl_langinfo(locale.T_FMT))
d1 = datetime.strptime(line[f], '%Y-%m-%d %H:%M:%S')
new_d1 = d1.strftime(format)
line[f] = new_d1
col = etree.Element("col")
col.set('para','yes')
col.set('tree','no')
col = etree.SubElement(node_line, 'col', para='yes', tree='no')
if line[f] != None:
col.text = tools.ustr(line[f] or '')
if float_flag:
col.set('tree','float')
if temp[count] == 1:
col.set('tree','float')
if f != 'id' and temp[count] == 1:
tsum[count] = float(tsum[count]) + float(line[f]);
else:
col.text = '/'
node_line.append(col)
lines.append(node_line)
node_line = etree.Element("row")
lines.append(node_line)
node_line = etree.SubElement(lines, 'row')
for f in range(0,count+1):
col = etree.Element("col")
col.set('para','yes')
col.set('tree','no')
col = etree.SubElement(node_line, 'col', para='yes', tree='no')
if tsum[f] != None:
if tsum[f] >= 0.01 :
prec = '%.' + str(tools.config['price_accuracy']) + 'f'
@ -230,16 +208,12 @@ class report_printscreen_list(report_int):
txt ='Total'
col.text = tools.ustr(txt or '')
node_line.append(col)
lines.append(node_line)
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
new_doc.append(lines)
styledoc = libxml2.parseFile(os.path.join(tools.config['root_path'],'addons/base/report/custom_new.xsl'))
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseDoc(etree.tostring(new_doc))
rml_obj = style.applyStylesheet(doc, None)
rml = style.saveResultToString(rml_obj)
self.obj = render.rml(rml, title=self.title)
self.obj.render()
return True
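The printscreen report above renders stored ISO dates with the locale's own pattern (D_FMT, with %y widened to %Y). A sketch of that conversion in isolation; note that locale.nl_langinfo is only available on POSIX systems:

    import locale
    from datetime import datetime

    locale.setlocale(locale.LC_TIME, '')   # pick up the process locale
    fmt = str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))
    print(datetime.strptime('2009-12-17', '%Y-%m-%d').strftime(fmt))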

View File

@ -74,7 +74,7 @@ class html2html(object):
return self._node
def url_modify(self,root):
for n in root.getchildren():
for n in root:
if (n.text.find('<a ')>=0 or n.text.find('&lt;a')>=0) and n.text.find('href')>=0 and n.text.find('style')<=0 :
node = (n.tag=='span' and n.getparent().tag=='u') and n.getparent().getparent() or ((n.tag=='span') and n.getparent()) or n
style = node.get('color') and "style='color:%s; text-decoration: none;'"%node.get('color') or ''

View File

@ -18,7 +18,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mako
from lxml import etree
from mako.template import Template
from mako.lookup import TemplateLookup

View File

@ -103,12 +103,10 @@ class _flowable(object):
process(node,new_node)
if new_node.get('colWidths',False):
sizes = map(lambda x: utils.unit_get(x), new_node.get('colWidths').split(','))
tr = etree.Element('tr')
tr = etree.SubElement(new_node, 'tr')
for s in sizes:
td = etree.Element('td')
td.set("width", str(s))
tr.append(td)
new_node.append(tr)
etree.SubElement(tr, 'td', width=str(s))
return etree.tostring(new_node)
def _tag_para(self, node):
@ -297,7 +295,7 @@ class _rml_template(object):
posx = int(utils.unit_get(tmpl.get('x1')))
frames[(posy,posx,tmpl.get('id'))] = _rml_tmpl_frame(posx, utils.unit_get(tmpl.get('width')))
for tmpl in pt.findall('pageGraphics'):
for n in tmpl.getchildren():
for n in tmpl:
if n.tag == 'image':
self.data = rc + utils._process_text(self, n.text)
if n.tag in self._tags:

View File

@ -87,7 +87,7 @@ class _rml_styles(object,):
def _table_style_get(self, style_node):
styles = []
for node in style_node.getchildren():
for node in style_node:
start = utils.tuple_int_get(node, 'start', (0,0) )
stop = utils.tuple_int_get(node, 'stop', (-1,-1) )
if node.tag=='blockValign':
@ -185,7 +185,7 @@ class _rml_doc(object):
def _textual_image(self, node):
rc = ''
for n in node.getchildren():
for n in node:
rc +=( etree.tostring(n) or '') + n.tail
return base64.decodestring(node.tostring())
@ -776,7 +776,8 @@ class _rml_template(object):
frame.lastFrame = True
frames.append( frame )
try :
gr = pt.findall('pageGraphics') or pt.getchildren()[1].findall('pageGraphics')
gr = pt.findall('pageGraphics')\
or pt[1].findall('pageGraphics')
except :
gr=''
if len(gr):

View File

@ -55,7 +55,7 @@ def xml2str(s):
def _child_get(node, self=None, tagname=None):
for n in node:
if self and self.localcontext and n.get('rml_loop', False):
if self and self.localcontext and n.get('rml_loop'):
oldctx = self.localcontext
for ctx in eval(n.get('rml_loop'),{}, self.localcontext):
@ -79,12 +79,12 @@ def _child_get(node, self=None, tagname=None):
yield n
self.localcontext = oldctx
continue
if self and self.localcontext and n.get('rml_except', False):
if self and self.localcontext and n.get('rml_except'):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except:
continue
if self and self.localcontext and n.get('rml_tag', False):
if self and self.localcontext and n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
@ -122,10 +122,7 @@ def _process_text(self, txt):
return result
def text_get(node):
rc = ''
for node in node.getchildren():
rc = rc + tools.ustr(node.text)
return rc
return ''.join([tools.ustr(n.text) for n in node])
units = [
(re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
@ -155,8 +152,7 @@ def unit_get(size):
def tuple_int_get(node, attr_name, default=None):
if not node.get(attr_name):
return default
res = [int(x) for x in node.get(attr_name).split(',')]
return res
return map(int, node.get(attr_name).split(','))
def bool_get(value):
return (str(value)=="1") or (value.lower()=='yes')

View File

@ -302,7 +302,7 @@ class rml_parse(object):
rml_head = rml_head.replace('<image','<!--image')
rml_head = rml_head.replace('</image>','</image-->')
head_dom = etree.XML(rml_head)
for tag in head_dom.getchildren():
for tag in head_dom:
found = rml_dom.find('.//'+tag.tag)
if found is not None and len(found):
if tag.get('position'):
@ -466,13 +466,14 @@ class report_sxw(report_rml, preprocess.report):
for pe in elements:
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name"):
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 3":
pe.getchildren()[0].text=data['id']
pe[0].text=data['id']
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 4":
pe.getchildren()[0].text=data['model']
meta = etree.tostring(rml_dom_meta)
pe[0].text=data['model']
meta = etree.tostring(rml_dom_meta, encoding='utf-8',
xml_declaration=True)
rml_dom = etree.XML(rml)
body = rml_dom.getchildren()[-1]
body = rml_dom[-1]
elements = []
key1 = rml_parser.localcontext['name_space']["text"]+"p"
key2 = rml_parser.localcontext['name_space']["text"]+"drop-down"
@ -486,7 +487,7 @@ class report_sxw(report_rml, preprocess.report):
pp=de.getparent()
if de.text or de.tail:
pe.text = de.text or de.tail
for cnd in de.getchildren():
for cnd in de:
if cnd.text or cnd.tail:
if pe.text:
pe.text += cnd.text or cnd.tail
@ -512,12 +513,11 @@ class report_sxw(report_rml, preprocess.report):
rml_dom = self.preprocess_rml(rml_dom,report_type)
create_doc = self.generators[report_type]
odt = etree.tostring(create_doc(rml_dom, rml_parser.localcontext))
odt = etree.tostring(create_doc(rml_dom, rml_parser.localcontext),
encoding='utf-8', xml_declaration=True)
sxw_z = zipfile.ZipFile(sxw_io, mode='a')
sxw_z.writestr('content.xml', "<?xml version='1.0' encoding='UTF-8'?>" + \
odt)
sxw_z.writestr('meta.xml', "<?xml version='1.0' encoding='UTF-8'?>" + \
meta)
sxw_z.writestr('content.xml', odt)
sxw_z.writestr('meta.xml', meta)
if report_xml.header:
#Add corporate header/footer
@ -532,9 +532,9 @@ class report_sxw(report_rml, preprocess.report):
odt = create_doc(rml_dom,rml_parser.localcontext)
if report_xml.header:
rml_parser._add_header(odt)
odt = etree.tostring(odt)
sxw_z.writestr('styles.xml',"<?xml version='1.0' encoding='UTF-8'?>" + \
odt)
odt = etree.tostring(odt, encoding='utf-8',
xml_declaration=True)
sxw_z.writestr('styles.xml', odt)
sxw_z.close()
final_op = sxw_io.getvalue()
sxw_io.close()
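Instead of prepending the "<?xml ...?>" header by hand before writing content.xml and meta.xml into the zip, the code now asks etree.tostring for the declaration. Roughly:

    from lxml import etree

    root = etree.Element('office')
    print(etree.tostring(root, encoding='utf-8', xml_declaration=True))
    # "<?xml version='1.0' encoding='utf-8'?>\n<office/>"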

View File

@ -108,77 +108,69 @@ class SecureMultiHandler2(SecureMultiHTTPHandler):
def log_error(self, format, *args):
netsvc.Logger().notifyChannel('http',netsvc.LOG_ERROR,format % args)
class HttpDaemon(threading.Thread, netsvc.Server):
def __init__(self, interface, port):
class BaseHttpDaemon(threading.Thread, netsvc.Server):
def __init__(self, interface, port, handler):
threading.Thread.__init__(self)
netsvc.Server.__init__(self)
self.__port = port
self.__interface = interface
try:
self.server = ThreadedHTTPServer((interface, port), MultiHandler2)
self.server = ThreadedHTTPServer((interface, port), handler)
self.server.vdirs = []
self.server.logRequests = True
netsvc.Logger().notifyChannel("web-services", netsvc.LOG_INFO,
"starting HTTP service at %s port %d" % (interface or '0.0.0.0', port,))
netsvc.Logger().notifyChannel(
"web-services", netsvc.LOG_INFO,
"starting HTTPS service at %s port %d" %
(interface or '0.0.0.0', port,))
except Exception, e:
netsvc.Logger().notifyChannel('httpd', netsvc.LOG_CRITICAL, "Error occur when starting the server daemon: %s" % (e,))
netsvc.Logger().notifyChannel(
'httpd', netsvc.LOG_CRITICAL,
"Error occur when starting the server daemon: %s" % (e,))
raise
def attach(self, path, gw):
pass
def stop(self):
self.running = False
if os.name != 'nt':
self.server.socket.shutdown( hasattr(socket, 'SHUT_RDWR') and socket.SHUT_RDWR or 2 )
try:
self.server.socket.shutdown(
hasattr(socket, 'SHUT_RDWR') and socket.SHUT_RDWR or 2)
except socket.error, e:
if e.errno != 57: raise
# OSX, socket shutdowns both sides if any side closes it
# causing an error 57 'Socket is not connected' on shutdown
# of the other side (or something), see
# http://bugs.python.org/issue4397
netsvc.Logger().notifyChannel(
'server', netsvc.LOG_DEBUG,
'"%s" when shutting down server socket, '
'this is normal under OS X'%e)
self.server.socket.close()
def run(self):
#self.server.register_introspection_functions()
self.running = True
while self.running:
self.server.handle_request()
return True
class HttpDaemon(BaseHttpDaemon):
def __init__(self, interface, port):
super(HttpDaemon, self).__init__(interface, port,
handler=MultiHandler2)
class HttpSDaemon(threading.Thread, netsvc.Server):
def __init__(self, interface, port):
threading.Thread.__init__(self)
netsvc.Server.__init__(self)
self.__port = port
self.__interface = interface
try:
self.server = ThreadedHTTPServer((interface, port), SecureMultiHandler2)
self.server.vdirs = []
self.server.logRequests = True
netsvc.Logger().notifyChannel("web-services", netsvc.LOG_INFO,
"starting HTTPS service at %s port %d" % (interface or '0.0.0.0', port,))
super(HttpSDaemon, self).__init__(interface, port,
handler=SecureMultiHandler2)
except SSLError, e:
netsvc.Logger().notifyChannel('httpd-ssl', netsvc.LOG_CRITICAL, "Can not load the certificate and/or the private key files")
netsvc.Logger().notifyChannel(
'httpd-ssl', netsvc.LOG_CRITICAL,
"Can not load the certificate and/or the private key files")
raise
except Exception, e:
netsvc.Logger().notifyChannel('httpd-ssl', netsvc.LOG_CRITICAL, "Error occur when starting the server daemon: %s" % (e,))
raise
def attach(self, path, gw):
pass
def stop(self):
self.running = False
if os.name != 'nt':
self.server.socket.shutdown( hasattr(socket, 'SHUT_RDWR') and socket.SHUT_RDWR or 2 )
self.server.socket.close()
def run(self):
#self.server.register_introspection_functions()
self.running = True
while self.running:
self.server.handle_request()
return True
httpd = None
httpsd = None
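The HTTP and HTTPS daemons are collapsed into one BaseHttpDaemon that differs only by the handler class passed in. A stripped-down sketch of the same factoring, using standard-library server classes and illustrative names rather than the OpenERP ones:

    import threading
    from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler

    class PlainHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.end_headers()
            self.wfile.write("ok")

    class BaseDaemon(threading.Thread):
        # owns the server setup; subclasses only choose the handler
        def __init__(self, interface, port, handler):
            threading.Thread.__init__(self)
            self.server = HTTPServer((interface, port), handler)

        def run(self):
            self.server.serve_forever()

    class PlainDaemon(BaseDaemon):
        def __init__(self, interface, port):
            super(PlainDaemon, self).__init__(interface, port,
                                              handler=PlainHandler)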

View File

@ -78,7 +78,7 @@ class TinySocketClientThread(threading.Thread, netsvc.OpenERPDispatcher):
break
except Exception, e:
# this code should not be reachable, therefore we warn
netsvc.Logger().notifyChannel("net-rpc", netsvc.LOG_WARNING, "exception: %" % str(e))
netsvc.Logger().notifyChannel("net-rpc", netsvc.LOG_WARNING, "exception: %s" % str(e))
break
self.threads.remove(self)

View File

@ -211,7 +211,8 @@ class configmanager(object):
else:
rcfilepath = os.path.expanduser('~/.openerp_serverrc')
self.rcfile = fname or opt.config or os.environ.get('OPENERP_SERVER') or rcfilepath
self.rcfile = os.path.abspath(
fname or opt.config or os.environ.get('OPENERP_SERVER') or rcfilepath)
self.load()

View File

@ -35,12 +35,6 @@ from config import config
import logging
import sys
try:
from lxml import etree
except:
sys.stderr.write("ERROR: pythonic binding for the libxml2 and libxslt libraries is missing\n")
sys.stderr.write("ERROR: Try to install python-lxml package\n")
sys.exit(2)
import pickle
@ -63,17 +57,16 @@ def _eval_xml(self,node, pool, cr, uid, idref, context=None):
if context is None:
context = {}
if node.tag in ('field','value'):
t = node.get('type','') or 'char'
f_model = node.get("model", '').encode('ascii')
if len(node.get('search','')):
t = node.get('type','char')
f_model = node.get('model', '').encode('ascii')
if node.get('search'):
f_search = node.get("search",'').encode('utf-8')
f_use = node.get("use",'').encode('ascii')
f_use = node.get("use",'id').encode('ascii')
f_name = node.get("name",'').encode('utf-8')
if len(f_use)==0:
f_use = "id"
q = eval(f_search, idref)
ids = pool.get(f_model).search(cr, uid, q)
if f_use<>'id':
if f_use != 'id':
ids = map(lambda x: x[f_use], pool.get(f_model).read(cr, uid, ids, [f_use]))
_cols = pool.get(f_model)._columns
if (f_name in _cols) and _cols[f_name]._type=='many2many':
@ -85,7 +78,7 @@ def _eval_xml(self,node, pool, cr, uid, idref, context=None):
f_val = f_val[0]
return f_val
a_eval = node.get('eval','')
if len(a_eval):
if a_eval:
import time
from datetime import datetime
idref2 = idref.copy()
@ -118,52 +111,53 @@ def _eval_xml(self,node, pool, cr, uid, idref, context=None):
if not id in idref:
idref[id]=self.id_get(cr, False, id)
return s % idref
txt = '<?xml version="1.0"?>\n'+_process("".join([etree.tostring(i).encode("utf8") for i in node.getchildren()]), idref)
return txt
return '<?xml version="1.0"?>\n'\
+_process("".join([etree.tostring(n, encoding='utf-8')
for n in node]),
idref)
if t in ('char', 'int', 'float'):
d = node.text
if t == 'int':
d = d.strip()
if d=='None':
if d == 'None':
return None
else:
d=int(d.strip())
elif t=='float':
d=float(d.strip())
return int(d.strip())
elif t == 'float':
return float(d.strip())
return d
elif t in ('list','tuple'):
res=[]
for n in [i for i in node.getchildren() if (i.tag=='value')]:
for n in node.findall('./value'):
res.append(_eval_xml(self,n,pool,cr,uid,idref))
if t=='tuple':
return tuple(res)
return res
elif node.tag == "getitem":
for n in [i for i in node.getchildren()]:
for n in node:
res=_eval_xml(self,n,pool,cr,uid,idref)
if not res:
raise LookupError
elif node.get('type','') in ("int", "list"):
return res[int(node.get('index',''))]
elif node.get('type') in ("int", "list"):
return res[int(node.get('index'))]
else:
return res[node.get('index','').encode("utf8")]
elif node.tag == "function":
args = []
a_eval = node.get('eval','')
if len(a_eval):
if a_eval:
idref['ref'] = lambda x: self.id_get(cr, False, x)
args = eval(a_eval, idref)
for n in [i for i in node.getchildren()]:
for n in node:
return_val = _eval_xml(self,n, pool, cr, uid, idref, context)
if return_val != None:
if return_val is not None:
args.append(return_val)
model = pool.get(node.get('model',''))
method = node.get('name','')
res = getattr(model, method)(cr, uid, *args)
return res
elif node.tag == "test":
d = node.text
return d
return node.text
escape_re = re.compile(r'(?<!\\)/')
def escape(x):
@ -220,14 +214,14 @@ class xml_import(object):
context = {}
node_context = node.get("context",'').encode('utf8')
if len(node_context):
if node_context:
context.update(eval(node_context, eval_dict))
return context
def get_uid(self, cr, uid, data_node, node):
node_uid = node.get('uid','') or (len(data_node) and data_node.get('uid',''))
if len(node_uid):
if node_uid:
return self.id_get(cr, None, node_uid)
return uid
@ -250,18 +244,17 @@ form: module.record_id""" % (xml_id,)
d_search = rec.get("search",'')
d_id = rec.get("id",'')
ids = []
if len(d_search):
if d_search:
ids = self.pool.get(d_model).search(cr,self.uid,eval(d_search))
if len(d_id):
if d_id:
try:
ids.append(self.id_get(cr, d_model, d_id))
except:
# d_id cannot be found. doesn't matter in this case
pass
if len(ids):
if ids:
self.pool.get(d_model).unlink(cr, self.uid, ids)
self.pool.get('ir.model.data')._unlink(cr, self.uid, d_model, ids, direct=True)
return False
def _tag_report(self, cr, rec, data_node=None):
res = {}
@ -270,20 +263,19 @@ form: module.record_id""" % (xml_id,)
assert res[dest], "Attribute %s of report is empty !" % (f,)
for field,dest in (('rml','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
if rec.get(field):
res[dest] = rec.get(field,'').encode('utf8')
res[dest] = rec.get(field).encode('utf8')
if rec.get('auto'):
res['auto'] = eval(rec.get('auto',''))
res['auto'] = eval(rec.get('auto'))
if rec.get('sxw'):
sxw_content = misc.file_open(rec.get('sxw','')).read()
sxw_content = misc.file_open(rec.get('sxw')).read()
res['report_sxw_content'] = sxw_content
if rec.get('header'):
res['header'] = eval(rec.get('header',''))
res['header'] = eval(rec.get('header'))
if rec.get('report_type'):
res['report_type'] = rec.get('report_type','')
res['multi'] = rec.get('multi','') and eval(rec.get('multi',''))
xml_id = rec.get('id','').encode('utf8')
res['report_type'] = rec.get('report_type')
res['multi'] = rec.get('multi') and eval(rec.get('multi'))
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
if rec.get('groups'):
@ -303,10 +295,10 @@ form: module.record_id""" % (xml_id,)
self.idref[xml_id] = int(id)
if not rec.get('menu') or eval(rec.get('menu','')):
keyword = str(rec.get('keyword','') or 'client_print_multi')
keyword = str(rec.get('keyword', 'client_print_multi'))
keys = [('action',keyword),('res_model',res['model'])]
value = 'ir.actions.report.xml,'+str(id)
replace = rec.get("replace",'') or True
replace = rec.get('replace', True)
self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', keyword, res['name'], [res['model']], value, replace=replace, isobject=True, xml_id=xml_id)
return False
@ -316,7 +308,7 @@ form: module.record_id""" % (xml_id,)
context = self.get_context(data_node, rec, {'ref': _ref(self, cr)})
uid = self.get_uid(cr, self.uid, data_node, rec)
_eval_xml(self,rec, self.pool, cr, uid, self.idref, context=context)
return False
return
def _tag_wizard(self, cr, rec, data_node=None):
string = rec.get("string",'').encode('utf8')
@ -349,7 +341,6 @@ form: module.record_id""" % (xml_id,)
value = 'ir.actions.wizard,'+str(id)
replace = rec.get("replace",'') or True
self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', keyword, string, [model], value, replace=replace, isobject=True, xml_id=xml_id)
return False
def _tag_url(self, cr, rec, data_node=None):
url = rec.get("string",'').encode('utf8')
@ -369,7 +360,6 @@ form: module.record_id""" % (xml_id,)
value = 'ir.actions.url,'+str(id)
replace = rec.get("replace",'') or True
self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', keyword, url, ["ir.actions.url"], value, replace=replace, isobject=True, xml_id=xml_id)
return False
def _tag_act_window(self, cr, rec, data_node=None):
name = rec.get('name','').encode('utf-8')
@ -438,38 +428,37 @@ form: module.record_id""" % (xml_id,)
replace = rec.get('replace','') or True
self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', keyword, xml_id, [src_model], value, replace=replace, isobject=True, xml_id=xml_id)
# TODO add remove ir.model.data
return False
def _tag_ir_set(self, cr, rec, data_node=None):
if not self.mode=='init':
return False
if self.mode != 'init':
return
res = {}
for field in [i for i in rec.getchildren() if (i.tag=="field")]:
for field in rec.findall('./field'):
f_name = field.get("name",'').encode('utf-8')
f_val = _eval_xml(self,field,self.pool, cr, self.uid, self.idref)
res[f_name] = f_val
self.pool.get('ir.model.data').ir_set(cr, self.uid, res['key'], res['key2'], res['name'], res['models'], res['value'], replace=res.get('replace',True), isobject=res.get('isobject', False), meta=res.get('meta',None))
return False
def _tag_workflow(self, cr, rec, data_node=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
model = str(rec.get('model',''))
w_ref = rec.get('ref','')
if len(w_ref):
if w_ref:
id = self.id_get(cr, model, w_ref)
else:
assert rec.getchildren(), 'You must define a child node if you dont give a ref'
element_childs = [i for i in rec.getchildren()]
assert len(element_childs) == 1, 'Only one child node is accepted (%d given)' % len(rec.getchildren())
id = _eval_xml(self, element_childs[0], self.pool, cr, self.uid, self.idref)
number_children = len(rec)
assert number_children > 0,\
'You must define a child node if you dont give a ref'
assert number_children == 1,\
'Only one child node is accepted (%d given)' % number_children
id = _eval_xml(self, rec[0], self.pool, cr, self.uid, self.idref)
uid = self.get_uid(cr, self.uid, data_node, rec)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, model,
id,
str(rec.get('action','')), cr)
return False
#
# Support two types of notation:
@ -509,7 +498,7 @@ form: module.record_id""" % (xml_id,)
menu_parent_id = self.id_get(cr, 'ir.ui.menu', rec.get('parent',''))
values = {'parent_id': menu_parent_id}
if rec.get('name'):
values['name'] = rec.get('name','')
values['name'] = rec.get('name')
try:
res = [ self.id_get(cr, 'ir.ui.menu', rec.get('id','')) ]
except:
@ -557,9 +546,10 @@ form: module.record_id""" % (xml_id,)
if (not values.get('name', False)) and resw:
values['name'] = resw[0]
if rec.get('sequence'):
values['sequence'] = int(rec.get('sequence',''))
values['sequence'] = int(rec.get('sequence'))
if rec.get('icon'):
values['icon'] = str(rec.get('icon',''))
values['icon'] = str(rec.get('icon'))
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
@ -581,7 +571,7 @@ form: module.record_id""" % (xml_id,)
self.idref[rec_id] = int(pid)
if rec.get('action') and pid:
a_action = rec.get('action','').encode('utf8')
a_action = rec.get('action').encode('utf8')
a_type = rec.get('type','').encode('utf8') or 'act_window'
a_id = self.id_get(cr, 'ir.actions.%s' % a_type, a_action)
action = "ir.actions.%s,%d" % (a_type, a_id)
@ -601,7 +591,7 @@ form: module.record_id""" % (xml_id,)
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
rec_src = rec.get("search",'').encode('utf8')
rec_src_count = rec.get("count",'')
rec_src_count = rec.get("count")
severity = rec.get("severity",'').encode('ascii') or netsvc.LOG_ERROR
rec_string = rec.get("string",'').encode('utf8') or 'unknown'
@ -610,12 +600,12 @@ form: module.record_id""" % (xml_id,)
eval_dict = {'ref': _ref(self, cr)}
context = self.get_context(data_node, rec, eval_dict)
uid = self.get_uid(cr, self.uid, data_node, rec)
if len(rec_id):
if rec_id:
ids = [self.id_get(cr, rec_model, rec_id)]
elif len(rec_src):
elif rec_src:
q = eval(rec_src, eval_dict)
ids = self.pool.get(rec_model).search(cr, uid, q, context=context)
if len(rec_src_count):
if rec_src_count:
count = int(rec_src_count)
if len(ids) != count:
self.assert_report.record_assertion(False, severity)
@ -631,8 +621,8 @@ form: module.record_id""" % (xml_id,)
raise Exception('Severe assertion failure')
return
assert ids != None, 'You must give either an id or a search criteria'
assert ids is not None,\
'You must give either an id or a search criteria'
ref = _ref(self, cr)
for id in ids:
brrec = model.browse(cr, uid, id, context)
@ -645,7 +635,7 @@ form: module.record_id""" % (xml_id,)
globals['floatEqual'] = self._assert_equals
globals['ref'] = ref
globals['_ref'] = ref
for test in [i for i in rec.getchildren() if (i.tag=="test")]:
for test in rec.findall('./test'):
f_expr = test.get("expr",'').encode('utf-8')
expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
expression_value = eval(f_expr, globals)
@ -700,7 +690,7 @@ form: module.record_id""" % (xml_id,)
# otherwise it is skipped
return None
res = {}
for field in [i for i in rec.getchildren() if (i.tag == "field")]:
for field in rec.findall('./field'):
#TODO: most of this code is duplicated above (in _eval_xml)...
f_name = field.get("name",'').encode('utf-8')
f_ref = field.get("ref",'').encode('ascii')
@ -711,7 +701,7 @@ form: module.record_id""" % (xml_id,)
f_use = field.get("use",'').encode('ascii') or 'id'
f_val = False
if len(f_search):
if f_search:
q = eval(f_search, self.idref)
field = []
assert f_model, 'Define an attribute model="..." in your .XML file !'
@ -727,7 +717,7 @@ form: module.record_id""" % (xml_id,)
# otherwise (we are probably in a many2one field),
# take the first element of the search
f_val = s[0][f_use]
elif len(f_ref):
elif f_ref:
if f_ref=="null":
f_val = False
else:
@ -755,9 +745,7 @@ form: module.record_id""" % (xml_id,)
result = self.pool.get('ir.model.data')._get_id(cr, self.uid, mod, id_str)
return int(self.pool.get('ir.model.data').read(cr, self.uid, [result], ['res_id'])[0]['res_id'])
def parse(self, xmlstr):
de = etree.XML(xmlstr)
def parse(self, de):
if not de.tag in ['terp', 'openerp']:
self.logger.notifyChannel("init", netsvc.LOG_ERROR, "Mismatch xml format" )
raise Exception( "Mismatch xml format: only terp or openerp as root tag" )
@ -765,8 +753,8 @@ form: module.record_id""" % (xml_id,)
if de.tag == 'terp':
self.logger.notifyChannel("init", netsvc.LOG_WARNING, "The tag <terp/> is deprecated, use <openerp/>")
for n in [i for i in de.getchildren() if (i.tag=="data")]:
for rec in n.getchildren():
for n in de.findall('./data'):
for rec in n:
if rec.tag in self._tags:
try:
self._tags[rec.tag](self.cr, rec, n)
@ -861,12 +849,9 @@ def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
# xml import/export
#
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
xmlstr = xmlfile.read()
xmlfile.seek(0)
relaxng_doc = etree.parse(file(os.path.join( config['root_path'], 'import_xml.rng' )))
relaxng = etree.RelaxNG(relaxng_doc)
doc = etree.parse(xmlfile)
relaxng = etree.RelaxNG(
etree.parse(os.path.join(config['root_path'],'import_xml.rng' )))
try:
relaxng.assert_(doc)
except Exception, e:
@ -878,8 +863,7 @@ def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=Fa
if idref is None:
idref={}
obj = xml_import(cr, module, idref, mode, report=report, noupdate=noupdate)
obj.parse(xmlstr)
del obj
obj.parse(doc.getroot())
return True
def convert_xml_export(res):
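convert_xml_import now parses the file once with lxml and validates it against import_xml.rng via etree.RelaxNG before handing the root element to xml_import.parse(). A tiny self-contained example of that validation step, with an inline schema standing in for import_xml.rng:

    from lxml import etree

    rng = etree.RelaxNG(etree.XML(
        '<element name="openerp" xmlns="http://relaxng.org/ns/structure/1.0">'
        '<zeroOrMore><element name="data"><empty/></element></zeroOrMore>'
        '</element>'))

    doc = etree.XML('<openerp><data/></openerp>')
    rng.assert_(doc)            # raises on invalid input
    print(rng.validate(doc))    # True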

View File

@ -376,9 +376,9 @@ def trans_export(lang, modules, buffer, format, dbname=None):
def trans_parse_xsl(de):
res = []
for n in [i for i in de.getchildren()]:
for n in de:
if n.get("t"):
for m in [j for j in n.getchildren() if j.text]:
for m in [j for j in n if j.text]:
l = m.text.strip().replace('\n',' ')
if len(l):
res.append(l.encode("utf8"))
@ -387,8 +387,8 @@ def trans_parse_xsl(de):
def trans_parse_rml(de):
res = []
for n in [i for i in de.getchildren()]:
for m in [j for j in n.getchildren() if j.text]:
for n in de:
for m in [j for j in n if j.text]:
string_list = [s.replace('\n', ' ').strip() for s in re.split('\[\[.+?\]\]', m.text)]
for s in string_list:
if s:
@ -399,14 +399,10 @@ def trans_parse_rml(de):
def trans_parse_view(de):
res = []
if de.get("string"):
s = de.get('string')
if s:
res.append(s.encode("utf8"))
res.append(de.get('string').encode("utf8"))
if de.get("sum"):
s = de.get('sum')
if s:
res.append(s.encode("utf8"))
for n in [i for i in de.getchildren()]:
res.append(de.get('sum').encode("utf8"))
for n in de:
res.extend(trans_parse_view(n))
return res
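trans_parse_view above now iterates children with a plain `for n in de` and collects the string/sum attributes directly. The same walk in isolation, over a made-up view arch:

    from lxml import etree

    def collect(node):
        res = []
        for attr in ('string', 'sum'):
            if node.get(attr):
                res.append(node.get(attr).encode('utf8'))
        for child in node:
            res.extend(collect(child))
        return res

    arch = etree.XML('<form string="Partner"><field name="debit" sum="Total"/></form>')
    print(collect(arch))   # ['Partner', 'Total']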

View File

@ -53,7 +53,7 @@ class interface(netsvc.Service):
trans = translate(cr, self.wiz_name+','+state, 'wizard_view', lang, node.get('string').encode('utf8'))
if trans:
node.set('string', trans)
for n in node.getchildren():
for n in node:
self.translate_view(cr, n, state, lang)
def execute_cr(self, cr, uid, data, state='init', context=None):

42
debian/po/bg.po vendored Normal file
View File

@ -0,0 +1,42 @@
# Bulgarian translation for openobject-server
# Copyright (c) 2009 Rosetta Contributors and Canonical Ltd 2009
# This file is distributed under the same license as the openobject-server package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2009.
#
msgid ""
msgstr ""
"Project-Id-Version: openobject-server\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2009-08-24 22:41+0300\n"
"PO-Revision-Date: 2009-12-16 06:46+0000\n"
"Last-Translator: Fabien (Open ERP) <fp@tinyerp.com>\n"
"Language-Team: Bulgarian <bg@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-17 04:43+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string
#. Description
#: ../openerp-server.templates:1001
msgid "Dedicated system account for the Open ERP server:"
msgstr "Системен акаунт специално за сървъра на Open ERP:"
#. Type: string
#. Description
#: ../openerp-server.templates:1001
msgid ""
"The Open ERP server must use a dedicated account for its operation so that "
"the system's security is not compromised by running it with superuser "
"privileges."
msgstr ""
"Сървърът на Open ERP трябва да използва специално създаден акаунт за тази "
"операция,така че да не се допускат компромиси със сигурността на системите, "
"изпълнявайки я с привилегийте на супер потребител."
#. Type: string
#. Description
#: ../openerp-server.templates:1001
msgid "Please choose that account's username."
msgstr "Моля, изберете потребителско име за този акаунт."

4
debian/po/ca.po vendored
View File

@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: openobject-server\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2009-08-24 22:41+0300\n"
"PO-Revision-Date: 2009-12-13 17:50+0000\n"
"PO-Revision-Date: 2009-12-14 06:11+0000\n"
"Last-Translator: Jordi Esteve - http://www.zikzakmedia.com "
"<jesteve@zikzakmedia.com>\n"
"Language-Team: Catalan <ca@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-14 04:35+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string

2
debian/po/cs.po vendored
View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-11-27 04:51+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string

2
debian/po/de.po vendored
View File

@ -14,7 +14,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-11-27 04:51+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string

42
debian/po/el.po vendored Normal file
View File

@ -0,0 +1,42 @@
# Greek translation for openobject-server
# Copyright (c) 2009 Rosetta Contributors and Canonical Ltd 2009
# This file is distributed under the same license as the openobject-server package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2009.
#
msgid ""
msgstr ""
"Project-Id-Version: openobject-server\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2009-08-24 22:41+0300\n"
"PO-Revision-Date: 2009-12-16 06:46+0000\n"
"Last-Translator: Fabien (Open ERP) <fp@tinyerp.com>\n"
"Language-Team: Greek <el@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-17 04:43+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string
#. Description
#: ../openerp-server.templates:1001
msgid "Dedicated system account for the Open ERP server:"
msgstr "Αποκλειστικός λογαριασμός του συστήματος για τον server OpenERP."
#. Type: string
#. Description
#: ../openerp-server.templates:1001
msgid ""
"The Open ERP server must use a dedicated account for its operation so that "
"the system's security is not compromised by running it with superuser "
"privileges."
msgstr ""
"O OpenERP server πρέπει να χρησιμοποιεί έναν αποκλειστικό λογαριασμό για την "
"λειτουργία του, έτσι ώστε να μην κινδυνέψει η ασφάλεια του συστήματος αν "
"τρέχει με δικαιώματα υπερχρήστη."
#. Type: string
#. Description
#: ../openerp-server.templates:1001
msgid "Please choose that account's username."
msgstr "Παρακαλώ επιλέξτε το username αυτού του λογαριασμού."

4
debian/po/es.po vendored
View File

@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: openerp-server 5.0.1-0-2\n"
"Report-Msgid-Bugs-To: openerp-server@packages.debian.org\n"
"POT-Creation-Date: 2009-08-24 22:41+0300\n"
"PO-Revision-Date: 2009-12-13 17:49+0000\n"
"PO-Revision-Date: 2009-12-14 06:11+0000\n"
"Last-Translator: Jordi Esteve - http://www.zikzakmedia.com "
"<jesteve@zikzakmedia.com>\n"
"Language-Team: Spanish <debian-l10n-spanish@lists.debian.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-12-14 04:35+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string

2
debian/po/fi.po vendored
View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-11-27 04:51+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string

2
debian/po/fr.po vendored
View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-11-27 04:51+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string

2
debian/po/gl.po vendored
View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-11-27 04:51+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string

2
debian/po/it.po vendored
View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-11-27 04:51+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string

2
debian/po/ja.po vendored
View File

@ -13,7 +13,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-11-27 04:51+0000\n"
"X-Launchpad-Export-Date: 2009-12-15 05:03+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string

14
debian/po/nl.po vendored
View File

@ -8,20 +8,20 @@ msgstr ""
"Project-Id-Version: openobject-server\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2009-08-24 22:41+0300\n"
"PO-Revision-Date: 2009-11-27 13:51+0000\n"
"Last-Translator: Fabien (Open ERP) <fp@tinyerp.com>\n"
"PO-Revision-Date: 2009-12-16 08:28+0000\n"
"Last-Translator: Pieter J. Kersten (EduSense BV) <Unknown>\n"
"Language-Team: Dutch <nl@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2009-11-28 04:56+0000\n"
"X-Launchpad-Export-Date: 2009-12-17 04:43+0000\n"
"X-Generator: Launchpad (build Unknown)\n"
#. Type: string
#. Description
#: ../openerp-server.templates:1001
msgid "Dedicated system account for the Open ERP server:"
msgstr "Specifieke gebruikersaccount voor de Open ERP server:"
msgstr "Specifiek gebruikersaccount voor de OpenERP-server:"
#. Type: string
#. Description
@ -31,9 +31,9 @@ msgid ""
"the system's security is not compromised by running it with superuser "
"privileges."
msgstr ""
"De Open ERP server moet een eigen gebruikersaccount voor zijn verrichting "
"gebruiken zodat de server niet kan worden gecompromitteerd door Open ERP met "
"speciale-gebruikersrechten."
"De OpenERP-server dient een eigen systeemaccount te gebruiken, om te "
"voorkomen dat de serverbeveiliging gecompromitteerd kan raken door OpenERP "
"te laten functioneren met beheerder-privileges."
#. Type: string
#. Description

Some files were not shown because too many files have changed in this diff