From 557de246930f0b8eaf4c77c7003369f82dcb4314 Mon Sep 17 00:00:00 2001
From: "Rvo (Open ERP)"
Date: Fri, 29 Jan 2010 13:18:00 +0530
Subject: [PATCH] [FIX] Renamed faces and removed print statements in it

bzr revid: rvo@tinyerp.co.in-20100129074800-cjtob9irn861g2ff
---
 addons/resource/faces/__init__.py | 30 +
 addons/resource/faces/observer.py | 79 +
 addons/resource/faces/pcalendar.py | 960 +++++++
 addons/resource/faces/plocale.py | 54 +
 addons/resource/faces/resource.py | 866 +++++++
 addons/resource/faces/task.py | 3855 ++++++++++++++++++++++++++++
 addons/resource/faces/timescale.py | 113 +
 addons/resource/faces/utils.py | 124 +
 8 files changed, 6081 insertions(+)
 create mode 100644 addons/resource/faces/__init__.py
 create mode 100644 addons/resource/faces/observer.py
 create mode 100644 addons/resource/faces/pcalendar.py
 create mode 100644 addons/resource/faces/plocale.py
 create mode 100644 addons/resource/faces/resource.py
 create mode 100755 addons/resource/faces/task.py
 create mode 100644 addons/resource/faces/timescale.py
 create mode 100644 addons/resource/faces/utils.py

diff --git a/addons/resource/faces/__init__.py b/addons/resource/faces/__init__.py
new file mode 100644
index 00000000000..d1d52daf742
--- /dev/null
+++ b/addons/resource/faces/__init__.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+# OpenERP, Open Source Management Solution
+# Copyright (C) 2004-2009 Tiny SPRL ().
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+#
+##############################################################################
+
+from pcalendar import Calendar, WorkingDate, StartDate, EndDate, Minutes
+
+from task import Project, BalancedProject, AdjustedProject, Task, \
+    STRICT, SLOPPY, SMART, Multi, YearlyMax, WeeklyMax, MonthlyMax, \
+    DailyMax, VariableLoad
+
+from resource import Resource
+
+# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
diff --git a/addons/resource/faces/observer.py b/addons/resource/faces/observer.py
new file mode 100644
index 00000000000..411b2c5395b
--- /dev/null
+++ b/addons/resource/faces/observer.py
@@ -0,0 +1,79 @@
+#@+leo-ver=4
+#@+node:@file observer.py
+#@@language python
+#@<< Copyright >>
+#@+node:<< Copyright >>
+############################################################################
+# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
+# mreithinger@web.de
+#
+# This file is part of faces.
+#
+# faces is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# faces is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the
+# Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+############################################################################
+
+#@-node:<< Copyright >>
+#@nl
+"""
+This module contains the base class for all observer objects
+"""
+#@<< Imports >>
+#@+node:<< Imports >>
+#@-node:<< Imports >>
+#@nl
+_is_source_ = True
+#@+others
+#@+node:class Observer
+class Observer(object):
+    """
+    Base class for all charts and reports.
+
+    @var visible: Specifies if the observer is visible
+    at the navigation bar inside the gui.
+
+    @var link_view: synchronizes the marked objects in all views.
+
+    """
+    #@ << declarations >>
+    #@+node:<< declarations >>
+    __type_name__ = None
+    __type_image__ = None
+    visible = True
+    link_view = True
+
+    __attrib_completions__ = { "visible" : 'visible = False',
+                               "link_view" : "link_view = False" }
+
+
+    #@-node:<< declarations >>
+    #@nl
+
+    #@ @+others
+    #@+node:register_editors
+    def register_editors(cls, registry):
+        pass
+
+    register_editors = classmethod(register_editors)
+
+    #@-node:register_editors
+    #@-others
+
+#@-node:class Observer
+#@-others
+factories = { }
+clear_cache_funcs = {}
+#@-node:@file observer.py
+#@-leo
diff --git a/addons/resource/faces/pcalendar.py b/addons/resource/faces/pcalendar.py
new file mode 100644
index 00000000000..b894a2b66a7
--- /dev/null
+++ b/addons/resource/faces/pcalendar.py
@@ -0,0 +1,960 @@
+#@+leo-ver=4
+#@+node:@file pcalendar.py
+#@@language python
+#@<< Copyright >>
+#@+node:<< Copyright >>
+############################################################################
+# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
+# mreithinger@web.de
+#
+# This file is part of faces.
+#
+# faces is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# faces is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the
+# Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+############################################################################ + +#@-node:<< Copyright >> +#@nl +""" +This module contains all classes and functions for the project plan calendar +""" +#@<< Imports >> +#@+node:<< Imports >> +from string import * +import datetime +import time +import re +import locale +import bisect +import sys + +TIME_RANGE_PATTERN = re.compile("(\\d+):(\\d+)\\s*-\\s*(\\d+):(\\d+)") +TIME_DELTA_PATTERN = re.compile("([-+]?\\d+(\\.\\d+)?)([dwmyMH])") + +DEFAULT_MINIMUM_TIME_UNIT = 15 +DEFAULT_WORKING_DAYS_PER_WEEK = 5 +DEFAULT_WORKING_DAYS_PER_MONTH = 20 +DEFAULT_WORKING_DAYS_PER_YEAR = 200 +DEFAULT_WORKING_HOURS_PER_DAY = 8 + +DEFAULT_WORKING_TIMES = ( (8 * 60, 12 * 60 ), + (13 * 60, 17 * 60 ) ) +DEFAULT_WORKING_DAYS = { 0 : DEFAULT_WORKING_TIMES, + 1 : DEFAULT_WORKING_TIMES, + 2 : DEFAULT_WORKING_TIMES, + 3 : DEFAULT_WORKING_TIMES, + 4 : DEFAULT_WORKING_TIMES, + 5 : (), + 6 : () } + +#@-node:<< Imports >> +#@nl +#@+others +#@+node:to_time_range +def to_time_range(src): + """ + converts a string to a timerange, i.e + (from, to) + from, to are ints, specifing the minutes since midnight + """ + + if not src: return () + + mo = TIME_RANGE_PATTERN.match(src) + if not mo: + raise ValueError("%s is no time range" % src) + + from_time = int(mo.group(1)) * 60 + int(mo.group(2)) + to_time = int(mo.group(3)) * 60 + int(mo.group(4)) + return from_time, to_time +#@-node:to_time_range +#@+node:to_datetime +def to_datetime(src): + """ + a tolerant conversion function to convert different strings + to a datetime.dateime + """ + + #to get the original value for wrappers + new = getattr(src, "_value", src) + while new is not src: + src = new + new = getattr(src, "_value", src) + + if isinstance(src, _WorkingDateBase): + src = src.to_datetime() + + if isinstance(src, datetime.datetime): + return src + + src = str(src) + + formats = [ "%x %H:%M", + "%x", + "%Y-%m-%d %H:%M", + "%y-%m-%d %H:%M", + "%d.%m.%Y %H:%M", + "%d.%m.%y %H:%M", + "%Y%m%d %H:%M", + "%d/%m/%y %H:%M", + "%d/%m/%Y %H:%M", + "%d/%m/%Y", + "%d/%m/%y", + "%Y-%m-%d", + "%y-%m-%d", + "%d.%m.%Y", + "%d.%m.%y", + "%Y%m%d" ] + for f in formats: + try: + conv = time.strptime(src, f) + + return datetime.datetime(*conv[0:-3]) + except Exception, e: + pass + + raise TypeError("'%s' (%s) is not a datetime" % (src, str(type(src)))) +#@-node: +#@+node:_to_days +def _to_days(src): + """ + converts a string of the day abreviations mon, tue, wed, + thu, fri, sat, sun to a dir with correct weekday indices. 
+ For Example + convert_to_days('mon, tue, thu') results in + { 0:1, 1:1, 3:1 } + """ + + tokens = src.split(",") + result = { } + for t in tokens: + try: + index = { "mon" : 0, + "tue" : 1, + "wed" : 2, + "thu" : 3, + "fri" : 4, + "sat" : 5, + "sun" : 6 } [ lower(t.strip()) ] + result[index] = 1 + except: + raise ValueError("%s is not a day" % (t)) + + return result +#@-node:_to_days +#@+node:_add_to_time_spans +def _add_to_time_spans(src, to_add, is_free): + if not isinstance(to_add, (tuple, list)): + to_add = (to_add,) + + tmp = [] + for start, end, f in src: + tmp.append((start, True, f)) + tmp.append((end, False, f)) + + for v in to_add: + if isinstance(v, (tuple, list)): + start = to_datetime(v[0]) + end = to_datetime(v[1]) + else: + start = to_datetime(v) + end = start.replace(hour=0, minute=0) + datetime.timedelta(1) + + tmp.append((start, start <= end, is_free)) + tmp.append((end, start > end, is_free)) + + tmp.sort() + + # 0: date + # 1: is_start + # 2: is_free + sequence = [] + free_count = 0 + work_count = 0 + last = None + for date, is_start, is_free in tmp: + if is_start: + if is_free: + if not free_count and not work_count: + last = date + + free_count += 1 + else: + if not work_count: + if free_count: sequence.append((last, date, True)) + last = date + work_count += 1 + else: + if is_free: + assert(free_count > 0) + free_count -= 1 + if not free_count and not work_count: + sequence.append((last, date, True)) + else: + assert(work_count > 0) + work_count -= 1 + if not work_count: sequence.append((last, date, False)) + if free_count: last = date + + return tuple(sequence) +#@-node:_add_to_time_spans +#@+node:to_timedelta +def to_timedelta(src, cal=None, is_duration=False): + """ + converts a string to a datetime.timedelta. If cal is specified + it will be used for getting the working times. if is_duration=True + working times will not be considered. 
Valid units are + d for Days + w for Weeks + m for Months + y for Years + H for Hours + M for Minutes + """ + + cal = cal or _default_calendar + if isinstance(src, datetime.timedelta): + return datetime.timedelta(src.days, seconds=src.seconds, calendar=cal) + + if isinstance(src, (long, int, float)): + src = "%sM" % str(src) + + if not isinstance(src, basestring): + raise ValueError("%s is not a duration" % (repr(src))) + + src = src.strip() + + if is_duration: + d_p_w = 7 + d_p_m = 30 + d_p_y = 360 + d_w_h = 24 + else: + d_p_w = cal.working_days_per_week + d_p_m = cal.working_days_per_month + d_p_y = cal.working_days_per_year + d_w_h = cal.working_hours_per_day + + def convert_minutes(minutes): + minutes = int(minutes) + hours = minutes / 60 + minutes = minutes % 60 + days = hours / d_w_h + hours = hours % d_w_h + return [ days, 0, 0, 0, minutes, hours ] + + def convert_days(value): + days = int(value) + value -= days + value *= d_w_h + hours = int(value) + value -= hours + value *= 60 + minutes = round(value) + return [ days, 0, 0, 0, minutes, hours ] + + sum_args = [ 0, 0, 0, 0, 0, 0 ] + + split = src.split(" ") + for s in split: + mo = TIME_DELTA_PATTERN.match(s) + if not mo: + raise ValueError(src + + " is not a valid duration: valid" + " units are: d w m y M H") + + unit = mo.group(3) + val = float(mo.group(1)) + + if unit == 'd': + args = convert_days(val) + elif unit == 'w': + args = convert_days(val * d_p_w) + elif unit == 'm': + args = convert_days(val * d_p_m) + elif unit == 'y': + args = convert_days(val * d_p_y) + elif unit == 'M': + args = convert_minutes(val) + elif unit == 'H': + args = convert_minutes(val * 60) + + sum_args = [ a + b for a, b in zip(sum_args, args) ] + + sum_args = tuple(sum_args) + return datetime.timedelta(*sum_args) +#@-node:to_timedelta +#@+node:timedelta_to_str +def timedelta_to_str(delta, format, cal=None, is_duration=False): + cal = cal or _default_calendar + if is_duration: + d_p_w = 7 + d_p_m = 30 + d_p_y = 365 + d_w_h = 24 + else: + d_p_w = cal.working_days_per_week + d_p_m = cal.working_days_per_month + d_p_y = cal.working_days_per_year + d_w_h = cal.working_hours_per_day + + has_years = format.find("%y") > -1 + has_minutes = format.find("%M") > -1 + has_hours = format.find("%H") > -1 or has_minutes + has_days = format.find("%d") > -1 + has_weeks = format.find("%w") > -1 + has_months = format.find("%m") > -1 + + result = format + days = delta.days + + d_r = (days, format) + minutes = delta.seconds / 60 + + def rebase(d_r, cond1, cond2, letter, divisor): + #rebase the days + if not cond1: return d_r + + days, result = d_r + + if cond2: + val = days / divisor + if not val: + result = re.sub("{[^{]*?%" + letter + "[^}]*?}", "", result) + + result = result.replace("%" + letter, str(val)) + days %= divisor + else: + result = result.replace("%" + letter, + locale.format("%.2f", + (float(days) / divisor))) + + return (days, result) + + d_r = rebase(d_r, has_years, has_months or has_weeks or has_days, "y", d_p_y) + d_r = rebase(d_r, has_months, has_weeks or has_days, "m", d_p_m) + d_r = rebase(d_r, has_weeks, has_days, "w", d_p_w) + days, result = d_r + + if not has_days: + minutes += days * d_w_h * 60 + days = 0 + + if has_hours: + if not days: + result = re.sub("{[^{]*?%d[^}]*?}", "", result) + + result = result.replace("%d", str(days)) + else: + result = result.replace("%d", + "%.2f" % (days + float(minutes) + / (d_w_h * 60))) + + if has_hours: + if has_minutes: + val = minutes / 60 + if not val: + result = re.sub("{[^{]*?%H[^}]*?}", "", result) + + 
result = result.replace("%H", str(val)) + minutes %= 60 + else: + result = result.replace("%H", "%.2f" % (float(minutes) / 60)) + + if not minutes: + result = re.sub("{[^{]*?%M[^}]*?}", "", result) + + result = result.replace("%M", str(minutes)) + + result = result.replace("{", "") + result = result.replace("}", "") + return result.strip() +#@-node:timedelta_to_str +#@+node:strftime +def strftime(dt, format): + """ + an extended version of strftime, that introduces some new + directives: + %IW iso week number + %IY iso year + %IB full month name appropriate to iso week + %ib abbreviated month name appropriate to iso week + %im month as decimal number appropriate to iso week + """ + iso = dt.isocalendar() + if iso[0] != dt.year: + iso_date = dt.replace(day=1, month=1) + format = format \ + .replace("%IB", iso_date.strftime("%B"))\ + .replace("%ib", iso_date.strftime("%b"))\ + .replace("%im", iso_date.strftime("%m")) + else: + format = format \ + .replace("%IB", "%B")\ + .replace("%ib", "%b")\ + .replace("%im", "%m") + + format = format \ + .replace("%IW", str(iso[1]))\ + .replace("%IY", str(iso[0]))\ + + return dt.strftime(format) +#@-node:strftime +#@+node:union +def union(*calendars): + """ + returns a calendar that unifies all working times + """ + #@ << check arguments >> + #@+node:<< check arguments >> + if len(calendars) == 1: + calendars = calendars[0] + #@nonl + #@-node:<< check arguments >> + #@nl + #@ << intersect vacations >> + #@+node:<< intersect vacations >> + free_time = [] + for c in calendars: + for start, end, is_free in c.time_spans: + if is_free: + free_time.append((start, False)) + free_time.append((end, True)) + + count = len(calendars) + open = 0 + time_spans = [] + free_time.sort() + for date, is_end in free_time: + if is_end: + if open == count: + time_spans.append((start, date, True)) + open -= 1 + else: + open += 1 + start = date + #@-node:<< intersect vacations >> + #@nl + #@ << unify extra worktime >> + #@+node:<< unify extra worktime >> + for c in calendars: + for start, end, is_free in c.time_spans: + if not is_free: + time_spans = _add_to_time_spans(time_spans, start, end) + #@nonl + #@-node:<< unify extra worktime >> + #@nl + #@ << unify working times >> + #@+node:<< unify working times >> + working_times = {} + for d in range(0, 7): + times = [] + for c in calendars: + for start, end in c.working_times.get(d, []): + times.append((start, False)) + times.append((end, True)) + + times.sort() + open = 0 + ti = [] + start = None + for time, is_end in times: + if not is_end: + if not start: start = time + open += 1 + else: + open -= 1 + if not open: + ti.append((start, time)) + start = None + + if ti: + working_times[d] = ti + #@-node:<< unify working times >> + #@nl + #@ << create result calendar >> + #@+node:<< create result calendar >> + result = Calendar() + result.working_times = working_times + result.time_spans = time_spans + result._recalc_working_time() + result._build_mapping() + #@nonl + #@-node:<< create result calendar >> + #@nl + return result +#@nonl +#@-node:union +#@+node:class _CalendarItem +class _CalendarItem(int): + #@ << class _CalendarItem declarations >> + #@+node:<< class _CalendarItem declarations >> + __slots__ = () + calender = None + + + #@-node:<< class _CalendarItem declarations >> + #@nl + #@ @+others + #@+node:__new__ + def __new__(cls, val): + try: + return int.__new__(cls, val) + except OverflowError: + return int.__new__(cls, sys.maxint) + #@-node:__new__ + #@+node:round + def round(self, round_up=True): + m_t_u = 
self.calendar.minimum_time_unit + + minutes = int(self) + base = (minutes / m_t_u) * m_t_u + minutes %= m_t_u + + round_up = round_up and minutes > 0 or minutes > m_t_u / 2 + if round_up: base += m_t_u + return self.__class__(base) + #@-node:round + #@-others +#@-node:class _CalendarItem +#@+node:class _Minutes +class _Minutes(_CalendarItem): + #@ << class _Minutes declarations >> + #@+node:<< class _Minutes declarations >> + __slots__ = () + STR_FORMAT = "{%dd}{ %HH}{ %MM}" + + + #@-node:<< class _Minutes declarations >> + #@nl + #@ @+others + #@+node:__new__ + def __new__(cls, src=0, is_duration=False): + """ + converts a timedelta in working minutes. + """ + if isinstance(src, cls) or type(src) is int: + return _CalendarItem.__new__(cls, src) + + cal = cls.calendar + if not isinstance(src, datetime.timedelta): + src = to_timedelta(src, cal, is_duration) + + d_w_h = is_duration and 24 or cal.working_hours_per_day + src = src.days * d_w_h * 60 + src.seconds / 60 + return _CalendarItem.__new__(cls, src) + #@-node:__new__ + #@+node:__cmp__ + def __cmp__(self, other): + return cmp(int(self), int(self.__class__(other))) + #@-node:__cmp__ + #@+node:__add__ + def __add__(self, other): + try: + return self.__class__(int(self) + int(self.__class__(other))) + except: + return NotImplemented + #@-node:__add__ + #@+node:__sub__ + def __sub__(self, other): + try: + return self.__class__(int(self) - int(self.__class__(other))) + except: + return NotImplemented + #@-node:__sub__ + #@+node:to_timedelta + def to_timedelta(self, is_duration=False): + d_w_h = is_duration and 24 or self.calendar.working_hours_per_day + minutes = int(self) + hours = minutes / 60 + minutes = minutes % 60 + days = hours / d_w_h + hours = hours % d_w_h + return datetime.timedelta(days, hours=hours, minutes=minutes) + #@nonl + #@-node:to_timedelta + #@+node:strftime + def strftime(self, format=None, is_duration=False): + td = self.to_timedelta(is_duration) + return timedelta_to_str(td, format or self.STR_FORMAT, + self.calendar, is_duration) + #@nonl + #@-node:strftime + #@-others +#@-node:class _Minutes +#@+node:class _WorkingDateBase +class _WorkingDateBase(_CalendarItem): + """ + A daytetime which has only valid values within the + workingtimes of a specific calendar + """ + #@ << class _WorkingDateBase declarations >> + #@+node:<< class _WorkingDateBase declarations >> + timetuple = True + STR_FORMAT = "%x %H:%M" + _minutes = _Minutes + __slots__ = () + + + #@-node:<< class _WorkingDateBase declarations >> + #@nl + #@ @+others + #@+node:__new__ + def __new__(cls, src): + #cls.__bases__[0] is the base of + #the calendar specific StartDate and EndDate + + if isinstance(src, cls.__bases__[0]) or type(src) in (int, float): + return _CalendarItem.__new__(cls, src) + + + src = cls.calendar.from_datetime(to_datetime(src)) + return _CalendarItem.__new__(cls, src) + #@-node:__new__ + #@+node:__repr__ + def __repr__(self): + return self.strftime() + #@-node:__repr__ + #@+node:to_datetime + def to_datetime(self): + return self.to_starttime() + #@-node:to_datetime + #@+node:to_starttime + def to_starttime(self): + return self.calendar.to_starttime(self) + #@-node:to_starttime + #@+node:to_endtime + def to_endtime(self): + return self.calendar.to_endtime(self) + #@-node:to_endtime + #@+node:__cmp__ + def __cmp__(self, other): + return cmp(int(self), int(self.__class__(other))) + #@-node:__cmp__ + #@+node:__add__ + def __add__(self, other): + try: + return self.__class__(int(self) + int(self._minutes(other))) + except ValueError, e: + 
raise e + except: + return NotImplemented + #@-node:__add__ + #@+node:__sub__ + def __sub__(self, other): + if isinstance(other, (datetime.timedelta, str, _Minutes)): + try: + other = self._minutes(other) + except: + pass + + if isinstance(other, self._minutes): + return self.__class__(int(self) - int(other)) + + try: + return self._minutes(int(self) - int(self.__class__(other))) + except: + return NotImplemented + #@-node:__sub__ + #@+node:strftime + def strftime(self, format=None): + return strftime(self.to_datetime(), format or self.STR_FORMAT) + #@-node:strftime + #@-others +#@-node:class _WorkingDateBase +#@+node:class Calendar +class Calendar(object): + """ + A calendar to specify working times and vacations. + The calendars epoch start at 1.1.1979 + """ + #@ << declarations >> + #@+node:<< declarations >> + # january the first must be a monday + EPOCH = datetime.datetime(1979, 1, 1) + minimum_time_unit = DEFAULT_MINIMUM_TIME_UNIT + working_days_per_week = DEFAULT_WORKING_DAYS_PER_WEEK + working_days_per_month = DEFAULT_WORKING_DAYS_PER_MONTH + working_days_per_year = DEFAULT_WORKING_DAYS_PER_YEAR + working_hours_per_day = DEFAULT_WORKING_HOURS_PER_DAY + now = EPOCH + + + #@-node:<< declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self): + + self.time_spans = () + self._dt_num_can = () + self._num_dt_can = () + self.working_times = { } + self._recalc_working_time() + self._make_classes() + #@-node:__init__ + #@+node:__or__ + def __or__(self, other): + if isinstance(other, Calendar): + return union(self, other) + + return NotImplemented + #@nonl + #@-node:__or__ + #@+node:clone + def clone(self): + result = Calendar() + result.working_times = self.working_times.copy() + result.time_spans = self.time_spans + result._recalc_working_time() + result._build_mapping() + return result + #@nonl + #@-node:clone + #@+node:set_working_days + def set_working_days(self, day_range, trange, *further_tranges): + """ + Sets the working days of an calendar + day_range is a string of day abbreviations like 'mon, tue' + trange and further_tranges is a time range string like + '8:00-10:00' + """ + time_ranges = [ trange ] + list(further_tranges) + time_ranges = filter(bool, map(to_time_range, time_ranges)) + days = _to_days(day_range) + + for k in days.keys(): + self.working_times[k] = time_ranges + + self._recalc_working_time() + self._build_mapping() + #@-node:set_working_days + #@+node:set_vacation + def set_vacation(self, value): + """ + Sets vacation time. 
+ value is either a datetime literal or + a sequence of items that can be + a datetime literals and or pair of datetime literals + """ + self.time_spans = _add_to_time_spans(self.time_spans, value, True) + self._build_mapping() + #@-node:set_vacation + #@+node:set_extra_work + def set_extra_work(self, value): + """ + Sets extra working time + value is either a datetime literal or + a sequence of items that can be + a datetime literals and or pair of datetime literals + """ + self.time_spans = _add_to_time_spans(self.time_spans, value, False) + self._build_mapping() + #@-node:set_extra_work + #@+node:from_datetime + def from_datetime(self, value): + assert(isinstance(value, datetime.datetime)) + delta = value - self.EPOCH + days = delta.days + minutes = delta.seconds / 60 + +# calculate the weektime + weeks = days / 7 + wtime = self.week_time * weeks + +# calculate the daytime + days %= 7 + dtime = sum(self.day_times[:days]) + +# calculate the minute time + slots = self.working_times.get(days, DEFAULT_WORKING_DAYS[days]) + mtime = 0 + for start, end in slots: + if minutes > end: + mtime += end - start + else: + if minutes > start: + mtime += minutes - start + break + + result = wtime + dtime + mtime + +# map exceptional timespans + dt_num_can = self._dt_num_can + pos = bisect.bisect(dt_num_can, (value,)) - 1 + if pos >= 0: + start, end, nstart, nend, cend = dt_num_can[pos] + if value < end: + if nstart < nend: + delta = value - start + delta = delta.days * 24 * 60 + delta.seconds / 60 + result = nstart + delta + else: + result = nstart + else: + result += (nend - cend) # == (result - cend) + nend + + return result + #@-node:from_datetime + #@+node:split_time + def split_time(self, value): + #map exceptional timespans + num_dt_can = self._num_dt_can + pos = bisect.bisect(num_dt_can, (value, sys.maxint)) - 1 + if pos >= 0: + nstart, nend, start, end, cend = num_dt_can[pos] + if value < nend: + value = start + datetime.timedelta(minutes=value - nstart) + delta = value - self.EPOCH + return delta.days / 7, delta.days % 7, delta.seconds / 60, -1 + else: + value += (cend - nend) # (value - nend + cend) + #calculate the weeks since the epoch + + weeks = value / self.week_time + value %= self.week_time + + #calculate the remaining days + days = 0 + for day_time in self.day_times: + if value < day_time: break + value -= day_time + days += 1 + + #calculate the remaining minutes + minutes = 0 + slots = self.working_times.get(days, DEFAULT_WORKING_DAYS[days]) + index = 0 + for start, end in slots: + delta = end - start + if delta > value: + minutes = start + value + break + else: + value -= delta + + index += 1 + + return weeks, days, minutes, index + #@-node:split_time + #@+node:to_starttime + def to_starttime(self, value): + weeks, days, minutes, index = self.split_time(value) + return self.EPOCH + datetime.timedelta(weeks=weeks, + days=days, + minutes=minutes) + #@-node:to_starttime + #@+node:to_endtime + def to_endtime(self, value): + return self.to_starttime(value - 1) + datetime.timedelta(minutes=1) + #@-node:to_endtime + #@+node:get_working_times + def get_working_times(self, day): + return self.working_times.get(day, DEFAULT_WORKING_DAYS[day]) + #@-node:get_working_times + #@+node:_build_mapping + def _build_mapping(self): + self._dt_num_can = self._num_dt_can = () + dt_num_can = [] + num_dt_can = [] + + delta = self.Minutes() + for start, end, is_free in self.time_spans: + cstart = self.StartDate(start) + cend = self.EndDate(end) + nstart = cstart + delta + + if not is_free: + d = end - start 
+ d = d.days * 24 * 60 + d.seconds / 60 + nend = nstart + d + else: + nend = nstart + + delta += (nend - nstart) - (cend - cstart) + dt_num_can.append((start, end, nstart, nend, cend)) + num_dt_can.append((nstart, nend, start, end, cend)) + + self._dt_num_can = tuple(dt_num_can) + self._num_dt_can = tuple(num_dt_can) + + #@-node:_build_mapping + #@+node:_recalc_working_time + def _recalc_working_time(self): + def slot_sum_time(day): + slots = self.working_times.get(day, DEFAULT_WORKING_DAYS[day]) + return sum(map(lambda slot: slot[1] - slot[0], slots)) + + self.day_times = map(slot_sum_time, range(0, 7)) + self.week_time = sum(self.day_times) + + #@-node:_recalc_working_time + #@+node:_make_classes + def _make_classes(self): + #ensure that the clases are instance specific + class minutes(_Minutes): + calendar = self + __slots__ = () + + class db(_WorkingDateBase): + calendar = self + _minutes = minutes + __slots__ = () + + class wdt(db): __slots__ = () + class edt(db): + __slots__ = () + + def to_datetime(self): + return self.to_endtime() + + self.Minutes, self.StartDate, self.EndDate = minutes, wdt, edt + + self.WorkingDate = self.StartDate + + #@-node:_make_classes + #@-others + + +_default_calendar = Calendar() + +WorkingDate = _default_calendar.WorkingDate +StartDate = _default_calendar.StartDate +EndDate = _default_calendar.EndDate +Minutes = _default_calendar.Minutes +#@-node:class Calendar +#@-others + +if __name__ == '__main__': + cal = Calendar() + + start = EndDate("10.1.2005") + print "start", start.strftime(), type(start) + + delay = Minutes("4H") + print "delay", delay, delay.strftime() + + print "Start", cal.StartDate is StartDate + print "base", cal.StartDate.__bases__[0] == StartDate.__bases__[0] + print "type", type(start) + + print "convert start" + start2 = cal.StartDate(start) + print "convert end" + + start3 = cal.StartDate("10.1.2005") + print "start2", start2.strftime(), type(start2) +#@-node:@file pcalendar.py +#@-leo diff --git a/addons/resource/faces/plocale.py b/addons/resource/faces/plocale.py new file mode 100644 index 00000000000..3dcd35c7cae --- /dev/null +++ b/addons/resource/faces/plocale.py @@ -0,0 +1,54 @@ +############################################################################ +# Copyright (C) 2005 by Reithinger GmbH +# mreithinger@web.de +# +# This file is part of faces. +# +# faces is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# faces is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the +# Free Software Foundation, Inc., +# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+############################################################################ + +import gettext +import os.path +import locale + + +def _get_translation(): + try: + return gettext.translation("faces") + except: + try: + if sys.frozen: + path = os.path.dirname(sys.argv[0]) + path = os.path.join(path, "resources", "faces", "locale") + else: + path = os.path.split(__file__)[0] + path = os.path.join(path, "locale") + + return gettext.translation("faces", path) + except Exception, e: + return None + +def get_gettext(): + trans = _get_translation() + if trans: return trans.ugettext + return lambda msg: msg + + +def get_encoding(): + trans = _get_translation() + if trans: return trans.charset() + return locale.getpreferredencoding() + diff --git a/addons/resource/faces/resource.py b/addons/resource/faces/resource.py new file mode 100644 index 00000000000..7cc2741fc8e --- /dev/null +++ b/addons/resource/faces/resource.py @@ -0,0 +1,866 @@ +#@+leo-ver=4 +#@+node:@file resource.py +#@@language python +#@<< Copyright >> +#@+node:<< Copyright >> +############################################################################ +# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH +# mreithinger@web.de +# +# This file is part of faces. +# +# faces is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# faces is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the +# Free Software Foundation, Inc., +# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +############################################################################ + +#@-node:<< Copyright >> +#@nl +#@<< Imports >> +#@+node:<< Imports >> +import pcalendar +import datetime +import utils +import string +import bisect +import plocale +#@-node:<< Imports >> +#@nl + +_is_source = True +_to_datetime = pcalendar.to_datetime +_ = plocale.get_gettext() + +#@+others +#@+node:_isattrib +#@+doc +#@nonl +# is used to find snapshot attributes +#@-doc +#@@code +def _isattrib(obj, a): + return a[0] != "_" \ + and not callable(getattr(obj, a)) \ + and not a.endswith("_members") \ + and a not in ("name") +#@-node:_isattrib +#@+node:class ResourceCalendar +class ResourceCalendar(object): + """ + The resource calendar saves the load time of a resource. + Is ia sequence of time intervals of loads. 
An example of + such a sequence is: + [ (datetime.min, 0), + (2006/1/1, 1.0), + (2006/1/10, 0.5), + (2006/1/15, 0) ] + + That means the resource: + is free till january the first 2006 + is fully booked from january the first to january 10th + is half booked from january 10th to january 15th + is free since january 15th + """ + + #@ @+others + #@+node:__init__ + def __init__(self, src=None): + if src: + self.bookings = list(src.bookings) + else: + self.bookings = [ (datetime.datetime.min, 0) ] + #@-node:__init__ + #@+node:__str__ + def __str__(self): + return str(self.bookings) + #@-node:__str__ + #@+node:__repr__ + def __repr__(self): + return "" % (str(self)) + #@-node:__repr__ + #@+node:add_load + def add_load(self, start, end, load): + start = _to_datetime(start) + end = _to_datetime(end) + + bookings = self.bookings + + # the load will be converted in an integer to avoid + # rouning problems + load = int(load * 10000) + + start_item = (start, 0) + start_pos = bisect.bisect_left(bookings, start_item) + + left_load = 0 + left_load = bookings[start_pos - 1][1] + + if start_pos < len(bookings) and bookings[start_pos][0] == start: + prev_load = bookings[start_pos][1] + if prev_load + load == left_load: + del bookings[start_pos] + else: + bookings[start_pos] = (start, prev_load + load) + start_pos += 1 + else: + bookings.insert(start_pos, (start, load + left_load)) + start_pos += 1 + + item = (datetime.datetime.min, 0) + for i in range(start_pos, len(bookings)): + end_pos = i + item = bookings[i] + if item[0] >= end: break + bookings[i] = (item[0], item[1] + load) + else: + end_pos = len(bookings) + + left_load = bookings[end_pos - 1][1] + if item[0] == end: + if item[1] == left_load: + del bookings[end_pos] + else: + bookings.insert(end_pos, (end, left_load - load)) + #@-node:add_load + #@+node:end_of_booking_interval + def end_of_booking_interval(self, date): + date = _to_datetime(date) + bookings = self.bookings + date_item = (date, 999999) + date_pos = bisect.bisect_left(bookings, date_item) - 1 + next_date = datetime.datetime.max + load = 0 + + try: + book_item = bookings[date_pos] + load = bookings[date_pos][1] / 10000.0 + next_date = bookings[date_pos + 1][0] + except: + pass + + return next_date, load + #@-node:end_of_booking_interval + #@+node:find_free_time + def find_free_time(self, start, length, load, max_load): + bookings = self.bookings + + if isinstance(start, datetime.datetime): + adjust_date = _to_datetime + else: + adjust_date = start.calendar.EndDate + + start = _to_datetime(start) + load = int(load * 10000) + max_load = int(max_load * 10000) + lb = len(bookings) + + def next_possible(index): + while index < lb: + sd, lo = bookings[index] + if lo + load <= max_load: + break + + index += 1 + + sd = adjust_date(max(start, sd)) + ed = sd + length + end = _to_datetime(ed) + + index += 1 + while index < lb: + date, lo = bookings[index] + + if date >= end: + #I found a good start date + return None, sd + + if lo + load > max_load: + return index + 1, None + + index += 1 + + return None, sd + + start_item = (start, 1000000) + i = bisect.bisect_left(bookings, start_item) - 1 + + next_start = None + while not next_start and i < lb: + i, next_start = next_possible(i) + + assert(next_start is not None) + return next_start + #@-node:find_free_time + #@+node:get_bookings + def get_bookings(self, start, end): + start = _to_datetime(start) + end = _to_datetime(end) + bookings = self.bookings + start_item = (start, 0) + start_pos = bisect.bisect_left(bookings, start_item) + if start_pos 
>= len(bookings) or bookings[start_pos][0] > start: + start_pos -= 1 + + end_item = (end, 0) + end_pos = bisect.bisect_left(bookings, end_item) + return start_pos, end_pos, bookings + #@-node:get_bookings + #@+node:get_load + def get_load(self, date): + date = _to_datetime(date) + bookings = self.bookings + item = (date, 100000) + pos = bisect.bisect_left(bookings, item) - 1 + return bookings[pos][1] / 10000.0 + #@-node:get_load + #@-others +#@-node:class ResourceCalendar +#@+node:class _ResourceBase +class _ResourceBase(object): + pass + +#@-node:class _ResourceBase +#@+node:class _MetaResource +class _MetaResource(type): + doc_template = """ + A resource class. The resources default attributes can + be changed when the class ist instanciated, i.e. + %(name)s(max_load=2.0) + + @var max_load: + Specify the maximal allowed load sum of all simultaneously + allocated tasks of a resource. A ME{max_load} of 1.0 (default) + means the resource may be fully allocated. A ME{max_load} of 1.3 + means the resource may be allocated with 30%% overtime. + + @var title: + Specifies an alternative more descriptive name for the task. + + @var efficiency: + The efficiency of a resource can be used for two purposes. First + you can use it as a crude way to model a team. A team of 5 people + should have an efficiency of 5.0. Keep in mind that you cannot + track the member of the team individually if you use this + feature. The other use is to model performance variations between + your resources. + + @var vacation: + Specifies the vacation of the resource. This attribute is + specified as a list of date literals or date literal intervals. + Be aware that the end of an interval is excluded, i.e. it is + the first working date. + """ + + #@ @+others + #@+node:__init__ + def __init__(self, name, bases, dict_): + super(_MetaResource, self).__init__(name, bases, dict_) + self.name = name + self.title = dict_.get("title", name) + self._calendar = { None: ResourceCalendar() } + self._tasks = { } + self.__set_vacation() + self.__add_resource(bases[0]) + self.__doc__ = dict_.get("__doc__", self.doc_template) % locals() + #@-node:__init__ + #@+node:__or__ + def __or__(self, other): + return self().__or__(other) + #@-node:__or__ + #@+node:__and__ + def __and__(self, other): + return self().__and__(other) + #@-node:__and__ + #@+node:__cmp__ + def __cmp__(self, other): + return cmp(self.name, getattr(other, "name", None)) + #@-node:__cmp__ + #@+node:__repr__ + def __repr__(self): + return "" % self.name + #@-node:__repr__ + #@+node:__str__ + def __str__(self): + return repr(self) + #@-node:__str__ + #@+node:__set_vacation + def __set_vacation(self): + vacation = self.vacation + + if isinstance(vacation, (tuple, list)): + for v in vacation: + if isinstance(v, (tuple, list)): + self.add_vacation(v[0], v[1]) + else: + self.add_vacation(v) + else: + self.add_vacation(vacation) + #@-node:__set_vacation + #@+node:__add_resource + def __add_resource(self, base): + if issubclass(base, _ResourceBase): + members = getattr(base, base.__name__ + "_members", []) + members.append(self) + setattr(base, base.__name__ + "_members", members) + #@-node:__add_resource + #@+node:get_members + def get_members(self): + return getattr(self, self.__name__ + "_members", []) + #@-node:get_members + #@+node:add_vacation + def add_vacation(self, start, end=None): + start_date = _to_datetime(start) + + if not end: + end_date = start_date.replace(hour=23, minute=59) + else: + end_date = _to_datetime(end) + + for cal in self._calendar.itervalues(): + 
cal.add_load(start_date, end_date, 1) + + tp = Booking() + tp.start = start_date + tp.end = end_date + tp.book_start = start_date + tp.book_end = end_date + tp.work_time = end_date - start_date + tp.load = 1.0 + tp.name = tp.title = _("(vacation)") + tp._id = "" + self._tasks.setdefault("", []).append(tp) + #@-node:add_vacation + #@+node:calendar + def calendar(self, scenario): + try: + return self._calendar[scenario] + except KeyError: + cal = self._calendar[scenario] = ResourceCalendar(self._calendar[None]) + return cal + + #@-node:calendar + #@-others +#@-node:class _MetaResource +#@+node:make_team +def make_team(resource): + members = resource.get_members() + if not members: + return resource + + result = make_team(members[0]) + for r in members[1:]: + result = result & make_team(r) + + return result +#@-node:make_team +#@+node:class Booking +class Booking(object): + """ + A booking unit for a task. + """ + #@ << declarations >> + #@+node:<< declarations >> + book_start = datetime.datetime.min + book_end = datetime.datetime.max + actual = False + _id = "" + + #@-node:<< declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, task=None): + self.__task = task + #@-node:__init__ + #@+node:__cmp__ + def __cmp__(self, other): + return cmp(self._id, other._id) + #@-node:__cmp__ + #@+node:path + def path(self): + first_dot = self._id.find(".") + return "root" + self._id[first_dot:] + + path = property(path) + #@nonl + #@-node:path + #@+node:_idendity_ + def _idendity_(self): + return self._id + #@-node:_idendity_ + #@+node:__getattr__ + def __getattr__(self, name): + if self.__task: + return getattr(self.__task, name) + + raise AttributeError("'%s' is not a valid attribute" % (name)) + #@-node:__getattr__ + #@-others +#@-node:class Booking +#@+node:class ResourceList +class ResourceList(list): + #@ @+others + #@+node:__init__ + def __init__(self, *args): + if args: self.extend(args) + #@-node:__init__ + #@-others +#@-node:class ResourceList +#@+node:class Resource +class Resource(_ResourceBase): + #@ << declarations >> + #@+node:<< declarations >> + __metaclass__ = _MetaResource + __attrib_completions__ = {\ + "max_load": 'max_load = ', + "title": 'title = "|"', + "efficiency": 'efficiency = ', + "vacation": 'vacation = [("|2002-02-01", "2002-02-05")]' } + + __type_image__ = "resource16" + + max_load = None # the maximum sum load for all task + vacation = () + efficiency = 1.0 + + + #@-node:<< declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, **kwargs): + for k, v in kwargs.iteritems(): + setattr(self, k, v) + #@-node:__init__ + #@+node:_idendity_ + def _idendity_(cls): + return "resource:" + cls.__name__ + + _idendity_ = classmethod(_idendity_) + #@-node:_idendity_ + #@+node:__repr__ + def __repr__(self): + return "" % self.__class__.__name__ + #@-node:__repr__ + #@+node:__str__ + def __str__(self): + return repr(self) + #@-node:__str__ + #@+node:__call__ + def __call__(self): + return self + #@-node:__call__ + #@+node:__hash__ + def __hash__(self): + return hash(self.__class__) + #@-node:__hash__ + #@+node:__cmp__ + def __cmp__(self, other): + return cmp(self.name, other.name) + #@-node:__cmp__ + #@+node:__or__ + def __or__(self, other): + if type(other) is _MetaResource: + other = other() + + result = Resource() + result._subresource = _OrResourceGroup(self, other) + return result + #@-node:__or__ + #@+node:__and__ + def __and__(self, other): + if type(other) is _MetaResource: + other = other() + + result = Resource() + result._subresource = 
_AndResourceGroup(self, other) + return result + #@-node:__and__ + #@+node:_permutation_count + def _permutation_count(self): + if hasattr(self, "_subresource"): + return self._subresource._permutation_count() + + return 1 + #@-node:_permutation_count + #@+node:_get_resources + def _get_resources(self, state): + if hasattr(self, "_subresource"): + result = self._subresource._get_resources(state) + + if self.name != "Resource": + result.name = self.name + + if self.title != "Resource": + result.title = self.title + + return result + + result = ResourceList(self) + return result + #@-node:_get_resources + #@+node:all_members + def all_members(self): + if hasattr(self, "_subresource"): + return self._subresource.all_members() + + return [ self.__class__ ] + #@-node:all_members + #@+node:unbook_tasks_of_project + def unbook_tasks_of_project(cls, project_id, scenario): + try: + task_list = cls._tasks[scenario] + except KeyError: + return + + add_load = cls.calendar(scenario).add_load + for task_id, bookings in task_list.items(): + if task_id.startswith(project_id): + for item in bookings: + add_load(item.book_start, item.book_end, -item.load) + + del task_list[task_id] + + if not task_list: + del cls._tasks[scenario] + + unbook_tasks_of_project = classmethod(unbook_tasks_of_project) + #@-node:unbook_tasks_of_project + #@+node:unbook_task + def unbook_task(cls, task): + identdity = task._idendity_() + scenario = task.scenario + + try: + task_list = cls._tasks[scenario] + bookings = task_list[identdity] + except KeyError: + return + + add_load = cls.calendar(scenario).add_load + for b in bookings: + add_load(b.book_start, b.book_end, -b.load) + + del task_list[identdity] + if not task_list: + del cls._tasks[scenario] + + unbook_task = classmethod(unbook_task) + #@-node:unbook_task + #@+node:correct_bookings + def correct_bookings(cls, task): + #correct the booking data with the actual task data + try: + tasks = cls._tasks[task.scenario][task._idendity_()] + except KeyError: + return + + for t in tasks: + t.start = task.start.to_datetime() + t.end = task.end.to_datetime() + + correct_bookings = classmethod(correct_bookings) + #@-node:correct_bookings + #@+node:book_task + def book_task(cls, task, start, end, load, work_time, actual): + if not work_time: return + + start = _to_datetime(start) + end = _to_datetime(end) + + identdity = task._idendity_() + task_list = cls._tasks.setdefault(task.scenario, {}) + bookings = task_list.setdefault(identdity, []) + add_load = cls.calendar(task.scenario).add_load + + tb = Booking(task) + tb.book_start = start + tb.book_end = end + tb._id = identdity + tb.load = load + tb.start = _to_datetime(task.start) + tb.end = _to_datetime(task.end) + tb.title = task.title + tb.name = task.name + tb.work_time = int(work_time) + tb.actual = actual + bookings.append(tb) + result = add_load(start, end, load) + return result + + book_task = classmethod(book_task) + #@-node:book_task + #@+node:length_of + def length_of(cls, task): + cal = task.root.calendar + bookings = cls.get_bookings(task) + return sum(map(lambda b: task._to_delta(b.work_time).round(), bookings)) + + length_of = classmethod(length_of) + #@-node:length_of + #@+node:done_of + def done_of(self, task): + cal = task.root.calendar + now = cal.now + bookings = self.get_bookings(task) + + if task.__dict__.has_key("effort"): + efficiency = self.efficiency * task.efficiency + else: + efficiency = 1 + + def book_done(booking): + if booking.book_start >= now: + return 0 + + factor = 1 + if booking.book_end > now: + 
start = task._to_start(booking.book_start) + end = task._to_end(booking.book_end) + cnow = task._to_start(now) + factor = float(cnow - start) / ((end - start) or 1) + + return factor * booking.work_time * efficiency + + return task._to_delta(sum(map(book_done, bookings))) + #@-node:done_of + #@+node:todo_of + def todo_of(self, task): + cal = task.root.calendar + now = cal.now + + bookings = self.get_bookings(task) + if task.__dict__.has_key("effort"): + efficiency = self.efficiency * task.efficiency + else: + efficiency = 1 + + def book_todo(booking): + if booking.book_end <= now: + return 0 + + factor = 1 + if booking.book_start < now: + start = task._to_start(booking.book_start) + end = task._to_end(booking.book_end) + cnow = task._to_start(now) + factor = float(end - cnow) / ((end - start) or 1) + + return factor * booking.work_time * efficiency + + return task._to_delta(sum(map(book_todo, bookings))) + #@-node:todo_of + #@+node:get_bookings + def get_bookings(cls, task): + return cls._tasks.get(task.scenario, {}).get(task._idendity_(), ()) + + get_bookings = classmethod(get_bookings) + #@-node:get_bookings + #@+node:get_bookings_at + def get_bookings_at(cls, start, end, scenario): + result = [] + + try: + items = cls._tasks[scenario].iteritems() + except KeyError: + return () + + for task_id, bookings in items: + result += [ booking for booking in bookings + if booking.book_start < end + and booking.book_end > start ] + + vacations = cls._tasks.get("", ()) + result += [ booking for booking in vacations + if booking.book_start < end + and booking.book_end > start ] + + return result + + get_bookings_at = classmethod(get_bookings_at) + #@-node:get_bookings_at + #@+node:find_free_time + def find_free_time(cls, start, length, load, max_load, scenario): + return cls.calendar(scenario).find_free_time(start, length, load, max_load) + + find_free_time = classmethod(find_free_time) + #@-node:find_free_time + #@+node:get_load + def get_load(cls, date, scenario): + return cls.calendar(scenario).get_load(date) + + get_load = classmethod(get_load) + #@-node:get_load + #@+node:end_of_booking_interval + def end_of_booking_interval(cls, date, task): + return cls.calendar(task.scenario).end_of_booking_interval(date) + + end_of_booking_interval = classmethod(end_of_booking_interval) + #@-node:end_of_booking_interval + #@+node:snapshot + def snapshot(self): + from task import _as_string + def isattrib(a): + if a == "max_load" and self.max_load is None: return False + if a in ("name", "title", "vacation"): return False + return _isattrib(self, a) + + attribs = filter(isattrib, dir(self)) + attribs = map(lambda a: "%s=%s" % (a, _as_string(getattr(self, a))), + attribs) + + return self.name + "(%s)" % ", ".join(attribs) + #@-node:snapshot + #@-others +#@-node:class Resource +#@+node:class _ResourceGroup + + +class _ResourceGroup(object): + #@ @+others + #@+node:__init__ + def __init__(self, *args): + self.resources = [] + for a in args: + self.__append(a) + #@-node:__init__ + #@+node:all_members + def all_members(self): + group = reduce(lambda a, b: a + b.all_members(), + self.resources, []) + group = map(lambda r: (r, True), group) + group = dict(group) + group = group.keys() + return group + #@-node:all_members + #@+node:_permutation_count + def _permutation_count(self): + abstract + #@-node:_permutation_count + #@+node:_refactor + def _refactor(self, arg): + pass + #@-node:_refactor + #@+node:__append + def __append(self, arg): + if isinstance(arg, self.__class__): + self.resources += arg.resources + for 
r in arg.resources: + self._refactor(r) + return + elif isinstance(arg, Resource): + subresources = getattr(arg, "_subresource", None) + if subresources: + self.__append(subresources) + return + else: + self.resources.append(arg) + else: + assert(isinstance(arg, _ResourceGroup)) + self.resources.append(arg) + + self._refactor(arg) + #@-node:__append + #@+node:__str__ + def __str__(self): + op = lower(self.__class__.__name__[0:-13]) + return "(" + \ + string.join([str(r) for r in self.resources], + " " + op + " ") + \ + ")" + #@-node:__str__ + #@-others +#@-node:class _ResourceGroup +#@+node:class _OrResourceGroup + + +class _OrResourceGroup(_ResourceGroup): + #@ @+others + #@+node:_get_resources + def _get_resources(self, state): + for r in self.resources: + c = r._permutation_count() + if c <= state: + state -= c + else: + return r._get_resources(state) + + assert(0) + #@-node:_get_resources + #@+node:_permutation_count + def _permutation_count(self): + return sum([ r._permutation_count() for r in self.resources]) + #@-node:_permutation_count + #@-others +#@-node:class _OrResourceGroup +#@+node:class _AndResourceGroup + + +class _AndResourceGroup(_ResourceGroup): + #@ @+others + #@+node:__init__ + def __init__(self, *args): + self.factors = [ 1 ] + _ResourceGroup.__init__(self, *args) + #@-node:__init__ + #@+node:_refactor + def _refactor(self, arg): + count = arg._permutation_count() + self.factors = [ count * f for f in self.factors ] + self.factors.append(1) + #@-node:_refactor + #@+node:_permutation_count + #print "AndResourceGroup", count, arg, self.factors + + + def _permutation_count(self): + return self.factors[0] + #@-node:_permutation_count + #@+node:_get_resources + def _get_resources(self, state): + """delivers None when there are duplicate resources""" + result = [] + for i in range(1, len(self.factors)): + f = self.factors[i] + substate = state / f + state %= f + result.append(self.resources[i - 1]._get_resources(substate)) + + result = ResourceList(*list(utils.flatten(result))) + dupl_test = { } + for r in result: + if dupl_test.has_key(r): + return None + else: + dupl_test[r] = 1 + + return result + #@-node:_get_resources + #@+node:_has_duplicates + def _has_duplicates(self, state): + resources = self._get_resources(state) + tmp = { } + for r in resources: + if tmp.has_key(r): + return True + + tmp[r] = 1 + + return False + #@-node:_has_duplicates + #@-others +#@-node:class _AndResourceGroup +#@-others +#@-node:@file resource.py +#@-leo diff --git a/addons/resource/faces/task.py b/addons/resource/faces/task.py new file mode 100755 index 00000000000..6fdb28db3f6 --- /dev/null +++ b/addons/resource/faces/task.py @@ -0,0 +1,3855 @@ +#@+leo-ver=4 +#@+node:@file task.py +#@@language python +#@<< Copyright >> +#@+node:<< Copyright >> +############################################################################ +# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH +# mreithinger@web.de +# +# This file is part of faces. +# +# faces is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# faces is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the +# Free Software Foundation, Inc., +# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +############################################################################ + +#@-node:<< Copyright >> +#@nl +""" +This module contains all classes for project plan objects +""" +#@<< Imports >> +#@+node:<< Imports >> +import pcalendar +import resource +import types +import sys +import datetime +import operator as op +import warnings +import locale +import weakref +import opcode +import new +try: + set +except NameError: + from sets import Set as set +#@-node:<< Imports >> +#@nl + +_is_source = True + +STRICT = 3 +SLOPPY = 2 +SMART = 1 + +#@+others +#@+node:Exceptions +#@+node:class AttributeError +class AttributeError(AttributeError): + #@ << class AttributeError declarations >> + #@+node:<< class AttributeError declarations >> + is_frozen = False + + + #@-node:<< class AttributeError declarations >> + #@nl +#@-node:class AttributeError +#@+node:class RecursionError +class RecursionError(Exception): + """This exception is raised in cas of cirular dependencies + within an project""" + #@ << class RecursionError declarations >> + #@+node:<< class RecursionError declarations >> + pass + + + #@-node:<< class RecursionError declarations >> + #@nl +#@-node:class RecursionError +#@+node:class _IncompleteError +class _IncompleteError(Exception): + """This exception is raised, when there is not enough + data specified to calculate as task""" + #@ @+others + #@+node:__init__ + def __init__(self, *args): + if isinstance(args[0], (basestring)): + Exception.__init__(self, *args) + else: + Exception.__init__(self, + "not enough data for calculating task, "\ + "maybe you have a recursive reference", + *args) + #@-node:__init__ + #@-others +#@-node:class _IncompleteError +#@-node:Exceptions +#@+node:Proxies for self referencing +#@+node:class _MeProxy +class _MeProxy(object): + """ + A Proxy class for the me attribute of tasks in the compile case + """ + #@ << declarations >> + #@+node:<< declarations >> + __slots__ = "task" + + #@-node:<< declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, task): + object.__setattr__(self, "task", task) + #@-node:__init__ + #@+node:__getattr__ + def __getattr__(self, name): + if self.task._is_frozen: + return getattr(self.task, name) + + if name in ("name", "up", "root", "path", + "depth", "index", "calendar", + "children", "resource", "balance"): + return getattr(self.task, name) + + value = self.task.__dict__.get(name, _NEVER_USED_) + def make_val(default): + if value is _NEVER_USED_: return default + return value + + if name in ("start", "end"): + return self.task._to_start(make_val("1.1.2006")) + + if name in ("length", "effort", "duration", "todo", "done", + "buffer", "performed", "performed_effort", + "performed_end", "performed_start", + "performed_work_time" ): + return self.task._to_delta(make_val("0d")) + + if name in ("complete", "priority", "efficiency"): + return make_val(0) + + if value is _NEVER_USED_: + raise AttributeError("'%s' is not a valid attribute." 
% (name)) + + return value + #@-node:__getattr__ + #@+node:__setattr__ + def __setattr__(self, name, value): + self.task._set_attrib(name, value) + #@-node:__setattr__ + #@+node:__iter__ + def __iter__(self): + return iter(self.task) + #@nonl + #@-node:__iter__ + #@+node:add_attrib + def add_attrib(self, name_or_iter, val=None): + if not isinstance(name_or_iter, str): + for n, v in name_or_iter: + setattr(self, n, v) + else: + setattr(self, name_or_iter, val) + #@-node:add_attrib + #@-others +#@nonl +#@-node:class _MeProxy +#@+node:class _MeProxyRecalc +class _MeProxyRecalc(_MeProxy): + """ + A Proxy class for the me attribute of tasks in the recalc case + """ + #@ @+others + #@+node:__setattr__ + def __setattr__(self, name, value): + if self.task._properties.has_key(name): + self.task._set_attrib(name, value) + #@-node:__setattr__ + #@-others +#@-node:class _MeProxyRecalc +#@+node:class _MeProxyError +class _MeProxyError(_MeProxy): + #@ << declarations >> + #@+node:<< declarations >> + __slots__ = ("task", "attrib", "exc") + + #@-node:<< declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, task, attrib, exc): + _MeProxy.__init__(self, task) + object.__setattr__(self, "attrib", attrib) + object.__setattr__(self, "exc", exc) + #@-node:__init__ + #@+node:__setattr__ + def __setattr__(self, name, value): + if name == self.attrib or not self.attrib: + raise self.exc + #@-node:__setattr__ + #@-others +#@-node:class _MeProxyError +#@+node:class _MeProxyWarn +class _MeProxyWarn(_MeProxy): + #@ << declarations >> + #@+node:<< declarations >> + __slots__ = ("task", "attrib", "message") + + #@-node:<< declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, task, attrib, message): + _MeProxy.__init__(self, task) + object.__setattr__(self, "attrib", attrib) + object.__setattr__(self, "message", message) + #@-node:__init__ + #@+node:__setattr__ + def __setattr__(self, name, value): + if name == self.attrib or not self.attrib: + warnings.warn(self.message, RuntimeWarning, 2) + if not self.attrib: + #warn only one time! 
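+                # once attrib holds this sentinel, the test above
+                # ("name == self.attrib or not self.attrib") no longer
+                # matches, so later assignments stay silent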
+ object.__setattr__(self, "attrib", 1) + #@-node:__setattr__ + #@-others +#@-node:class _MeProxyWarn +#@-node:Proxies for self referencing +#@+node:Task instrumentation +#@+doc +# This section contains code for byte code instrumenting +# the task functions +#@-doc +#@nonl +#@+node:_int_to_arg +def _int_to_arg(value): + return value % 256, value / 256 +#@-node:_int_to_arg +#@+node:_correct_labels +def _correct_labels(old_code, new_code): + #@ << localize dot variables >> + #@+node:<< localize dot variables >> + hasjrel = opcode.hasjrel + hasjabs = opcode.hasjabs + HAVE_ARGUMENT = opcode.HAVE_ARGUMENT + #@nonl + #@-node:<< localize dot variables >> + #@nl + #@ << loop initialization >> + #@+node:<< loop initialization >> + labels = {} + old_new_map = {} # map old code offset to new code offset + n = len(old_code) + i = 0 + j = 0 + #@nonl + #@-node:<< loop initialization >> + #@nl + while i < n: + op = old_code[i] + nop = new_code[j] + old_new_map[i] = j + i = i + 1 + j = j + 1 + if op >= HAVE_ARGUMENT: + oparg = old_code[i] + old_code[i + 1] * 256 + i = i + 2 + j = j + 2 + if nop != op: + j += 3 # skip the 3 addition opcodes for attrib access + else: + #@ << add label if necessary >> + #@+node:<< add label if necessary >> + label = -1 + if op in hasjrel: + label = i + oparg + elif op in hasjabs: + label = oparg + if label >= 0: + labels[i] = label + #@nonl + #@-node:<< add label if necessary >> + #@nl + + for offset, label in labels.iteritems(): + new_offset = old_new_map[offset] + new_label = old_new_map[label] + op = new_code[new_offset - 3] + #change jump arguments + if op in hasjrel: + jump = _int_to_arg(new_label - new_offset) + new_code[new_offset - 2:new_offset] = jump + elif op in hasjabs: + new_code[new_offset - 2:new_offset] = _int_to_arg(new_label) +#@nonl +#@-node:_correct_labels +#@+node:_instrument +def _instrument(func): + #@ << localize dot variables >> + #@+node:<< localize dot variables >> + opname = opcode.opname + opmap = opcode.opmap + jumps = opcode.hasjrel + opcode.hasjabs + HAVE_ARGUMENT = opcode.HAVE_ARGUMENT + co = func.func_code + local_names = co.co_varnames + all_names = list(co.co_names) + global_names = set() + #@-node:<< localize dot variables >> + #@nl + #@ << define local functions list_to_dict and is_local >> + #@+node:<< define local functions list_to_dict and is_local >> + def list_to_dict(l): + return dict([(t[1], t[0]) for t in enumerate(l)]) + + def is_local(name): + return name[0] == "_" and name != "__constraint__" + #@nonl + #@-node:<< define local functions list_to_dict and is_local >> + #@nl + + #convert code + #@ << loop initialization >> + #@+node:<< loop initialization >> + # all_name_map maps names to the all_names index + # (same like all_names.index()) + all_name_map = list_to_dict(all_names) + if not all_name_map.has_key("me"): + all_name_map["me"] = len(all_names) + all_names.append("me") + + # + for ln in local_names: + if not all_name_map.has_key(ln): + all_name_map[ln] = len(all_names) + all_names.append(ln) + # + + new_local_names = filter(is_local, local_names) + new_local_name_map = list_to_dict(new_local_names) + + me_arg = _int_to_arg(all_name_map["me"]) + old_lnotab = map(ord, co.co_lnotab) + new_lnotab = [] + tab_pos = 0 + try: + next_tab_point = old_lnotab[0] + except IndexError: + next_tab_point = None + + last_tab_point = 0 + code = map(ord, co.co_code) + new_code = [] + has_labels = False + n = len(code) + i = 0 + #@nonl + #@-node:<< loop initialization >> + #@nl + while i < n: + if i == next_tab_point: + #@ << calculate new 
tab point >> + #@+node:<< calculate new tab point >> + increment = len(new_code) - last_tab_point + new_lnotab.extend((increment, old_lnotab[tab_pos + 1])) + tab_pos += 2 + try: + next_tab_point = i + old_lnotab[tab_pos] + last_tab_point = len(new_code) + except IndexError: + next_tab_point = -1 + #@nonl + #@-node:<< calculate new tab point >> + #@nl + + op = code[i] + i += 1 + if op >= HAVE_ARGUMENT: + #@ << calculate argument >> + #@+node:<< calculate argument >> + arg0 = code[i] + arg1 = code[i+1] + oparg = arg0 + arg1 * 256 + #@nonl + #@-node:<< calculate argument >> + #@nl + i += 2 + + if opname[op] == "LOAD_GLOBAL": + global_names.add(oparg) + + elif opname[op] == "STORE_FAST": + #@ << change "store fast" to "store attribute" >> + #@+node:<< change "store fast" to "store attribute" >> + name = local_names[oparg] + if not is_local(name): + new_code.append(opmap["LOAD_GLOBAL"]) + new_code.extend(me_arg) + op = opmap["STORE_ATTR"] + arg0, arg1 = _int_to_arg(all_name_map[name]) + else: + arg0, arg1 = _int_to_arg(new_local_name_map[name]) + #@nonl + #@-node:<< change "store fast" to "store attribute" >> + #@nl + + elif opname[op] == "LOAD_FAST": + #@ << change "load fast" to "load attribute" >> + #@+node:<< change "load fast" to "load attribute" >> + name = local_names[oparg] + if not is_local(name): + new_code.append(opmap["LOAD_GLOBAL"]) + new_code.extend(me_arg) + op = opmap["LOAD_ATTR"] + arg0, arg1 = _int_to_arg(all_name_map[name]) + else: + arg0, arg1 = _int_to_arg(new_local_name_map[name]) + #@nonl + #@-node:<< change "load fast" to "load attribute" >> + #@nl + + elif op in jumps: + has_labels = True + + new_code.extend((op, arg0, arg1)) + else: + new_code.append(op) + + if has_labels: + _correct_labels(code, new_code) + + #@ << create new code and function objects and return >> + #@+node:<< create new code and function objects and return >> + new_code = "".join(map(chr, new_code)) + new_lnotab = "".join(map(chr, new_lnotab)) + new_co = new.code(co.co_argcount, + len(new_local_names), + max(co.co_stacksize, 2), + co.co_flags, + new_code, + co.co_consts, + tuple(all_names), + tuple(new_local_names), + co.co_filename, + co.co_name, + co.co_firstlineno, + new_lnotab, + co.co_freevars, + co.co_cellvars) + + + func = new.function(new_co, + func.func_globals, + func.func_name, + func.func_defaults, + func.func_closure) + func.global_names = tuple([all_names[index] for index in global_names]) + return func + #@nonl + #@-node:<< create new code and function objects and return >> + #@nl +#@nonl +#@-node:_instrument +#@-node:Task instrumentation +#@+node:Wrappers +#@+node:class _Path +class _Path(object): + """ + This class represents an instrumented path, to + a task. If it points to an attribute of a task, it + not only returns the value of the attribute. You can also + find out the source attribute (task and attribute name) + of the value. + """ + #@ @+others + #@+node:__init__ + def __init__(self, task, path_str): + self._task = task + self._path_str = path_str + #@-node:__init__ + #@+node:__getattr__ + def __getattr__(self, name): + new = getattr(self._task, name) + if isinstance(new, Task): + return _Path(new, self._path_str + "." 
+ name) + + return _ValueWrapper(new, [(self._task, name)]) + #@-node:__getattr__ + #@+node:__str__ + def __str__(self): + return self._path_str + #@-node:__str__ + #@+node:__iter__ + def __iter__(self): + return iter(self._task) + #@nonl + #@-node:__iter__ + #@-others + +#@-node:class _Path +#@+node:_val +#helper functions for _ValueWrapper +#---------------------------------- + +def _val(val): + if isinstance(val, _ValueWrapper): + return val._value + + return val +#@-node:_val +#@+node:_ref +def _ref(val): + if isinstance(val, _ValueWrapper): + return val._ref + + return [] +#@-node:_ref +#@+node:_sref +def _sref(val, ref): + if isinstance(val, _ValueWrapper): + val._ref = ref +#@nonl +#@-node:_sref +#@+node:_refsum +def _refsum(refs): + return reduce(lambda a, b: a + b, refs, []) +#@nonl +#@-node:_refsum +#@+node:class _ValueWrapper + + +class _ValueWrapper(object): + """ + This class represents a value, of a task attribute or + a return value of a task method. It contains the value, + and the supplier of that value + """ + #@ @+others + #@+node:__init__ + def __init__(self, value, ref): + self._value = value + self._ref = ref + #@-node:__init__ + #@+node:unicode + def unicode(self, *args): + if isinstance(self._value, str): + return unicode(self._value, *args) + + return unicode(self._value) + #@nonl + #@-node:unicode + #@+node:_vw + def _vw(self, operand, *args): + refs = _refsum(map(_ref, args)) + vals = map(_val, args) + result = operand(*vals) + return self.__class__(result, refs) + #@-node:_vw + #@+node:_cmp + def _cmp(self, operand, *args): + refs = _refsum(map(_ref, args)) + vals = map(_val, args) + result = operand(*vals) + map(lambda a: _sref(a, refs), args) + return result + #@-node:_cmp + #@+node:__getattr__ + def __getattr__(self, name): return getattr(self._value, name) + #@-node:__getattr__ + #@+node:__getitem__ + def __getitem__(self, slice): + return self.__class__(self._value[slice], self._ref) + #@nonl + #@-node:__getitem__ + #@+node:__str__ + def __str__(self): return str(self._value) + #@-node:__str__ + #@+node:__unicode__ + def __unicode__(self): return unicode(self._value) + #@nonl + #@-node:__unicode__ + #@+node:__repr__ + def __repr__(self): return repr(self._value) + #@-node:__repr__ + #@+node:__nonzero__ + def __nonzero__(self): return bool(self._value) + #@-node:__nonzero__ + #@+node:__lt__ + def __lt__(self, other): return self._cmp(op.lt, self, other) + #@-node:__lt__ + #@+node:__le__ + def __le__(self, other): return self._cmp(op.le, self, other) + #@-node:__le__ + #@+node:__eq__ + def __eq__(self, other): return self._cmp(op.eq, self, other) + #@-node:__eq__ + #@+node:__ne__ + def __ne__(self, other): return self._cmp(op.ne, self, other) + #@-node:__ne__ + #@+node:__gt__ + def __gt__(self, other): return self._cmp(op.gt, self, other) + #@-node:__gt__ + #@+node:__ge__ + def __ge__(self, other): return self._cmp(op.ge, self, other) + #@-node:__ge__ + #@+node:__add__ + def __add__(self, other): return self._vw(op.add, self, other) + #@nonl + #@-node:__add__ + #@+node:__sub__ + def __sub__(self, other): return self._vw(op.sub, self, other) + #@-node:__sub__ + #@+node:__mul__ + def __mul__(self, other): return self._vw(op.mul, self, other) + #@-node:__mul__ + #@+node:__floordiv__ + def __floordiv__(self, other): return self._vw(op.floordiv, self, other) + #@-node:__floordiv__ + #@+node:__mod__ + def __mod__(self, other): return self._vw(op.mod, self, other) + #@-node:__mod__ + #@+node:__divmod__ + def __divmod__(self, other): return self._vw(op.divmod, self, 
other) + #@-node:__divmod__ + #@+node:__pow__ + def __pow__(self, other): return self._vw(op.pow, self, other) + #@-node:__pow__ + #@+node:__lshift__ + def __lshift__(self, other): return self._vw(op.lshift, self, other) + #@-node:__lshift__ + #@+node:__rshift__ + def __rshift__(self, other): return self._vw(op.rshift, self, other) + #@-node:__rshift__ + #@+node:__and__ + def __and__(self, other): return self._vw(op.and_, self, other) + #@-node:__and__ + #@+node:__xor__ + def __xor__(self, other): return self._vw(op.xor, self, other) + #@-node:__xor__ + #@+node:__or__ + def __or__(self, other): return self._vw(op.or_, self, other) + #@-node:__or__ + #@+node:__div__ + def __div__(self, other): return self._vw(op.div, self, other) + #@-node:__div__ + #@+node:__radd__ + def __radd__(self, other): return self._vw(op.add, other, self) + #@-node:__radd__ + #@+node:__rsub__ + def __rsub__(self, other): return self._vw(op.sub, other, self) + #@-node:__rsub__ + #@+node:__rmul__ + def __rmul__(self, other): return self._vw(op.mul, other, self) + #@-node:__rmul__ + #@+node:__rdiv__ + def __rdiv__(self, other): return self._vw(op.div, other, self) + #@-node:__rdiv__ + #@+node:__rtruediv__ + def __rtruediv__(self, other): return self._vw(op.truediv, other, self) + #@-node:__rtruediv__ + #@+node:__rfloordiv__ + def __rfloordiv__(self, other): return self._vw(op.floordiv, other, self) + #@-node:__rfloordiv__ + #@+node:__rmod__ + def __rmod__(self, other): return self._vw(op.mod, other, self) + #@-node:__rmod__ + #@+node:__rdivmod__ + def __rdivmod__(self, other): return self._vw(op.divmod, other, self) + #@-node:__rdivmod__ + #@+node:__rpow__ + def __rpow__(self, other): return self._vw(op.pow, other, self) + #@-node:__rpow__ + #@+node:__rlshift__ + def __rlshift__(self, other): return self._vw(op.lshift, other, self) + #@-node:__rlshift__ + #@+node:__rrshift__ + def __rrshift__(self, other): return self._vw(op.rshift, other, self) + #@-node:__rrshift__ + #@+node:__rand__ + def __rand__(self, other): return self._vw(op.and_, other, self) + #@-node:__rand__ + #@+node:__rxor__ + def __rxor__(self, other): return self._vw(op.xor, other, self) + #@-node:__rxor__ + #@+node:__ror__ + def __ror__(self, other): return self._vw(op.or_, other, self) + #@-node:__ror__ + #@+node:__int__ + def __int__(self): return int(self._value) + #@-node:__int__ + #@+node:__long__ + def __long__(self): return long(self._value) + #@-node:__long__ + #@+node:__float__ + def __float__(self): return float(self._value) + #@-node:__float__ + #@+node:__len__ + def __len__(self): return len(self._value) + #@-node:__len__ + #@+node:__iter__ + def __iter__(self): return iter(self._value) + #@-node:__iter__ + #@+node:__hash__ + def __hash__(self): return hash(self._value) + #@-node:__hash__ + #@-others +#@-node:class _ValueWrapper +#@-node:Wrappers +#@+node:Utilities +#@+node:class _NEVER_USED_ +class _NEVER_USED_: + pass + +#@-node:class _NEVER_USED_ +#@+node:class _StringConverter +class _StringConverter(object): + """This class is a helper for the to_string mechanism + of tasks""" + #@ @+others + #@+node:__init__ + def __init__(self, source, format=None): + self.source = source + self.format = format + #@-node:__init__ + #@+node:__getitem__ + def __getitem__(self, format): + return _StringConverter(self.source, format) + #@-node:__getitem__ + #@+node:__getattr__ + def __getattr__(self, name): + class StrWrapper(object): + def __init__(self, value, name, source, format): + self._value = value + self.name = name + self.source = source + 
self.format = format + + def __call__(self, arg): + formatter = self.source.formatter(self.name, + arg, + self.format) + return formatter(self._value(arg)) + + value = getattr(self.source, name) + if callable(value): + #for methods the wrapper has to + return StrWrapper(value, name, self.source, self.format) + + formatter = self.source.formatter(name, format=self.format) + return formatter(value) + #@-node:__getattr__ + #@-others +#@-node:class _StringConverter +#@+node:Multi +def Multi(val, **kwargs): + """returns a directory for mutlivalued attributes""" + return dict(_default=val, **kwargs) +#@nonl +#@-node:Multi +#@+node:create_relative_path +def create_relative_path(from_, to_): + """ + creates a relative path from absolute path + from_ to absolute path to_ + """ + from_ = from_.split(".") + to_ = to_.split(".") + + for i, parts in enumerate(zip(from_, to_)): + from_part, to_part = parts + if from_part != to_part: + break + + from_ = from_[i:] + to_ = to_[i:] + return "up." * len(from_) + ".".join(to_) +#@nonl +#@-node:create_relative_path +#@+node:create_absolute_path +def create_absolute_path(from_, to_): + """ + creates a absolute path from absolute path + from_ to relative path to_ + """ + from_ = from_.split(".") + to_ = to_.split(".") + + for i, part in enumerate(to_): + if part != "up": + break + + from_ = from_[:-i] + to_ = to_[i:] + return "%s.%s" % (".".join(from_), ".".join(to_)) + + +#@-node:create_absolute_path +#@+node:_split_path +def _split_path(path): + try: + index = path.rindex(".") + return path[:index], path[index + 1:] + except: + return path +#@-node:_split_path +#@+node:_to_datetime +_to_datetime = pcalendar.to_datetime +#@nonl +#@-node:_to_datetime +#@+node:_get_tasks_of_sources +def _get_tasks_of_sources(task, attrib_filter="end,start,effort,length,duration"): + #return all source tasks, this task is dependend on + + dep_tasks = {} + + while task: + for dep in task._sources.values(): + for d in dep: + path, attrib = _split_path(d) + if attrib and attrib_filter.find(attrib) >= 0: + dep_tasks[path] = True + + task = task.up + + return dep_tasks.keys() +#@-node:_get_tasks_of_sources +#@+node:_build_balancing_list +def _build_balancing_list(tasks): + """ + Returns a specialy sorted list of tasks. + If the tasks will allocate resources in the sorting order of that list + correct balancing is ensured + """ + + # first sort the list for attributes + index = 0 + balancing_list = [(-t.priority, t.balance, index, t) for index, t in enumerate(tasks)] + balancing_list.sort() + + #print + #for p, b, i, t in balancing_list: + # print p, b, i, t.path + + balancing_list = [ t for p, b, i, t in balancing_list ] + + #now correct the presorted list: + #if task a is dependent on task b, b will be moved before a + + done_map = { } + count = len(balancing_list) + while len(done_map) < count: + for i in range(count): + to_inspect = balancing_list[i] + if done_map.has_key(to_inspect): + continue + + done_map[to_inspect] = True + break + else: + break + + #@ << define inspect_depends_on >> + #@+node:<< define inspect_depends_on >> + inspect_path = to_inspect.path + "." + sources = _get_tasks_of_sources(to_inspect) + sources = [ s + "." for s in sources + if not inspect_path.startswith(s) ] + + # the if in the later line ignores assignments like + # like start = up.start (i.e. references to parents) + # this will be handled in the second if of inspect_depends_on + # and can cause errors otherwise + + def inspect_depends_on(task): + cmp_path = task.path + "." 
+ for src in sources: + if cmp_path.startswith(src): + #task is a source of to_inspect + return True + + if inspect_path.startswith(cmp_path): + #to_inspect is a child of task + return True + + return False + #@nonl + #@-node:<< define inspect_depends_on >> + #@nl + for j in range(i + 1, count): + check_task = balancing_list[j] + if done_map.has_key(check_task): + continue + + if inspect_depends_on(check_task): + del balancing_list[j] + balancing_list.insert(i, check_task) + i += 1 # to_inspect is now at i + 1 + + + return balancing_list +#@-node:_build_balancing_list +#@+node:_as_string +def _as_string(val): + if isinstance(val, basestring): + return '"""%s"""' % val.replace("\n", "\\n") + + if isinstance(val, pcalendar._WorkingDateBase): + return '"%s"' % val.strftime("%Y-%m-%d %H:%M") + + if isinstance(val, datetime.datetime): + return '"%s"' % val.strftime("%Y-%m-%d %H:%M") + + if isinstance(val, datetime.timedelta): + return '"%id %iM"' % (val.days, val.seconds / 60) + + if isinstance(val, tuple): + result = map(_as_string, val) + return "(%s)" % ", ".join(result) + + if isinstance(val, list): + result = map(_as_string, val) + return "[%s]" % ", ".join(result) + + if isinstance(val, resource.Resource): + return val._as_string() + + if isinstance(val, Task): + return val.path + + return str(val) +#@-node:_as_string +#@+node:_step_tasks +def _step_tasks(task): + if isinstance(task, Task): + yield task + + stack = [iter(task.children)] + while stack: + for task in stack[-1]: + yield task + + if task.children: + stack.append(iter(task.children)) + break + else: + stack.pop() +#@-node:_step_tasks +#@-node:Utilities +#@+node:Cache +instrumentation_cache = {} +balancing_cache = {} + +def clear_cache(): + instrumentation_cache.clear() + balancing_cache.clear() +#@nonl +#@-node:Cache +#@+node:Resource Allocators +#@+others +#@+node:VariableLoad +def VariableLoad(limit=0): + """ + Allocates the resource with maximal possible load. + If limit is given, a the load is at least limit or more. + """ + try: + balance = me.balance + except NameError: + balance = SLOPPY + + if balance != SLOPPY: + raise RuntimeError("You may specify variable_load only with balance=SLOPPY") + + return -limit +#@-node:VariableLoad +#@+node:_calc_load +def _calc_load(task, resource): + #changed at the resource instance + load = resource.__dict__.get("load") + if load is not None: return load + + load = task.__dict__.get("load") + if load is not None: return load + + #inherited by the task + return min(task.load, task.max_load, resource.max_load or 100.0) +#@-node:_calc_load +#@+node:_calc_maxload +def _calc_maxload(task, resource): + #changed at the resource instance + max_load = resource.__dict__.get("max_load") + if max_load: return max_load + + #an explicit load can overwrite max_load + load = max(resource.__dict__.get("load", 0), + task.__dict__.get("load"), 0) + + #change at the task + max_load = task.__dict__.get("max_load") + if max_load: return max(max_load, load) + + #inherited by the resource + max_load = resource.max_load + if max_load: return max(max_load, load) + + #inherited by the task + return max(task.max_load, load) +#@-node:_calc_maxload +#@+node:class AllocationAlgorithm +class AllocationAlgorithm(object): + """This class is a base for resource allocation algorithms""" + #@ @+others + #@+node:test_allocation + def test_allocation(self, task, resource): + """This method simulates the allocation of a specific resource. + It returns a list of values representing the state of the allocation. 
+ The task allocator calls test_allocation for every alternative resource. + It compares the first items of all return lists, and allocates the + resource with the minum first item value""" + return (task.end, ) + #@-node:test_allocation + #@+node:allocate + def allocate(self, task, state): + """This method eventually allocates a specific resource. + State is the return list of test_allocation""" + pass + #@-node:allocate + #@-others +#@-node:class AllocationAlgorithm +#@+node:class StrictAllocator +class StrictAllocator(AllocationAlgorithm): + """This class implements the STRICT resource allocation""" + #@ @+others + #@+node:_distribute_len_loads + def _distribute_len_loads(self, task, resource, effort, length): + # A special load calculation, if effort and length are given. + # and the resources have a defined maxload, the load must be + # individually calculated for each resource. + + # Formulars: r=resources, t=task + # effort = length * efficiency(t) * sum[load(r) * effiency(r)] + # ==> sum_load = sum[load(r) * effiency(r)] + # = effort / (length * efficiency(t)) + # + + sum_load = float(effort) / (task.efficiency * length) + + # algorithm: + # The goal is to distribute the load (norm_load) equally + # to all resources. If a resource has a max_load(r) < norm_load + # the load of this resource will be max_load(r), and the other + # resources will have another (higher) norm_load + + max_loads = map(lambda r: (_calc_maxload(task, r), r), resource) + max_loads.sort() + + efficiency_sum = sum(map(lambda r: r.efficiency, resource)) + norm_load = sum_load / efficiency_sum + + loads = {} + for max_load, r in max_loads[:-1]: + if max_load < norm_load: + loads[r] = max_load + efficiency_sum -= r.efficiency + sum_load -= max_load * r.efficiency + norm_load = sum_load / efficiency_sum + else: + loads[r] = norm_load + + max_load, r = max_loads[-1] + loads[r] = norm_load + return loads + #@-node:_distribute_len_loads + #@+node:test_allocation + def test_allocation(self, task, resource): + effort = task.__dict__.get("effort") + to_start = task._to_start + to_end = task._to_end + to_delta = task._to_delta + + if task.performed_end: + start = to_start(max(task.performed_end, + task.root.calendar.now, + task.start)) + else: + start = task.start + if task.root.has_actual_data and task.complete == 0: + start = max(start, to_start(task.root.calendar.now)) + + base_start = to_start(task.performed_start or task.start) + calc_load = lambda r: _calc_load(task, r) + loads = map(lambda r: (r, calc_load(r)), resource) + + length = task.__dict__.get("length") + duration = task.__dict__.get("duration") + end = task.__dict__.get("end") + + #@ << correct length >> + #@+node:<< correct length >> + if length is not None: + length = to_delta(max(length - (task.start - base_start), 0)) + #@nonl + #@-node:<< correct length >> + #@nl + #@ << correct duration >> + #@+node:<< correct duration >> + if duration is not None: + delta = task.start.to_datetime() - base_start.to_datetime() + delta = to_delta(delta, True) + duration = to_delta(max(duration - delta, 0), True) + #@nonl + #@-node:<< correct duration >> + #@nl + #@ << check end >> + #@+node:<< check end >> + if end is not None: + length = end - start + if length <= 0: return False + #@nonl + #@-node:<< check end >> + #@nl + #@ << correct effort and (re)calculate length >> + #@+node:<< correct effort and (re)calculate length >> + if effort is not None: + effort -= task.performed_effort + effort = to_delta(max(effort, 0)) + if effort <= 0: return False + + if length is 
not None: + #if length and effort is set, the load will be calculated + length = length or task.calendar.minimum_time_unit + loads = self._distribute_len_loads(task, resource, + effort, length) + def calc_load(res): + return loads[res] + else: + #the length depends on the count of resources + factor = sum(map(lambda a: a[0].efficiency * a[1], + loads)) * task.efficiency + length = effort / factor + #@nonl + #@-node:<< correct effort and (re)calculate length >> + #@nl + #@ << set adjust_date and delta >> + #@+node:<< set adjust_date and delta >> + if length is not None: + adjust_date = lambda date: date + delta = to_delta(length).round() + else: + assert(duration is not None) + adjust_date = _to_datetime + delta = datetime.timedelta(minutes=duration) + #@nonl + #@-node:<< set adjust_date and delta >> + #@nl + + # find the earliest start date + start, book_load\ + = self.balance(task, start, delta, adjust_date, + calc_load, resource) + + end = to_end(start + delta) + start = to_start(start) + + if effort is None: + #length is frozen ==> a new effort will be calculated + factor = sum(map(lambda a: a[1], loads)) + length = end - start + + effort = to_delta(length * factor\ + + task.performed_effort).round() + + return (end, book_load), resource, calc_load, start, effort + #@-node:test_allocation + #@+node:allocate + def allocate(self, task, state): + # now really book the resource + end_bl, resource, calc_load, start, effort = state + end = end_bl[0] + cal = task.root.calendar + to_start = task._to_start + to_end = task._to_end + to_delta = task._to_delta + + task.start = task.performed_start \ + and to_start(task.performed_start) \ + or to_start(start) + + task.end = end + task._unfreeze("length") + task._unfreeze("duration") + length = end - start + + for r in resource: + book_load = calc_load(r) + work_time = to_delta(length * book_load).round() + r.book_task(task, start, end, book_load, work_time, False) + + #the following lines are important to be exactly at this + #positions in that order: + # done and todo are dependend on: + # - the existence of effort (if effort was set or not set) + # - book_task (they can only be calculated, if the task is booked) + # - booked_resource (to get the booked tasks) + task.booked_resource = resource + task.done = task.done + task.todo = task.todo + task.length = end - task.start + task.effort = to_delta(effort + task.performed_effort) + #@-node:allocate + #@+node:balance + #now effort exists always + + + def balance(self, task, start, delta, adjust_date, + calc_load, resource): + book_load = max(map(lambda r: r.get_load(task.start, task.scenario), resource)) + return start, book_load + #@-node:balance + #@-others +#@-node:class StrictAllocator +#@+node:class SmartAllocator + + +class SmartAllocator(StrictAllocator): + #@ @+others + #@+node:balance + def balance(self, task, start, delta, adjust_date, + calc_load, resource): + #find the earliest start date, at which all + #resources in the team are free + + cal = task.root.calendar + to_start = task._to_start + start = adjust_date(start) + scenario = task.scenario + + while True: + #we have finished, when all resources have the + #same next free start date + for r in resource: + max_load = _calc_maxload(task, r) + load = calc_load(r) + + #find the next free time of the resource + s = r.find_free_time(start, delta, load, max_load, scenario) + if s != start: + s = to_start(s) + start = adjust_date(s) + break + else: + #only one resource + break + + return start, 1.0 + #@-node:balance + #@-others 
+#@-node:class SmartAllocator +#@+node:class SloppyAllocator + + +class SloppyAllocator(AllocationAlgorithm): + #@ @+others + #@+node:test_allocation + def test_allocation(self, task, resource): + if task.__dict__.has_key("effort"): + return self.test_allocation_effort(task, resource) + + return self.test_allocation_length(task, resource) + #@-node:test_allocation + #@+node:test_allocation_length + def test_allocation_length(self, task, resource): + #length is frozen ==> effort will be calculated + to_start = task._to_start + to_end = task._to_end + to_delta = task._to_delta + + end = task.end + if task.performed_end: + start = to_start(max(task.performed_end, + task.root.calendar.now, + start)) + else: + start = task.start + + base_start = to_start(task.performed_start or task.start) + length = to_delta(max(task.length - (start - base_start), 0)) + sum_effort = 0 + intervals = [] + scenario = task.scenario + for r in resource: + date = start + max_load = _calc_maxload(task, r) + book_load = _calc_load(task, r) + + while date < end: + #find free time intervals and add them for booking + endi, load = r.end_of_booking_interval(date, task) + endi = min(endi, end) + endi = to_end(endi) + + if book_load <= 0: + #variable book_load ==> calc the maxmimal possible book_load >= (the given book_load) + used_book_load = - book_load + diff_load = max_load - load + if diff_load and diff_load >= book_load: + used_book_load = diff_load + else: + used_book_load = max_load + else: + used_book_load = book_load + + if max_load - load >= used_book_load: + intervals.append((r, used_book_load, date, endi)) + sum_effort = (endi - date) * used_book_load + + date = to_start(endi) + + return -sum_effort, end, resource, intervals + #@-node:test_allocation_length + #@+node:test_allocation_effort + def test_allocation_effort(self, task, resource): + #effort is frozen ==> length will be calculated + + to_start = task._to_start + to_end = task._to_end + to_delta = task._to_delta + + intervals = [] + effort = task.__dict__.get("effort") + + if task.performed_end: + next_date = to_start(max(task.performed_end, + task.root.calendar.now, + task.start)) + else: + next_date = task.start + if task.root.has_actual_data and task.complete == 0: + next_date = max(next_date, to_start(task.root.calendar.now)) + + #walks chronologicly through the booking + #intervals of each resource, and reduces + #the effort for each free interval + #until it becomes 0 + + alloc_effort = effort + effort -= task.performed_effort + while effort > 0: + date = next_date + + interval_resource = [] + interval_end = to_start(sys.maxint) + factor = 0 + + for r in resource: + max_load = _calc_maxload(task, r) + book_load = _calc_load(task, r) + end, load = r.end_of_booking_interval(date, task) + interval_end = to_start(min(end, interval_end)) + + if book_load <= 0: + #variable book_load ==> calc the maxmimal possible book_load >= (the given book_load) + book_load = - book_load + diff_load = max_load - load + if diff_load and diff_load >= book_load: + book_load = diff_load + else: + book_load = max_load + + if book_load + load <= max_load: + resource_factor = book_load * r.efficiency + interval_resource.append((r, book_load, resource_factor)) + factor += resource_factor + + + + next_date = interval_end + if factor: + factor *= task.efficiency + length = to_delta(effort / factor).round() + end = date + length + + if interval_end >= end: + next_date = interval_end = end + effort = 0 + book_end = end + else: + book_end = interval_end + length = book_end - date 
+ minus_effort = length * factor + effort -= minus_effort + + book_end = to_end(book_end) + intervals.append((date, book_end, length, interval_resource)) + + return next_date, alloc_effort, resource, intervals + #@-node:test_allocation_effort + #@+node:allocate + def allocate(self, task, state): + if task.__dict__.has_key("effort"): self.allocate_effort(task, state) + else: self.allocate_length(task, state) + #@-node:allocate + #@+node:allocate_length + def allocate_length(self, task, state): + # now really book the resource + neg_sum_effort, end, resource, intervals = state + + cal = task.root.calendar + to_start = task._to_start + to_end = task._to_end + to_delta = task._to_delta + + task.start = to_start(task.performed_start or task.start) + task.end = to_end(end) + task._unfreeze("length") + task._unfreeze("duration") + + effort = 0 + for r, load, s, e in intervals: + work_time = to_delta((e - s) * load).round() + effort += work_time + r.book_task(task, s, e, load, work_time, False) + + #see comment at StrictAllocator.allocate + task.booked_resource = resource + task.done = task.done + task.todo = task.todo + task.effort = to_delta(effort + task.performed_effort).round() + #@-node:allocate_length + #@+node:allocate_effort + def allocate_effort(self, task, state): + # now really book the resource + end, effort, resource, intervals = state + to_start = task._to_start + to_end = task._to_end + to_delta = task._to_delta + + task.start = task.performed_start \ + and to_start(task.performed_start) \ + or to_start(intervals[0][0]) + task.end = to_end(end) + task._unfreeze("length") + task._unfreeze("duration") + + for start, end, length, resources in intervals: + for r, load, factor in resources: + work_time = to_delta(length * load) + r.book_task(task, start, end, load, work_time, False) + + task.booked_resource = resource + task.done = task.done + task.todo = task.todo + task.effort = to_delta(effort) + task.length = task.end - task.start + #@-node:allocate_effort + #@-others +#@-node:class SloppyAllocator +#@-others + +_smart_allocator = SmartAllocator() +_sloppy_allocator = SloppyAllocator() +_strict_allocator = StrictAllocator() +_allocators = { SMART: _smart_allocator, + SLOPPY: _sloppy_allocator, + STRICT: _strict_allocator } + +_allocator_strings = { SMART: "SMART", + SLOPPY: "SLOPPY", + STRICT: "STRICT" } +#@-node:Resource Allocators +#@+node:Load Calculators +#@+node:YearlyMax +def YearlyMax(value): + """ + Calculates a load parameter with a maximal yearly workload + """ + #@ << calculate calendar and time_diff >> + #@+node:<< calculate calendar and time_diff >> + try: + cal = me.calendar + except NameError: + cal = pcalendar._default_calendar + + time_diff = cal.Minutes(value) + #@nonl + #@-node:<< calculate calendar and time_diff >> + #@nl + return float(time_diff) / \ + (cal.working_days_per_year \ + * cal.working_hours_per_day \ + * 60) +#@nonl +#@-node:YearlyMax +#@+node:WeeklyMax +def WeeklyMax(value): + """ + Calculates a load parameter with a maximal weekly workload + """ + #@ << calculate calendar and time_diff >> + #@+node:<< calculate calendar and time_diff >> + try: + cal = me.calendar + except NameError: + cal = pcalendar._default_calendar + + time_diff = cal.Minutes(value) + #@nonl + #@-node:<< calculate calendar and time_diff >> + #@nl + return float(time_diff) / \ + (cal.working_days_per_week \ + * cal.working_hours_per_day \ + * 60) + +#@-node:WeeklyMax +#@+node:MonthlyMax +def MonthlyMax(value): + """ + Calculates a load parameter with a maximal monthly workload + 
""" + #@ << calculate calendar and time_diff >> + #@+node:<< calculate calendar and time_diff >> + try: + cal = me.calendar + except NameError: + cal = pcalendar._default_calendar + + time_diff = cal.Minutes(value) + #@nonl + #@-node:<< calculate calendar and time_diff >> + #@nl + return float(time_diff) / \ + (cal.working_days_per_month \ + * cal.working_hours_per_day \ + * 60) + +#@-node:MonthlyMax +#@+node:DailyMax +def DailyMax(value): + """ + Calculates a load parameter with a maximal daily workload + """ + #@ << calculate calendar and time_diff >> + #@+node:<< calculate calendar and time_diff >> + try: + cal = me.calendar + except NameError: + cal = pcalendar._default_calendar + + time_diff = cal.Minutes(value) + #@nonl + #@-node:<< calculate calendar and time_diff >> + #@nl + return float(time_diff) / (cal.working_hours_per_day * 60) +#@-node:DailyMax +#@-node:Load Calculators +#@+node:Task +#@+node:class _TaskProperty +class _TaskProperty(object): + #@ @+others + #@+node:__init__ + def __init__(self, method): + self.method = method + #@-node:__init__ + #@+node:__get__ + def __get__(self, instance, owner): + if not instance: + return None + + return instance._wrap_attrib(self.method) + #@-node:__get__ + #@-others +#@-node:class _TaskProperty +#@+node:class _RoundingTaskProperty +class _RoundingTaskProperty(object): + #@ @+others + #@+node:__init__ + def __init__(self, method, name): + self.method = method + self.name = name + #@-node:__init__ + #@+node:__get__ + def __get__(self, instance, owner): + if not instance: + return None + + result = instance._wrap_attrib(self.method).round() + if instance._is_frozen: + #correct the attrib to the rounded value + setattr(instance, self.name, result) + + return result + #@-node:__get__ + #@-others +#@-node:class _RoundingTaskProperty +#@+node:class Task +class Task(object): + #@ << description >> + #@+node:<< description >> + """ + This class represents a single task in the project tree. A task + can have other child tasks, or is a leaf of the tree. Resources + will be allocated only to leafes. You will never create task + objects by your self, they are created indirectly by Projects. + + @var root: + Returns the root project task. + + @var up: + Returns the parent task. + + @var title: + Specifies an alternative more descriptive name for the task. + + @var start: + The start date of the task. Valid values are expressions and + strings specifing a datatime + + @var end: + The end date of the task. Valid values are expressions and + strings. + + @var effort: + Specifies the effort needed to complete the task. Valid values + are expressions and strings. (Todo: What happens, in case of + specified performance data...) + + + @var length: + Specifies the time the task occupies the resources. This is + working time, not calendar time. 7d means 7 working days, not one + week. Whether a day is considered a working day or not depends on + the defined working hours and global vacations. + + @var duration: + Specifies the time the task occupies the resources. This is + calendar time, not working time. 7d means one week. + + @var buffer: + Specifies the time a task can be delayed, without moving dependend + milestones. A Task with a buffer S{<=} 0d is part of the critical + chain. This attribute is readonly. + + @var complete: + Specifies what percentage of the task is already completed. + + @var todo: + Specifies the effort, which needs to be done to complete a + task. This is another (indirect) way to specify the ME{complete} + attribute. 
+ + @var done: + Specifies the work effort, which has been already done. This + attribute is readonly. + + @var estimated_effort: + Specifies the estimated_effort given by setting the effort property. + + @var performed: + Specifies a list of actual working times performed on the task. + The format is: C{[ (resource, from, to, time), ... ]} + + @var performed_work_time: + Specifies the sum of all working times. This attribute is + readonly. + + @var performed_effort: + Specifies the complete effort of all working times. This attribute is + readonly. + + @var performed_start: + The start date of the performed data. + + @var performed_end: + The end date of the performed data. + + @var performed_resource: + The resources who have already performed on the task. This attribute is readonly. + + + @var balance: + Specifies the resource allocation type. Possible values are + CO{STRICT}, CO{SLOPPY}, CO{SMART}. + + @var resource: + Specifies the possible resources, that may be allocated for the + task. + + @var booked_resource: + Specifies the allocated resources of a task. This attribute is + readonly. + + @var load: + Specifies the daily load of a resource for an allocation of the + specified task. A load of 1.0 (default) means the resource is + allocated for as many hours as specified by + ME{working_hours_per_day}. A load of 0.5 means half that many + hours. + + @var max_load: + Specify the maximal allowed load sum of all simultaneously + allocated tasks of a resource. A ME{max_load} of 1.0 (default) + means the resource may be fully allocated. A ME{max_load} of 1.3 + means the resource may be allocated with 30% overtime. + + @var efficiency: + The efficiency of a resource can be used for two purposes. First + you can use it as a crude way to model a team. A team of 5 people + should have an efficiency of 5.0. Keep in mind that you cannot + track the member of the team individually if you use this + feature. The other use is to model performance variations between + your resources. + + @var milestone: + Specified if the task is a milestone. The possible values are + C{True} or "later". If the start date of the milestone is not + a valid working date, the milestone will appear at the previous + working date before the given start date. If "later" is specified + the milestone will appear at the next valid working date. + A milestone has always an effort of 0d. + + @var priority: + Specifies a priority between 1 and 1000. A task with higher + priority is more likely to get the requested resources. The + default priority is 500. + + @var children: + Specifies a list of all subtasks. A task without children is + called a leaf task index{leaf task} otherwise it is called a + parent task index{parent task}. This attribute is readonly. + + @var depth: + Specifies the depth of the task within the hierachy. This + attribute is readonly. + + @var index: + Specifies a structural index number. This attribute is readonly. + + @var path: + Specifies the path. + + @var copy_src: + Specifies the path to an other task. When you set this attribute, + all attributes (except of ME{start} and ME{end}) of copy_src will + be copied to the current task. This is usefull if you want to + define the same task, in diffent project definitions. It acts like + a task link. + + @var scenario: + The scenario which is currently evaluated. This attribute is readonly. + + @var dont_inherit: + A list of attribute names, which will be not inherited by + subtasks. + + @var calendar: + Specifies the task calendar. 
+ + @var working_days_per_week: + Specifies the days within a working week. This value is used + internally to convert time differences from weeks to days. The + default value is 5 days. + + @var working_days_per_month: + Specifies the days within a working month. This value is used + internally to convert time differences from months to days. The + default value is 20 days. + + @var working_days_per_year: + Specifies the days within a working year. This value is used + internally to convert time differences from years to days The + default value is 200 days. + + @var working_hours_per_day: + Specifies the hours within a working day. This value is used + internally to convert time differences from are entered in days to + hours. The default value is 8 hours. + + @var minimum_time_unit: + Specifies the minimum resolution in minutes for the task + scheduling. The default value is 15 minutes. + + @var vacation: + Specifies a public vacation for the calendar. This attribute is + specified as a list of date literals or date literal intervals. Be + aware that the end of an interval is excluded, i.e. it is the + first working date. + + @var extra_work: + Specifies additional worktime. This attribute is specified as a + list of date literals or date literal intervals. Be aware that the + end of an interval is excluded, i.e. it is the first working date. + + @var working_days: + Specifies the weekly working time within calendar. The format of + this attribute is: [ (day_range, time_range, ...), (day_range, time_range, ...), ... ]. + day_range is a comma sperated string of week days. Valid values + are mon, tue, wed, thu, fri, sat, sun. + time_range is string specifing a time interval like + 8:00-10:00. You can specified any number of time_ranges, following + the first. + + @var now: + Specifies the current daytime and is a date literal. ME{now} is + used to calculate several task attributes. 
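+
+    A rough sketch of how these attributes are typically set inside a
+    project definition (the task and resource names below are placeholders):
+
+        def specification():
+            effort = "10d"
+            resource = developer
+
+        def implementation():
+            start = specification.end
+            effort = "15d"
+            resource = developer
+            balance = SMART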
+ + """ + #@nonl + #@-node:<< description >> + #@nl + #@ << declarations >> + #@+node:<< declarations >> + # Variables for the gui interface + _date_completion = { "Date": 'Date("|")', + "max": "max(|)", + "min": "min(|)", + "Multi" : "Multi(|)" } + + + _delta_completion = { "Delta" : 'Delta("|")', + "Multi" : "Multi(|)" } + + + __attrib_completions__ = { \ + "def NewTask():" : "def |NewTask():\n", + "milestone": 'milestone = True', + "start": 'start = ', + "end": 'end = ', + "effort": 'effort = "|"', + "duration": 'duration = "|"', + "length": 'length = "|"', + "todo": 'todo = "|"', + "done": 'done = "|"', + "title": 'title = "|"', + "load": 'load = ', + "max_load": 'max_load = ', + "efficiency": 'efficiency = ', + "complete": 'complete = ', + "copy_src": 'copy_src =', + "__constraint__": '__constraint__():\n|"', + "priority": 'priority = ', + "balance" : 'balance = ', + "resource": 'resource = ', + "performed" : 'performed = [(|resource, "2002-02-01", "2002-02-05", "2H"),]', + "add_attrib": "add_attrib(|'name', None)", + "working_days_per_week": 'working_days_per_week = ', + "working_days_per_month": 'working_days_per_month = ', + "working_days_per_year": 'working_days_per_year = ', + "working_hours_per_day": 'working_hours_per_day = ', + "minimum_time_unit": 'minimum_time_unit = ', + "vacation": 'vacation = [("|2002-02-01", "2002-02-05")]', + "extra_work": 'extra_work = [("|2002-02-01", "2002-02-05")]', + "working_days" : 'working_days = ["|mon,tue,wed,thu,fri", "8:00-12:00", "13:00-17:00"]', + "now": 'now = "|"', + "calendar" : 'calendar = ', + "#load": { "YearlyMax": 'YearlyMax("|")', + "WeeklyMax": 'WeeklyMax("|")', + "MonthlyMax": 'MonthlyMax("|")', + "DailyMax": 'DailyMax("|")', + "VariableLoad" : "VariableLoad(|)"}, + "#max_load": { "YearlyMax": 'YearlyMax("|")', + "WeeklyMax": 'WeeklyMax("|")', + "MonthlyMax": 'MonthlyMax("|")', + "DailyMax": 'DailyMax("|")' }, + "#start": _date_completion, + "#end": _date_completion, + "#effort": _delta_completion, + "#duration": _delta_completion, + "#length": _delta_completion, + "#todo": _delta_completion, + "#done": _delta_completion, + "#resource" : "get_resource_completions", + "#calendar" : "get_calendar_completions", + "#balance": { "STRICT": "STRICT", + "SMART": "SMART", + "SLOPPY": "SLOPPY" } } + + + formats = { "start" : "%x %H:%M", + "end" : "%x %H:%M", + "performed_start" : "%x %H:%M", + "performed_end" : "%x %H:%M", + "load" : "%.2f", + "length" : "%dd{ %HH}{ %MM}", + "effort" : "%dd{ %HH}{ %MM}", + "estimated_effort" : "%dd{ %HH}{ %MM}", + "performed_effort" : "%dd{ %HH}{ %MM}", + "duration" : "%dd{ %HH}{ %MM}", + "complete" : "%i", + "priority" : "%i", + "todo" : "%dd{ %HH}{ %MM}", + "done" : "%dd{ %HH}{ %MM}", + "efficiency" : "%.2f", + "buffer" : "%dd{ %HH}{ %MM}", + "costs" : "%.2f", + "sum" : "%.2f", + "max" : "%.2f", + "min" : "%.2f", + "milestone" : "%s", + "resource" : "%s", + "booked_resource" : "%s", + "performed_resource" : "%s" } + + _constraint = None + _is_frozen = False + _is_compiled = False + _is_parent_referer = False + + scenario = None # only for autocompletion + milestone = False + performed = () + performed_resource = () + booked_resource = () + _performed_resource_length = () + _resource_length = () + dont_inherit = () + performed_start = None + performed_end = None + performed_work_time = pcalendar.Minutes(0) + + _setting_hooks = {} + #@nonl + #@-node:<< declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, func, name, parent=None, index=1): + + assert(type(func) == 
types.FunctionType) + + func_key = (func.func_code, func.func_closure and id(func.func_closure)) + + try: + instrumented = instrumentation_cache[func_key] + except KeyError: + instrumented = _instrument(func) + instrumented.org_code = func_key + instrumentation_cache[func_key] = instrumented + + func.task_func = instrumented # will be used in the gui + self._function = instrumented + self.name = name + self.up = parent + self.children = [] + self._sources = {} # all tasks, I am linked to + self._dependencies = {} # all tasks that link to me + self._original_values = {} + self._properties = {} # a registry of all non standard attributes + self.title = self.name + self.root = parent and parent.root or self + self.scenario = self.root.scenario + self.path = parent and parent.path + "." + name or name + self.depth = len(self.path.split(".")) - 1 + self.index = parent and ("%s.%i" % (parent.index, index)) \ + or str(index) + if self.formats.has_key(name): + raise AttributeError("Task name '%s' hides attribute of parent." \ + % name) + + cal = self.calendar + self._to_delta = cal.Minutes + self._to_start = cal.StartDate + self._to_end = cal.EndDate + + #@-node:__init__ + #@+node:__iter__ + def __iter__(self): + return _step_tasks(self) + #@-node:__iter__ + #@+node:__repr__ + def __repr__(self): + return "" % self.name + #@-node:__repr__ + #@+node:__cmp__ + def __cmp__(self, other): + try: + return cmp(self.path, other.path) + except Exception: + return cmp(self.path, other) + #@-node:__cmp__ + #@+node:__getattr__ + def __getattr__(self, name): + try: + if name[0] != "_": + parent = self.up + while parent: + if name not in parent.dont_inherit: + result = getattr(parent, name) + if not (isinstance(result, Task) and result.up == parent): + return result + + parent = parent.up + except AttributeError: + pass + except IndexError: + raise AttributeError() + + exception = AttributeError("'%s' is not a valid attribute of '%s'" + % (name, self.path)) + exception.is_frozen = self._is_frozen + raise exception + #@-node:__getattr__ + #@+node:_idendity_ + def _idendity_(self): return self.root.id + self.path[4:] + #@-node:_idendity_ + #@+node:_set_hook + def _set_hook(cls, attrib_name, function=None): + if function: + cls._setting_hooks[attrib_name] = function + else: + try: + del cls._setting_hooks[attrib_name] + except KeyError: pass + + + _set_hook = classmethod(_set_hook) + #@nonl + #@-node:_set_hook + #@+node:Public methods + #@+node:to_string + def to_string(self): return _StringConverter(self) + to_string = property(to_string) + #@nonl + #@-node:to_string + #@+node:indent_name + def indent_name(self, ident=" "): + """ + returns a indented name, according to its depth in the hierachy. + """ + + return ident * self.depth + self.name + + indent_name.attrib_method = True + indent_name.__call_completion__ = "indent_name()" + #@-node:indent_name + #@+node:costs + def costs(self, cost_name, mode="ep"): + """ + calculates the resource costs for the task. 
+ cost_name is the name of a rate attribute of the reosurce + mode is character combination: + e calculates the estimated costs + p calculates the performed costs + ==> pe calculates all costs + """ + + if self.children: + return sum([ c.costs(cost_name, mode) for c in self.children]) + + costs = 0 + if 'e' in mode: + costs += sum(map(lambda rl: getattr(rl[0], cost_name) * rl[1], + self._resource_length)) + + if 'p' in mode: + costs += sum(map(lambda rl: getattr(rl[0], cost_name) * rl[1], + self._performed_resource_length)) + + costs /= (60.0 * self.root.calendar.working_hours_per_day) + return round(costs, 2) + + costs.attrib_method = True + costs.__call_completion__ = 'costs("|")' + #@-node:costs + #@+node:sum + def sum(self, attrib_name): + val = 0 + + if self.children: + val += sum(map(lambda c: c.sum(attrib_name), self.children)) + if self.is_inherited(attrib_name): + return val + + if attrib_name not in self.dont_inherit: + return val + + return val + getattr(self, attrib_name) + + sum.attrib_method = True + sum.__call_completion__ = 'sum("|")' + + #@-node:sum + #@+node:min + def min(self, attrib_name): + if self.children: + return min(map(lambda c: c.min(attrib_name), self.children)) + + return getattr(self, attrib_name) + + min.attrib_method = True + min.__call_completion__ = 'min("|")' + + #@-node:min + #@+node:max + def max(self, attrib_name): + if self.children: + return max(map(lambda c: c.max(attrib_name), self.children)) + + return getattr(self, attrib_name) + + max.attrib_method = True + max.__call_completion__ = 'max("|")' + + #@-node:max + #@+node:all_resources + def all_resources(self): + result = self._all_resources_as_dict() + result = result.keys() + result.sort() + return result + #@-node:all_resources + #@+node:get_task + def get_task(self, path=None): + """ + Returns a task with the given path. 
+ """ + + if not path: + return self + + names = path.split(".") + rest = ".".join(names[1:]) + result = getattr(self, names[0], None) + return isinstance(result, Task) and result.get_task(rest) or None + #@-node:get_task + #@+node:snapshot + def snapshot(self, indent="", name=None): + text = indent + "def %s():\n" % (name or self.name) + indent += " " + for name in ("priority", "balance", "complete", + "milestone", "end", "start", "effort", "load"): + val = getattr(self, name, None) + if val is None: + continue + + if name[0] == "_": + name = name[1:] + + text += "%s%s = %s\n" % (indent, name, _as_string(val)) + + for name in self._properties: + if name.startswith("performed"): continue + val = getattr(self, name, None) + try: + if issubclass(val, resource.Resource): continue + except TypeError: + pass + text += "%s%s = %s\n" % (indent, name, _as_string(val)) + + resources = tuple(self._iter_booked_resources()) + if resources: + text += "%sresource = \\\n" % indent + def make_resource(res): + return "%s %s" \ + % (indent, res.snapshot()) + + text += "&\\\n".join(map(make_resource, resources)) + "\n" + + def make_resource_booking(res): + def make_booking(booking): + return '%s (%s, "%s", "%s", "%sM"),' \ + % (indent, res.name, + booking.book_start.strftime("%Y%m%d %H:%M"), + booking.book_end.strftime("%Y%m%d %H:%M"), + booking.work_time) + + return "\n".join(map(make_booking, res.get_bookings(self))) + + + text += "%sperformed = [\n" % indent + text += "\n".join(map(make_resource_booking, resources)) + "]" + + + child_text = map(lambda c: c.snapshot(indent), self.children) + text += "\n\n" + text += "".join(child_text) + + return text + #@-node:snapshot + #@+node:is_inherited + def is_inherited(self, attrib_name): + return not self.__dict__.has_key(attrib_name) + #@-node:is_inherited + #@+node:formatter + def formatter(self, attrib_name, arg=None, format=None): + """returns a function which is able + to convert the value of the given attrib_name to a string""" + + formats = self.formats + format = format or formats.get(attrib_name) + + if attrib_name in ("start", "end", "length", "effort", + "done", "todo", "buffer", "estimated_effort", + "performed_effort", "performed_start", "performed_end"): + def save_strftime(v): + try: + return v.strftime(format) + #except AttributeError: some bug avoid catching this exception + except Exception: + return str(v) + + return save_strftime + + if attrib_name == "duration": + def save_strftime(v): + try: + return v.strftime(format, True) + except AttributeError: + return str(v) + + return save_strftime + + if attrib_name in ("booked_resource", "performed_resource"): + def get_resource_name(v): + title = getattr(v, "title", None) + if title: return title + return ", ".join([r.title for r in v]) + return get_resource_name + + if arg and attrib_name in ("costs", "sum", "max", "min"): + format = formats.get("%s(%s)" % (attrib_name, arg), format) + + if format: + return lambda v: locale.format(format, v, True) + + return str + #@-node:formatter + #@-node:Public methods + #@+node:Resource allocation Methods + #@+node:_all_resources_as_dict + def _all_resources_as_dict(self): + if self.children: + result = {} + for c in self.children: + result.update(c._all_resources_as_dict()) + + return result + + if self.resource: + return dict(map(lambda r: (r, 1), self.resource.all_members())) + + return {} + #@-node:_all_resources_as_dict + #@+node:_test_allocation + def _test_allocation(self, resource_state, allocator): + resource = 
self.resource._get_resources(resource_state) + if not resource: + return False + + return allocator.test_allocation(self, resource) + #@-node:_test_allocation + #@+node:_allocate + def _allocate(self, state, allocator): + allocator.allocate(self, state) + #activate cache for done and todo + + if self.start.to_datetime() > self.end.to_datetime(): + #this can happen when performed effort are + #during non working time + tmp = self.start + self.start = self.end + self.end = tmp + + for r in self.performed_resource: + r.correct_bookings(self) + + self._resource_length = map(lambda r: (weakref.proxy(r), \ + r.length_of(self)), + self._iter_booked_resources()) + #@-node:_allocate + #@+node:_convert_performed + def _convert_performed(self, all_resources): + performed = self.performed + if not performed: return False + + if not isinstance(performed, (tuple, list)) \ + or not isinstance(performed[0], (tuple, list)) \ + or not len(performed[0]) >= 3: + self._raise(TypeError("""The format of the performed attribute must be: + [( res_name, start_literal, end_literal, working_time ), ... ]. + """), "performed") + + round_down_delta = self.root.calendar.minimum_time_unit / 2 + round_down_delta = datetime.timedelta(minutes=round_down_delta) + + def convert_item(index): + item = performed[index] + res, start, end = item[:3] + if isinstance(res, str): + found = filter(lambda r: r.name == res, all_resources) + if found: res = found[0] + + try: + if not isinstance(res, (resource.Resource, + resource._MetaResource)): + raise ValueError("the resource '%s' is unknown." % res) + + start = _to_datetime(start) + end = _to_datetime(end) + + if len(item) > 3: + working_time = self._to_delta(item[3]).round() + else: + working_time = self._to_delta(end - start, True) + + return ((res, start, end, working_time), index) + except Exception, exc: + self._raise(exc.__class__("Item %i: %s" \ + % (index + 1, str(exc))), + "performed") + + converted = dict(map(convert_item, range(len(performed)))) + converted = converted.items() + converted.sort() + + #check for overlapping items + last_res = None + for item, index in converted: + res, start, end, work_time = item + if last_res == res and start < last_end: + self._warn("Items %i, %i: %s and %s are overlapping." 
\ + % (last_index + 1, index + 1, + str(performed[last_index]), + str(performed[index])), + "performed") + + last_res = res + last_end = end + last_index = index + + self._performed = map(lambda x: x[0], converted) + return True + #@-node:_convert_performed + #@+node:_allocate_performed + def _allocate_performed(self, performed): + if not performed: return + + to_delta = self._to_delta + to_start = self._to_start + to_end = self._to_end + + last = datetime.datetime.min + first = datetime.datetime.max + effort = 0 + work_time_sum = 0 + zero_minutes = to_delta(0) + minimum_time_unit = to_delta(self.calendar.minimum_time_unit) + summary = {} + + for item in performed: + res, start, end, work_time = item + effort += work_time * self.efficiency * res.efficiency + work_time_sum += work_time + + res = res() + ss, es, wts = summary.get(res, (datetime.datetime.max, + datetime.datetime.min, + zero_minutes)) + summary[res] = (min(ss, start), max(es, end), wts + work_time) + + for r, v in summary.iteritems(): + start, end, work_time = v + assert(start.__class__ is datetime.datetime) + assert(end.__class__ is datetime.datetime) + + #the booking limits should be inside the workingtime + #to display them correct in resource charts + cstart = to_start(start).to_datetime() + if cstart > start: cstart = to_end(start).to_datetime() + + cend = to_end(end).to_datetime() + if cend < end: cend = to_start(end).to_datetime() + + if self.root.is_snapshot: + delta = to_end(cend) - to_start(cstart) + else: + delta = to_delta(cend - cstart).round() + + if not delta: + delta = minimum_time_unit + + book_load = float(work_time) / delta + r().book_task(self, cstart, cend, book_load, work_time, True) + last = max(end, last) + first = min(start, first) + + self._performed_resource_length = tuple([ (r, v[2]) for r, v in summary.iteritems() ]) + self.performed_resource = tuple(summary.keys()) + self.performed_end = last + self.performed_start = first + self.performed_effort = to_delta(effort) + self.performed_work_time = to_delta(work_time_sum) + self._check_completion() + #@-node:_allocate_performed + #@+node:_iter_booked_resources + def _iter_booked_resources(self): + result = dict(map(lambda r: (r, 1), self.performed_resource)) + result.update(dict(map(lambda r: (r, 1), self.booked_resource))) + return result.iterkeys() + #@-node:_iter_booked_resources + #@-node:Resource allocation Methods + #@+node:Compile Methods + #@+node:_generate + def _generate(self, deferred=None): + do_raise = False + deferred = deferred or [ self ] + while deferred: + new_deferred = [] + for task in deferred: + task._compile(new_deferred, do_raise) + + do_raise = deferred == new_deferred + deferred = new_deferred + #@-node:_generate + #@+node:_recalc_properties + def _recalc_properties(self): + if not self._properties: return + self.__compile_function([], False, _MeProxyRecalc(self)) + self._is_compiled = True + #@-node:_recalc_properties + #@+node:_compile + def _compile(self, deferred, do_raise): + self.dont_inherit = () + self._constraint = None + self._original_values.clear() + self._properties.clear() + + try: + self.__at_compile + #@ << raise child recursion error >> + #@+node:<< raise child recursion error >> + self._raise(RecursionError("A child defines a "\ + "recursive definition at %s" % self.path)) + #@-node:<< raise child recursion error >> + #@nl + except AttributeError: + self.__at_compile = self, "" + + try: + self.__compile_function(deferred, do_raise, _MeProxy(self)) + finally: + del self.__at_compile + + for c in self.children: 
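+            # compile every child that has not been compiled yet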
+ if not c._is_compiled: + c._compile(deferred, do_raise) + + if self._is_compiled: + self.__check_milestone() + self.__check_task() + self.root.has_actual_data |= self.__dict__.has_key("performed") + + #@-node:_compile + #@+node:__compile_function + def __compile_function(self, deferred, do_raise, me_instance): + self._is_compiled = self._is_frozen + + restore_globals = [] + globals_ = self._function.func_globals + + #@ << set function global values >> + #@+node:<< set function global values >> + def to_value_wrapper(a): + if isinstance(a, _ValueWrapper): + return a + + return _ValueWrapper(a, [(None, None)]) + + def my_max(*args): + return max(map(to_value_wrapper, args)) + + def my_min(*args): + return min(map(to_value_wrapper, args)) + + globals_["me"] = me_instance + + if self._is_compiled: + globals_["up"] = self.up + globals_["root"] = self.root + else: + globals_["up"] = _Path(self.up, "up") + globals_["root"] = _Path(self.root, "root") + + globals_["Delta"] = self._to_delta + globals_["Date"] = self._to_start + globals_["max"] = my_max + globals_["min"] = my_min + globals_["add_attrib"] = me_instance.add_attrib + #@nonl + #@-node:<< set function global values >> + #@nl + #@ << set me in global functions >> + #@+node:<< set me in global functions >> + #@+at + # Is used for functions like YearlyMax, MonthlyMax, .... + #@-at + #@@code + for name in self._function.global_names: + try: + obj = globals_[name] + if isinstance(obj, types.FunctionType): + fg = obj.func_globals + if not fg.has_key("me") and "me" in obj.func_code.co_names: + restore_globals.append(fg) + fg["me"] = me_instance + except KeyError: continue + #@nonl + #@-node:<< set me in global functions >> + #@nl + try: + #@ << eval function >> + #@+node:<< eval function >> + if do_raise: + try: + self._function() + self._is_compiled = True + except _IncompleteError, e: + src = e.args[1] + if src is not self: + self.__at_compile = e.args[1:] + src._compile([], True) + + raise + else: + try: + self._function() + self._is_compiled = True + except AttributeError, e: + #print "AttributeError:", e, self.name, e.is_frozen, do_raise + deferred.append(self) + except _IncompleteError: + #print "_IncompleteError:", id(self), self.name, do_raise + deferred.append(self) + except RecursionError: + self._is_parent_referer = True + deferred.append(self) + #@nonl + #@-node:<< eval function >> + #@nl + finally: + for fg in restore_globals: + del fg["me"] + + #@-node:__compile_function + #@-node:Compile Methods + #@+node:Setting methods + #@+node:_set_attrib + def _set_attrib(self, name, value): + + if value is _NEVER_USED_: return + + try: + value = self._setting_hooks[name](self, name, value) + except KeyError: pass + + if name == "__constraint__": + self._constraint = value + return + + if type(value) == types.FunctionType: + if value.func_code.co_argcount == 0: + #@ << add child task >> + #@+node:<< add child task >> + try: + task = self.__dict__[name] + except KeyError: + task = Task(value, name, self, len(self.children) + 1) + self.children.append(task) + setattr(self, task.name, task) + return + #@nonl + #@-node:<< add child task >> + #@nl + + if name[0] == "_": + #private vars will not be set + return + + if isinstance(value, _Path): + value = value._task + + set_method = getattr(self, "_set_" + name, None) + if set_method: + #@ << set standard attribute >> + #@+node:<< set standard attribute >> + if type(value) == types.DictionaryType: + self.root.all_scenarios.update(value.keys()) + value = value.get(self.scenario, value["_default"]) 
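+                    # illustrative: scenario dependent values are written as
+                    # dicts, e.g. effort = { "_default": "10d", "pessimistic": "15d" };
+                    # the entry for the scheduled scenario is used here and
+                    # "_default" is the fallback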
+ + self.__set_sources(name, value) + self._original_values[name] = value + set_method(_val(value)) + #@nonl + #@-node:<< set standard attribute >> + #@nl + else: + #@ << set userdefined attribute >> + #@+node:<< set userdefined attribute >> + if callable( getattr(self.__class__, name, None)): + raise NameError('You may not use "%s" as attribute' % name) + + setattr(self, name, value) + self._properties[name] = True + self.__set_sources(name, value) + #@nonl + #@-node:<< set userdefined attribute >> + #@nl + #@-node:_set_attrib + #@+node:read only attributes + #@+node:_set_name + def _set_name(self, value): + raise AttributeError("The attribute 'name' is readonly.") + #@nonl + #@-node:_set_name + #@+node:_set_done + def _set_done(self, value): + raise AttributeError("The attribute 'done' is readonly.") + #@nonl + #@-node:_set_done + #@+node:_set_performed_work_time + def _set_performed_work_time(self, value): + raise AttributeError("The attribute 'performed_work_time' is readonly.") + #@nonl + #@-node:_set_performed_work_time + #@+node:_set_booked_resource + def _set_booked_resource(self, value): + raise AttributeError("The attribute 'booked_resource' is readonly.") + #@nonl + #@-node:_set_booked_resource + #@+node:_set_performed_effort + def _set_performed_effort(self, value): + raise AttributeError("The attribute 'performed_effort' is readonly.") + #@nonl + #@-node:_set_performed_effort + #@+node:_set_children + def _set_children(self, value): + raise AttributeError("The attribute 'children' is readonly.") + #@nonl + #@-node:_set_children + #@+node:_set_depth + def _set_depth(self, value): + raise AttributeError("The attribute 'depth' is readonly.") + #@nonl + #@-node:_set_depth + #@+node:_set_index + def _set_index(self, value): + raise AttributeError("The attribute 'index' is readonly.") + #@nonl + #@-node:_set_index + #@+node:_set_scenario + def _set_scenario(self, value): + raise AttributeError("The attribute 'scenario' is readonly.") + #@nonl + #@-node:_set_scenario + #@+node:_set_buffer + def _set_buffer(self, value): + raise AttributeError("The attribute 'buffer' is readonly.") + #@nonl + #@-node:_set_buffer + #@-node:read only attributes + #@+node:_set_start + def _set_start(self, value): + self.__start_class = value.__class__ + self.start = self._to_start(value).round() + #@-node:_set_start + #@+node:_set_end + def _set_end(self, value): + self.end = self._to_end(value) + #@-node:_set_end + #@+node:_set_max_load + def _set_max_load(self, max_load): + self.max_load = float(max_load) + #@-node:_set_max_load + #@+node:_set_load + def _set_load(self, load): + self.load = float(load) + #@-node:_set_load + #@+node:_set_length + def _set_length(self, value): + self.length = self._to_delta(value).round() + #@-node:_set_length + #@+node:_set_effort + def _set_effort(self, value): + self.effort = self._to_delta(value).round() + #@-node:_set_effort + #@+node:_set_duration + def _set_duration(self, value): + self.duration = self._to_delta(value, True).round() + #@-node:_set_duration + #@+node:_set_complete + def _set_complete(self, value): + self.complete = value + #@-node:_set_complete + #@+node:_set_done + def _set_done(self, value): + self.done = self._to_delta(value).round() + #@-node:_set_done + #@+node:_set_todo + def _set_todo(self, value): + self.todo = self._to_delta(value).round() + #@-node:_set_todo + #@+node:_set_milestone + def _set_milestone(self, value): + self.milestone = value + #@-node:_set_milestone + #@+node:_set_resource + def _set_resource(self, value): + if not value: 
+ self.resource = None + return + + if isinstance(value, (tuple, list)): + value = reduce(lambda a, b: a & b, value) + + self.resource = value() + + #@-node:_set_resource + #@+node:_set_copy_src + def _set_copy_src(self, value): + if isinstance(value, _MeProxy): + raise RuntimeError("Cannot copy me.") + + if not value._is_compiled: + raise _IncompleteError(value, "copy_src") + + if value.resource and not self.resource: + self.resource = value.resource + + if value.balance and not self.balance: + self.balance = value.balance + + copy_parms = ("priority", "todo", "complete", + "_constraint", "load", "length", + "effort", "duration") + + for p in copy_parms: + v = value.__dict__.get(p) + if v: setattr(self, p, v) + + self.copy_src = value + self._properties.update(value._properties) + for k in value._properties.iterkeys(): + setattr(self, k, getattr(value, k)) + #@-node:_set_copy_src + #@+node:__set_sources + def __set_sources(self, attrib_name, value): + #@ << find references >> + #@+node:<< find references >> + def make_ref(val): + if isinstance(val, _ValueWrapper): + return val._ref + + if isinstance(val, Task): + return [(val, "")] + + return [] + + if isinstance(value, (list, tuple)): + sources = _refsum(map(make_ref, value)) + else: + sources = make_ref(value) + #@nonl + #@-node:<< find references >> + #@nl + if not sources: return + + #track only dependcies within the same project + root = self.root + sources = [ task.path + "." + attrib + for task, attrib in sources + if task and task.root is root ] + self._sources[attrib_name] = tuple(sources) + attr_path = self.path + "." + attrib_name + + #set dependencies of my sources + for d in sources: + path, attrib = _split_path(d) + task = self.get_task(path) + r_d = task._dependencies + d_l = r_d.setdefault(attrib, {}) + d_l[attr_path] = True + #@-node:__set_sources + #@+node:Calendar Setters + #@+node:_set_calendar + def _set_calendar(self, value): + self.calendar = value + self._to_delta = value.Minutes + self._to_start = value.StartDate + self._to_end = value.EndDate + self.__renew_dates() + + #@-node:_set_calendar + #@+node:__renew_dates + def __renew_dates(self): + for attrib in ("effort", "start", "end", "length", "todo"): + try: + + self._set_attrib(attrib, self._original_values[attrib]) + except KeyError: + pass + + #@-node:__renew_dates + #@+node:__make_calendar + def __make_calendar(self): + if not "calendar" in self.__dict__: + cal = self.calendar = self.calendar.clone() + self._to_delta = cal.Minutes + self._to_start = cal.StartDate + self._to_end = cal.EndDate + #@nonl + #@-node:__make_calendar + #@+node:_set_vacation + def _set_vacation(self, value): + self.__make_calendar() + self.calendar.set_vacation(value) + self._properties["vacation"] = True + self.vacation = value + self.__renew_dates() + #@-node:_set_vacation + #@+node:_set_extra_work + def _set_extra_work(self, value): + self.__make_calendar() + self.calendar.set_extra_work(value) + self._properties["extra_work"] = True + self.extra_work = value + self.__renew_dates() + #@-node:_set_extra_work + #@+node:_set_working_days + def _set_working_days(self, value): + + if type(value[0]) is str: + value = (value, ) + + self.working_days = value + self._properties["working_days"] = True + self.__make_calendar() + + for v in value: + day_range = v[0] + tranges = tuple(v[1:]) + self.calendar.set_working_days(day_range, *tranges) + + self.__renew_dates() + #@nonl + #@-node:_set_working_days + #@+node:_set_minimum_time_unit + def _set_minimum_time_unit(self, value): + 
self.__make_calendar() + self.calendar.minimum_time_unit = value + self._properties["minimum_time_unit"] = True + #@-node:_set_minimum_time_unit + #@+node:_get_minimum_time_unit + def _get_minimum_time_unit(self): + return self.calendar.minimum_time_unit + + minimum_time_unit = property(_get_minimum_time_unit) + #@-node:_get_minimum_time_unit + #@+node:_set_working_days_per_week + def _set_working_days_per_week(self, value): + + self.__make_calendar() + self.calendar.working_days_per_week = value + self._properties["working_days_per_week"] = True + #@-node:_set_working_days_per_week + #@+node:_get_working_days_per_week + def _get_working_days_per_week(self): + return self.calendar.working_days_per_week + + working_days_per_week = property(_get_working_days_per_week) + #@-node:_get_working_days_per_week + #@+node:_set_working_days_per_month + def _set_working_days_per_month(self, value): + self.__make_calendar() + self.calendar.working_days_per_month = value + self._properties["working_days_per_month"] = True + #@-node:_set_working_days_per_month + #@+node:_get_working_days_per_month + def _get_working_days_per_month(self): + return self.calendar.working_days_per_month + + working_days_per_month = property(_get_working_days_per_month) + #@-node:_get_working_days_per_month + #@+node:_set_working_days_per_year + def _set_working_days_per_year(self, value): + self.__make_calendar() + self.calendar.working_days_per_year = value + self._properties["working_days_per_year"] = True + #@-node:_set_working_days_per_year + #@+node:_get_working_days_per_year + def _get_working_days_per_year(self): + return self.calendar.working_days_per_year + + working_days_per_year = property(_get_working_days_per_year) + #@-node:_get_working_days_per_year + #@+node:_set_working_hours_per_day + def _set_working_hours_per_day(self, value): + self.__make_calendar() + self.calendar.working_hours_per_day = value + self._properties["set_working_hours_per_day"] = True + #@-node:_set_working_hours_per_day + #@+node:_get_working_hours_per_day + def _get_working_hours_per_day(self): + return self.calendar.working_hours_per_day + + working_hours_per_day = property(_get_working_hours_per_day) + #@-node:_get_working_hours_per_day + #@+node:_set_now + def _set_now(self, value): + proxy = weakref.proxy + self.calendar.now = _to_datetime(value) + #@-node:_set_now + #@-node:Calendar Setters + #@-node:Setting methods + #@+node:Freezer Methods + #@+node:_unfreeze + def _unfreeze(self, attrib_name): + if self.__dict__.has_key(attrib_name): + del self.__dict__[attrib_name] + #@-node:_unfreeze + #@+node:_wrap_attrib + def _wrap_attrib(self, method): + attrib_name = method.__name__[7:] + recursion_attrib = "_rec" + attrib_name + + try: + dest, dattr = self.__at_compile + raise RecursionError("Recursive definition of %s(%s) and %s(%s)" \ + % (self.path, attrib_name, dest.path, dattr)) + except AttributeError: pass + + if not self._is_compiled: + raise _IncompleteError(self, attrib_name) + + try: + getattr(self, recursion_attrib) + raise RecursionError(self, attrib_name) + except AttributeError: pass + + setattr(self, recursion_attrib, True) + + try: + result = method(self) + + if self._is_frozen: + setattr(self, attrib_name, result) + + return result + finally: + delattr(self, recursion_attrib) + #@-node:_wrap_attrib + #@+node:_find_frozen + def _find_frozen(self, attrib_name, default=None): + value = self.__dict__.get(attrib_name) + if value is not None: + return value + + up = self.up + return up and up._find_frozen(attrib_name) or 
default + #@-node:_find_frozen + #@-node:Freezer Methods + #@+node:Calculation Methods + #@+node:__calc_performed_effort + def __calc_performed_effort(self): + if self.children: + return self._to_delta(sum([ t.performed_effort for t in self.children ])) + + return pcalendar.Minutes(0) + + performed_effort = _TaskProperty(__calc_performed_effort) + #@-node:__calc_performed_effort + #@+node:__calc_estimated_effort + def __calc_estimated_effort(self): + if self.children: + return self._to_delta(sum([ t.estimated_effort for t in self.children ])) + + return self.effort + + estimated_effort = _TaskProperty(__calc_estimated_effort) + #@-node:__calc_estimated_effort + #@+node:__calc_start + def __calc_start(self): + to_start = self._to_start + + if self.children: + try: + return min([ to_start(t.start) for t in self.children + if not t._is_parent_referer ]) + except ValueError: + #@ << raise child recursion error >> + #@+node:<< raise child recursion error >> + self._raise(RecursionError("A child defines a "\ + "recursive definition at %s" % self.path)) + #@-node:<< raise child recursion error >> + #@nl + + try: + end = self.end + duration = self.__dict__.get("duration") + if duration is not None: + start = end.to_datetime() - datetime.timedelta(minutes=duration) + else: + start = end - self.length + + return to_start(start) + + except RecursionError: + start = self._find_frozen("start") + if start: return to_start(start) + #@ << raise recursion error >> + #@+node:<< raise recursion error >> + raise RecursionError("you have to specify a "\ + "start or an end at %s" % self.path) + #@nonl + #@-node:<< raise recursion error >> + #@nl + + start = _TaskProperty(__calc_start) + + #@-node:__calc_start + #@+node:__calc_end + def __calc_end(self): + to_end = self._to_end + + if self.children: + try: + return max([ to_end(t.end) for t in self.children + if not t._is_parent_referer ]) + except ValueError: + #@ << raise child recursion error >> + #@+node:<< raise child recursion error >> + self._raise(RecursionError("A child defines a "\ + "recursive definition at %s" % self.path)) + #@-node:<< raise child recursion error >> + #@nl + + try: + start = self.start + duration = self.__dict__.get("duration") + if duration is not None: + end = start.to_datetime() + datetime.timedelta(minutes=duration) + else: + end = start + self.length + + return to_end(end) + + except RecursionError: + end = self._find_frozen("end") + if end: return to_end(end) + #@ << raise recursion error >> + #@+node:<< raise recursion error >> + raise RecursionError("you have to specify a "\ + "start or an end at %s" % self.path) + #@nonl + #@-node:<< raise recursion error >> + #@nl + + + end = _TaskProperty(__calc_end) + #@-node:__calc_end + #@+node:__calc_load + def __calc_load(self): + length = self.__dict__.get("length") + effort = self.__dict__.get("effort") + + if length is not None and effort is not None: + return float(effort) / (float(length) or 1.0) + + load = self._find_frozen("load") + if load is not None: return load + return 1.0 + + load = _TaskProperty(__calc_load) + #@-node:__calc_load + #@+node:__calc_length + def __calc_length(self): + effort = self.__dict__.get("effort") + if effort is None: + return self.end - self.start + + return self._to_delta(effort / self.load) + + length = _RoundingTaskProperty(__calc_length, "length") + #@-node:__calc_length + #@+node:__calc_duration + def __calc_duration(self): + return self._to_delta(self.end.to_datetime()\ + - self.start.to_datetime(), True) + + duration = 
_TaskProperty(__calc_duration) + #@-node:__calc_duration + #@+node:__calc_effort + def __calc_effort(self): + if self.children: + return self._to_delta(sum([ t.effort for t in self.children ])) + + return self._to_delta(self.length * self.load) + + effort = _RoundingTaskProperty(__calc_effort, "effort") + #@-node:__calc_effort + #@+node:__calc_done + def __calc_done(self): + if self.children: + dones = map(lambda t: t.done, self.children) + return self._to_delta(sum(dones)) + + res = self._iter_booked_resources() + done = sum(map(lambda r: r.done_of(self), res)) + + complete = self.__dict__.get("complete") + todo = self.__dict__.get("todo") + + if not done and complete == 100 or todo == 0: + #if now is not set + done = self.effort + + return self._to_delta(done) + + done = _TaskProperty(__calc_done) + #@-node:__calc_done + #@+node:__calc_buffer + def __calc_buffer(self): + if self.children: + return self._to_delta(min(map(lambda t: t.buffer, self.children))) + + scenario = self.scenario + end = self.end + old_end = self.__dict__.get("end") + + #@ << find all tasks, that depend on my end >> + #@+node:<< find all tasks, that depend on my end >> + deps = { } + task = self + while task: + deps.update(task._dependencies.get("end", {})) + task = task.up + #@nonl + #@-node:<< find all tasks, that depend on my end >> + #@nl + + #@ << define unfreeze_parents >> + #@+node:<< define unfreeze_parents >> + def unfreeze_parents(): + task = self.up + while task: + task._unfreeze("end") + task = task.up + #@nonl + #@-node:<< define unfreeze_parents >> + #@nl + + buffers = [ ] + for d in deps.keys(): + path, attrib = _split_path(d) + if attrib != "start": + continue + + #@ << calculate buffer to descendant 'd' >> + #@+node:<< calculate buffer to descendant 'd' >> + unfreeze_parents() + + # the following code considers a expressione like + # start = predecessor.end + Delta("1d") the buffer + # calculation must be aware of the 1d delay. 
+ # (therefore a simple succ_start - end would be + # incorrect) + # Solution: Simluate a later end and calculate the + # real delay + + succ_task = self.get_task(path) + simulated_task = Task(succ_task._function, + succ_task.name, + succ_task.up, 1) + + current_start = succ_task.start + simulated_end = current_start + self.end = current_start + + simulated_task._generate() + simulated_start = simulated_task.start + + unfreeze_parents() + if old_end: self.end = old_end + else: self._unfreeze("end") + del simulated_task + + current_delay = current_start - end + simulated_delay = simulated_start - simulated_end + real_delay = current_delay - simulated_delay + try: + buffer_ = real_delay + succ_task.buffer + except RecursionError, err: + self._raise(err) + #@nonl + #@-node:<< calculate buffer to descendant 'd' >> + #@nl + + buffers.append(buffer_) + if not buffer_: + break + + if buffers: + return self._to_delta(min(buffers)) + + return not self.milestone \ + and self.root.end - end \ + or self._to_delta(0) + + buffer = _TaskProperty(__calc_buffer) + #@-node:__calc_buffer + #@+node:__calc_complete + def __calc_complete(self): + done = self.done + todo = self.todo + return int(100.0 * done / ((done + todo) or 1)) + + complete = _TaskProperty(__calc_complete) + #@-node:__calc_complete + #@+node:__calc_todo + def __calc_todo(self): + complete = self.__dict__.get("complete") + if complete: + # effort = done + todo + # done done + # complete = ------ ==> todo = -------- - done + # effort complete + complete = float(complete) + done = self.done + if done: + done = float(done) + return self._to_delta(done * 100.0 / complete - done) + return self._to_delta(self.effort * complete / 100.0) + + if self.children: + todos = map(lambda t: t.todo, self.children) + return self._to_delta(sum(todos)) + + todo = sum(map(lambda r: r.todo_of(self), self.booked_resource)) + return self._to_delta(max(todo, self.effort - self.done)) + + todo = _TaskProperty(__calc_todo) + #@-node:__calc_todo + #@-node:Calculation Methods + #@+node:Check Methods + #@+node:__check_task + def __check_task(self): + if self.children: return + + start = self._find_frozen("start") + end = self._find_frozen("end") + + if not (start or end): + self._raise(ValueError("You must specify either a"\ + " start or an end attribute")) + + if start and end: return + + length = self.__dict__.get("length") + duration = self.__dict__.get("duration") + effort = self.__dict__.get("effort") + if not (effort or length or duration): + #set a default value + self._set_effort("1d") + #self._raise(ValueError("You must specify either a"\ + # " length or a duration or "\ + # "an effort attribute")) + #@-node:__check_task + #@+node:__check_milestone + def __check_milestone(self): + if not self.milestone: return + + self.length = self._to_delta(0) + start = self.__dict__.get("start") + if not start: + self._raise(ValueError("Milestone must have start attribute"), + "milstone") + + if self.__start_class.__name__ == "edt": + #the milestone is probably dependent on the end date of + #an other task (see edt in pcalendar) ==> start at the end date + self.start = self.end = self._to_end(self.start) + else: + self.start = self.end = self._to_start(self.start) + + #@-node:__check_milestone + #@+node:_check_completion + def _check_completion(self): + if not self.performed_effort: return + if self.root.is_snapshot: return + + # allocation is not done yet ==> self.todo, self.done, + # self.complete cannot be calculated + if self._find_frozen("complete", 0) < 100 \ + and 
self.__dict__.get("todo", 1) > 0: + return + + start = self.performed_start + end = self.performed_end + #ensure that self.start.to_datetime() < self.end.to_datetime() + cstart = self._to_start(start) + if cstart.to_datetime() > start: cstart = self._to_end(start) + + cend = self._to_end(end) + if cend.to_datetime() < end: cend = self._to_start(end) + + self.start = cstart + self.end = cend + + if self.performed_effort != self.effort: + self.estimated_effort = self.effort + self.effort = self.performed_effort + #@-node:_check_completion + #@+node:check + def check(self): + if self._constraint and self._is_compiled: + globals_ = self._function.func_globals + globals_["me"] = self + globals_["up"] = self.up + globals_["root"] = self.root + globals_["assert_"] = self.__assert + self._constraint() + #@-node:check + #@-node:Check Methods + #@+node:Error Methods + #@+node:__assert + def __assert(self, value): + if not value: + warnings.warn('assertion in scenario: "%s"' % self.scenario, + RuntimeWarning, 2) + #@-node:__assert + #@+node:_warn + def _warn(self, message, attrib=None, level=2): + self.__compile_function([], True, _MeProxyWarn(self, attrib, message)) + #@-node:_warn + #@+node:_raise + def _raise(self, exc, attrib=None): + self.__compile_function([], True, _MeProxyError(self, attrib, exc)) + raise exc + #@-node:_raise + #@-node:Error Methods + #@-others +#@nonl +#@-node:class Task +#@-node:Task +#@+node:Projects +#@+node:class _ProjectBase +class _ProjectBase(Task): + """ + Base class for all projects. + """ + #@ << class _ProjectBase declarations >> + #@+node:<< class _ProjectBase declarations >> + __attrib_completions__ = { } + __attrib_completions__.update(Task.__attrib_completions__) + del __attrib_completions__["milestone"] #project cannot be milestones + + priority = 500 + efficiency = 1.0 + max_load = 1.0 + balance = 0 + resource = None + copy_src = None + has_actual_data = False + is_snapshot = False + + #@-node:<< class _ProjectBase declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, top_task, scenario="_default", id=""): + self.calendar = pcalendar.Calendar() + Task.__init__(self, top_task, top_task.func_name) + self.id = id or self.name + self.scenario = scenario + self.all_scenarios = set(("_default",)) + self.path = "root" + self._globals = top_task.func_globals.copy() + self._generate() + + #@-node:__init__ + #@+node:_idendity_ + def _idendity_(self): return self.id + #@-node:_idendity_ + #@+node:_restore_globals + def _restore_globals(self): + self._function.func_globals.clear() + self._function.func_globals.update(self._globals) + del self._globals + #@-node:_restore_globals + #@+node:free + def free(self): + all_resources = self.all_resources() + for r in all_resources: + r().unbook_tasks_of_project(self.id, self.scenario) + + for t in self: + t.booked_resource = () + + return all_resources + #@-node:free + #@+node:_get_balancing_list + def _get_balancing_list(self): + + try: + cached_list = balancing_cache[self._function.org_code] + if len(cached_list) != len(tuple(self)): + # different scenarios can have different tasks + raise KeyError() + + except KeyError: + cached_list = _build_balancing_list(self) + balancing_cache[self._function.org_code] = cached_list + else: + cached_list = [ self.get_task(t.path) for t in cached_list ] + + return cached_list + #@-node:_get_balancing_list + #@+node:snapshot + def snapshot(self, indent="", name=None): + text = Task.snapshot(self, indent, name) + + lines = text.splitlines(True) + indent += " " + + def 
make_resource(r): + return '%sclass %s(Resource): title = "%s"\n' \ + % (indent, r.name, r.title) + + now = datetime.datetime.now().strftime("%x %H:%M") + resource_text = map(lambda r: make_resource(r), self.all_resources()) + lines.insert(1, "%sfrom faces import Resource\n" % indent) + lines.insert(2, "".join(resource_text) + "\n") + lines.insert(3, '%snow = "%s"\n' % (indent, now)) + lines.insert(4, '%sis_snapshot = True\n' % indent) + return "".join(lines) + #@-node:snapshot + #@-others +#@-node:class _ProjectBase +#@+node:class Project +class Project(_ProjectBase): + """ + Generates a Project without allocating resources. + + @param top_task: Specifies the highest function of a project definiton. + + @param scenario: Specifies the name of the scenario which should be scheduled. + + @param id: Specifiess a unique idenfication name to distinguish the project from + other projects in the resource database. The default value for id + is the name of top_task. + """ + #@ << class Project declarations >> + #@+node:<< class Project declarations >> + __call_completion__ = 'Project(|top_task, scenario="_default", id=None)' + + #@-node:<< class Project declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, top_task, scenario="_default", id=None): + _ProjectBase.__init__(self, top_task, scenario, id) + no_snapshot = not self.is_snapshot + for t in self: + t._is_frozen = True + t._recalc_properties() + no_snapshot and t.check() + + self._restore_globals() + + #@-node:__init__ + #@-others + +#@-node:class Project +#@+node:class _AllocationPoject +class _AllocationPoject(_ProjectBase): + #@ @+others + #@+node:unfreeze_parents + def unfreeze_parents(self): + if self.has_actual_data: + for t in filter(lambda t: t.children, self): + if not t._original_values.has_key("start"): t._unfreeze("start") + if not t._original_values.has_key("end"): t._unfreeze("end") + #@-node:unfreeze_parents + #@-others +#@-node:class _AllocationPoject +#@+node:class BalancedProject +class BalancedProject(_AllocationPoject): + """ + Generates a project with allocated resources. The tasks are balanced + to fit the resources load conditions. 
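+
+    Illustrative call (argument values are examples only):
+    BalancedProject(top_task, scenario="_default", balance=SMART)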
+ """ + #@ << class BalancedProject declarations >> + #@+node:<< class BalancedProject declarations >> + __call_completion__ = """BalancedProject(|top_task, scenario="_default", + id=None, balance=SMART, performed=None)""" + + #@-node:<< class BalancedProject declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, top_task, scenario="_default", + id=None, balance=SMART, performed=None): + _AllocationPoject.__init__(self, top_task, scenario, id) + self.balance = balance + if performed: + self._distribute_performed(performed) + self.has_actual_data = True + + no_snapshot = not self.is_snapshot + if no_snapshot: + self.allocate() + else: + self.allocate_snapshot() + + for t in self: + t._is_frozen = True + t._recalc_properties() + no_snapshot and t.check() + + self._restore_globals() + #@nonl + #@-node:__init__ + #@+node:allocate_snapshot + def allocate_snapshot(self): + all_resources = self.free() + scenario = self.scenario + has_actual_data = True + for t in self: + if not t.resource or t.milestone or t.children: + continue + + t._convert_performed(all_resources) + t._allocate_performed(t._performed) + #@-node:allocate_snapshot + #@+node:allocate + def allocate(self): + all_resources = self.free() + balancing_list = self._get_balancing_list() + scenario = self.scenario + + #for t in balancing_list: + # print t.path + + for t in balancing_list: + t._compile([], True) + + if not t.resource or t.milestone or t.children: + continue + + if t._convert_performed(all_resources): + has_actual_data = True + + try: + t._allocate_performed(t._performed) + except AttributeError: + pass + + allocator = _allocators[t.balance] + min_val = None + min_state = None + for p in range(t.resource._permutation_count()): + state = t._test_allocation(p, allocator) + + if not state: continue + + to_minimize = state[0] + if not min_val or min_val > to_minimize: + min_val = to_minimize + min_state = state + + if min_state: + t._allocate(min_state, allocator) + elif t.performed_start: + # t could not be allocated ==> + # performance data holds all information + t.start = t._to_start(t.performed_start) + t.end = t._to_end(t.performed_end) + + self.unfreeze_parents() + #@-node:allocate + #@+node:_distribute_performed + def _distribute_performed(self, performed): + project_id = self._idendity_() + plen = len(project_id) + + performed = filter(lambda item: item[0].startswith(project_id), + performed) + performed.sort() + + task = None + for item in performed: + path = item[0] + rpath = "root" + path[plen:] + task = self.get_task(rpath) + + if not task: + #@ << extract task in activity path >> + #@+node:<< extract task in activity path >> + #@+at + # A performed path can have sub activities appended to the + # task path. + # like: + # + # root.parent1.parent2.task.subactivity + # + # here rhe correct task path is: + # + # root.parent1.parent2.task + # + #@-at + #@@code + orpath = rpath + while not task: + #path can specify a sub module + #find the correct path to the module + try: + last_dot = rpath.rindex(".", 0, len(rpath)) + except ValueError: + break + + rpath = rpath[:last_dot] + task = self.get_task(rpath) + + item = list(item) + item.append(orpath[len(rpath):]) + #@nonl + #@-node:<< extract task in activity path >> + #@nl + + if not task or task.children: + self._warn("The performance data contain " + "a task with id '%s'. But such " + "a task does not exist in your " + "project." 
% path) + continue + + if not isinstance(task.performed, list): + task.performed = list(task.performed) + + task.performed.append(item[1:]) + #@nonl + #@-node:_distribute_performed + #@-others +#@-node:class BalancedProject +#@+node:class AdjustedProject +class AdjustedProject(_AllocationPoject): + """ + Generates a project with allocated resources. The tasks are + adjusted to the actual tracking data and balanced to fit the + resources load conditions. + """ + #@ << class AdjustedProject declarations >> + #@+node:<< class AdjustedProject declarations >> + __call_completion__ = 'AdjustedProject(|base_project)' + + #@-node:<< class AdjustedProject declarations >> + #@nl + #@ @+others + #@+node:__init__ + def __init__(self, base_project): + _AllocationPoject.__init__(self, base_project._function, + base_project.scenario, + base_project.id) + + self.balance = base_project.balance + self.has_actual_data = base_project.has_actual_data + self.allocate(base_project) + for t in self: + t._is_frozen = True + t._recalc_properties() + t.check() + + self._restore_globals() + + + #@-node:__init__ + #@+node:allocate + def allocate(self, base): + balancing_list = self._get_balancing_list() + scenario = self.scenario + cal = self.calendar + now = cal.now + + #for t in balancing_list: + # print t.path + + #@ << free the resources, we have to rebook >> + #@+node:<< free the resources, we have to rebook >> + for t in balancing_list: + src = base.get_task(t.path) + if src.end > now or src.complete < 100: + for r in src._iter_booked_resources(): + r.unbook_task(src) + #@nonl + #@-node:<< free the resources, we have to rebook >> + #@nl + + for t in balancing_list: + src = base.get_task(t.path) + + if src.end <= now and src.complete == 100: + #@ << copy the attribs of complete tasks >> + #@+node:<< copy the attribs of complete tasks >> + t.effort = src.effort + t.load = src.load + t.start = src.start + t.end = src.end + t.done = src.done + t.todo = src.todo + t.booked_resource = src.booked_resource + t.performed_resource = src.performed_resource + t._unfreeze("length") + t._unfreeze("duration") + #@nonl + #@-node:<< copy the attribs of complete tasks >> + #@nl + continue + + t._compile([], True) + if not t.resource or t.milestone or t.children: + continue + + # now allocate the uncomplete tasks + #@ << allocate performed data >> + #@+node:<< allocate performed data >> + try: + t._performed = src._performed + t._allocate_performed(t._performed) + except AttributeError: + pass + #@nonl + #@-node:<< allocate performed data >> + #@nl + allocator = _allocators[t.balance] + + if src.start >= now: + #@ << allocate tasks, that have not begun yet >> + #@+node:<< allocate tasks, that have not begun yet >> + min_val = None + min_state = None + for p in range(t.resource._permutation_count()): + state = t._test_allocation(p, allocator) + if not state: continue + + to_minimize = state[0] + if not min_val or min_val > to_minimize: + min_val = to_minimize + min_state = state + + if min_state: + t._allocate(min_state, allocator) + elif t.performed_start: + t.start = t._to_start(t.performed_start) + t.end = t._to_end(t.performed_end) + #@-node:<< allocate tasks, that have not begun yet >> + #@nl + else: + #@ << allocate tasks, that are allready at work >> + #@+node:<< allocate tasks, that are allready at work >> + if t.__dict__.has_key("effort"): + t.effort = t._to_delta(src.done + src.todo).round() + + resource = src.booked_resource or src.performed_resource + state = allocator.test_allocation(t, resource) + if state: + 
t._allocate(state, allocator) + #@nonl + #@-node:<< allocate tasks, that are allready at work >> + #@nl + + self.unfreeze_parents() + #@nonl + #@-node:allocate + #@-others +#@-node:class AdjustedProject +#@-node:Projects +#@-others + + +""" + Atttribute mit Bedeutung: + + calendar + -------- + minimum_time_unit |int in minutes| + working_days_per_week |int in days | + working_days_per_month|int in days | + working_days_per_year |int in days | + working_hours_per_day |int in hours | + vacation | [ one_day, (from, to), .. ] | + working_days + now + + + + Task + ----- + load + start + end + length + effort + duration + resource + booked_resource + + milestone + complete + done + todo + priority + efficiency + buffer + + children + depth + index + path + dont_inherit + + performed_effort + performed_end + performed_start + + sum() + min() + max() + costs() + indent_name() + max_load + + copy_src (set: copy all attributes of another task + get: reference of copy) + + balance + + for gantt + ----- + line + accumulate + + + + + Resource + ---------- + efficiency + load + vacation + max_load + +""" +#@-node:@file task.py +#@-leo diff --git a/addons/resource/faces/timescale.py b/addons/resource/faces/timescale.py new file mode 100644 index 00000000000..215766eb958 --- /dev/null +++ b/addons/resource/faces/timescale.py @@ -0,0 +1,113 @@ +############################################################################ +# Copyright (C) 2005 by Reithinger GmbH +# mreithinger@web.de +# +# This file is part of faces. +# +# faces is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# faces is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the +# Free Software Foundation, Inc., +# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+############################################################################ + +import faces.pcalendar as pcal +import matplotlib.cbook as cbook +import datetime +import sys + + +class TimeScale(object): + def __init__(self, calendar): + self.data_calendar = calendar + self._create_chart_calendar() + self.now = self.to_num(self.data_calendar.now) + + + def to_datetime(self, xval): + return xval.to_datetime() + + + def to_num(self, date): + return self.chart_calendar.WorkingDate(date) + + + def is_free_slot(self, value): + dt1 = self.chart_calendar.to_starttime(value) + dt2 = self.data_calendar.to_starttime\ + (self.data_calendar.from_datetime(dt1)) + return dt1 != dt2 + + + def is_free_day(self, value): + dt1 = self.chart_calendar.to_starttime(value) + dt2 = self.data_calendar.to_starttime\ + (self.data_calendar.from_datetime(dt1)) + return dt1.date() != dt2.date() + + + def _create_chart_calendar(self): + dcal = self.data_calendar + ccal = self.chart_calendar = pcal.Calendar() + ccal.minimum_time_unit = 1 + + #pad worktime slots of calendar (all days should be equally long) + slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots)) + day_sum = lambda day: slot_sum(dcal.get_working_times(day)) + + max_work_time = max(map(day_sum, range(7))) + + #working_time should have 2/3 + sum_time = 3 * max_work_time / 2 + + #now create timeslots for ccal + def create_time_slots(day): + src_slots = dcal.get_working_times(day) + slots = [0, src_slots, 24*60] + slots = tuple(cbook.flatten(slots)) + slots = zip(slots[:-1], slots[1:]) + + #balance non working slots + work_time = slot_sum(src_slots) + non_work_time = sum_time - work_time + + non_slots = filter(lambda s: s not in src_slots, slots) + non_slots = map(lambda s: (s[1] - s[0], s), non_slots) + non_slots.sort() + + slots = [] + i = 0 + for l, s in non_slots: + delta = non_work_time / (len(non_slots) - i) + delta = min(l, delta) + non_work_time -= delta + slots.append((s[0], s[0] + delta)) + i += 1 + + slots.extend(src_slots) + slots.sort() + return slots + + min_delta = sys.maxint + for i in range(7): + slots = create_time_slots(i) + ccal.working_times[i] = slots + min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots))) + + ccal._recalc_working_time() + + self.slot_delta = min_delta + self.day_delta = sum_time + self.week_delta = ccal.week_time + + +_default_scale = TimeScale(pcal._default_calendar) diff --git a/addons/resource/faces/utils.py b/addons/resource/faces/utils.py new file mode 100644 index 00000000000..f7d61fb1dc9 --- /dev/null +++ b/addons/resource/faces/utils.py @@ -0,0 +1,124 @@ +############################################################################ +# Copyright (C) 2005 by Reithinger GmbH +# mreithinger@web.de +# +# This file is part of faces. +# +# faces is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# faces is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the +# Free Software Foundation, Inc., +# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+############################################################################ + +import observer +import os.path +import sys +import os.path + +_call_dir = os.path.abspath(os.path.dirname(sys.argv[0])) + +def get_installation_path(): + try: + if sys.frozen: + path = _call_dir + else: + raise AttributeError() + except AttributeError: + path = os.path.abspath(observer.__file__) + path = os.path.split(path)[0] + + path = os.path.normcase(path) + return path + + +def get_resource_path(): + try: + if sys.frozen: + path = _call_dir + path = os.path.join(path, "resources", "faces", "gui") + else: + raise AttributeError() + except AttributeError: + path = get_installation_path() + path = os.path.join(path, "gui", "resources") + + path = os.path.normcase(path) + return path + + +def get_template_path(): + try: + if sys.frozen: + path = _call_dir + path = os.path.join(path, "resources", "faces", "templates") + else: + raise AttributeError() + except AttributeError: + path = get_installation_path() + path = os.path.join(path, "templates") + + path = os.path.normcase(path) + return path + + +def get_howtos_path(): + try: + if sys.frozen: + path = _call_dir + else: + raise AttributeError() + except AttributeError: + path = get_installation_path() + + path = os.path.join(path, "howtos") + path = os.path.normcase(path) + return path + + + +def flatten(items): + if isinstance(items, tuple): + items = list(items) + + if not isinstance(items, list): + yield items + + stack = [iter(items)] + while stack: + for item in stack[-1]: + if isinstance(item, tuple): + item = list(item) + + if isinstance(item, list): + stack.append(iter(item)) + break + yield item + else: + stack.pop() + + +def do_yield(): + pass + + +def progress_start(title, maximum, message=""): + pass + +def progress_update(value, message=""): + pass + +def progress_end(): + pass + + +
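For orientation, the sketch below shows how a project definition for this
library is expected to look; it follows the layout that Task.snapshot()
writes out (one nested function per task, attribute assignments in the
function body, Resource subclasses for the staff). It is an illustration
only: the import line, the date and working-time literals and the "rate"
attribute on the resource are assumptions, not something this patch defines.

    from faces import Project, BalancedProject, Resource  # in this addon: addons/resource/faces

    class Developer(Resource):
        title = "Developer"
        rate = 80.0                       # read by Task.costs("rate", mode) as the cost rate

    def build_release():                  # top task handed to Project()/BalancedProject()
        start = "2010-02-01"              # assumed date literal format
        resource = Developer

        def specification():
            effort = "5d"                 # effort literals like "1d" (see Task.__check_task)
            performed = [                 # tracking data, format checked by _convert_performed:
                ("Developer", "20100201 09:00", "20100201 13:00", "240M"),
            ]                             # (res_name, start_literal, end_literal, working_time)

        def implementation():
            effort = "10d"
            start = up.specification.end  # 'up' and 'root' are injected when the task is compiled

    unscheduled = Project(build_release)        # schedule without allocating resources
    scheduled = BalancedProject(build_release)  # allocate and balance the resources
    total = scheduled.costs("rate", "pe")       # estimated plus performed costs, rounded to 2 digits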