bitbake: Sync with 1.8 branch

git-svn-id: https://svn.o-hand.com/repos/poky/trunk@4463 311d38ba-8fff-0310-9ca6-ca027cbcb966
Richard Purdie 2008-05-13 07:53:18 +00:00
parent 152f14b598
commit e14d7dcbee
5 changed files with 52 additions and 35 deletions

ChangeLog

@@ -33,6 +33,14 @@ Changes in BitBake 1.8.x:
- Improve runtime PREFERRED_PROVIDERS warning message
- Add BB_STAMP_WHITELIST option which contains a list of stamps to ignore when
checking stamp dependencies and using a BB_STAMP_POLICY of "whitelist"
+- No longer weight providers on the basis of a package being "already staged", since
+  that weighting made builds non-deterministic.
+- Flush stdout/stderr before forking to fix duplicate console output
+- Make sure recrdeps tasks include all inter-task dependencies of a given fn
+- Add bb.runqueue.check_stamp_fn() for use by packaged-staging
+- Add PERSISTENT_DIR to store the PersistData in a persistent
+  directory != the cache dir.
+- Add md5 and sha256 checksum generation functions to utils.py (sketched below)
Changes in BitBake 1.8.10:
- Psyco is available only for x86 - do not use it on other architectures.
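The 1.8.x entry above adds md5 and sha256 checksum helpers to utils.py, but utils.py itself is not part of this diff. The following is only a hedged sketch of what chunked file hashing of that kind typically looks like in Python; the function names here are illustrative, not necessarily the ones BitBake uses:

import hashlib

def _hash_file(path, hasher, blocksize=65536):
    # Hash the file in fixed-size chunks so large artefacts need not fit in memory.
    with open(path, "rb") as f:
        block = f.read(blocksize)
        while block:
            hasher.update(block)
            block = f.read(blocksize)
    return hasher.hexdigest()

def md5_file(path):
    # Illustrative counterpart of the md5 helper noted in the ChangeLog entry.
    return _hash_file(path, hashlib.md5())

def sha256_file(path):
    # Illustrative counterpart of the sha256 helper noted in the ChangeLog entry.
    return _hash_file(path, hashlib.sha256())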

lib/bb/persist_data.py

@@ -43,9 +43,9 @@ class PersistData:
Why sqlite? It handles all the locking issues for us.
"""
def __init__(self, d):
self.cachedir = bb.data.getVar("CACHE", d, True)
self.cachedir = bb.data.getVar("PERSISTENT_DIR", d, True) or bb.data.getVar("CACHE", d, True)
if self.cachedir in [None, '']:
-bb.msg.fatal(bb.msg.domain.PersistData, "Please set the 'CACHE' variable.")
+bb.msg.fatal(bb.msg.domain.PersistData, "Please set the 'PERSISTENT_DIR' or 'CACHE' variable.")
try:
os.stat(self.cachedir)
except OSError:
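The hunk above makes PersistData prefer PERSISTENT_DIR and only fall back to CACHE. A minimal sketch of exercising that fallback from Python; the addDomain/setValue/getValue calls are assumed to match this version's domain/key API, and the directory path is made up:

import bb.data
import bb.persist_data

d = bb.data.init()
# Point persistent data somewhere other than the cache directory (path is made up).
bb.data.setVar("PERSISTENT_DIR", "/tmp/bitbake-persist", d)

pd = bb.persist_data.PersistData(d)   # PERSISTENT_DIR wins; CACHE is only the fallback
pd.addDomain("EXAMPLE")               # assumed domain/key API of this persist_data version
pd.setValue("EXAMPLE", "last_run", "ok")
print(pd.getValue("EXAMPLE", "last_run"))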

lib/bb/providers.py

@@ -213,34 +213,6 @@ def _filterProviders(providers, item, cfgData, dataCache):
eligible.remove(fn)
eligible = [fn] + eligible
-# look to see if one of them is already staged, or marked as preferred.
-# if so, bump it to the head of the queue
-for p in providers:
-pn = dataCache.pkg_fn[p]
-pe, pv, pr = dataCache.pkg_pepvpr[p]
-stamp = '%s.do_populate_staging' % dataCache.stamp[p]
-if os.path.exists(stamp):
-(newvers, fn) = preferred_versions[pn]
-if not fn in eligible:
-# package was made ineligible by already-failed check
-continue
-oldver = "%s-%s" % (pv, pr)
-if pe > 0:
-oldver = "%s:%s" % (pe, oldver)
-newver = "%s-%s" % (newvers[1], newvers[2])
-if newvers[0] > 0:
-newver = "%s:%s" % (newvers[0], newver)
-if (newver != oldver):
-extra_chat = "%s (%s) already staged but upgrading to %s to satisfy %s" % (pn, oldver, newver, item)
-else:
-extra_chat = "Selecting already-staged %s (%s) to satisfy %s" % (pn, oldver, item)
-bb.msg.note(2, bb.msg.domain.Provider, "%s" % extra_chat)
-eligible.remove(fn)
-eligible = [fn] + eligible
-break
return eligible

lib/bb/runqueue.py

@@ -164,6 +164,12 @@ class RunQueue:
taskname = self.runq_task[task]
return "%s, %s" % (fn, taskname)
+def get_task_id(self, fnid, taskname):
+for listid in range(len(self.runq_fnid)):
+if self.runq_fnid[listid] == fnid and self.runq_task[listid] == taskname:
+return listid
+return None
def circular_depchains_handler(self, tasks):
"""
Some tasks aren't buildable, likely due to circular dependency issues.
@@ -398,8 +404,12 @@ class RunQueue:
return []
if task in recursive_tdepends:
return recursive_tdepends[task]
-rectdepends = [task]
-nextdeps = [task]
+fnid = taskData.tasks_fnid[task]
+taskids = taskData.gettask_ids(fnid)
+rectdepends = taskids
+nextdeps = taskids
while len(nextdeps) != 0:
newdeps = []
for nextdep in nextdeps:
@@ -776,7 +786,7 @@ class RunQueue:
bb.fatal("check_stamps fatal internal error")
return current
-def check_stamp(self, task):
+def check_stamp_task(self, task):
if self.stamppolicy == "perfile":
fulldeptree = False
@@ -791,10 +801,12 @@ class RunQueue:
stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
# If the stamp is missing its not current
if not os.access(stampfile, os.F_OK):
bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s not available\n" % stampfile)
return False
# If its a 'nostamp' task, it's not current
taskdep = self.dataCache.task_deps[fn]
if 'nostamp' in taskdep and task in taskdep['nostamp']:
bb.msg.debug(2, bb.msg.domain.RunQueue, "%s.%s is nostamp\n" % (fn, taskname))
return False
iscurrent = True
@@ -808,8 +820,10 @@ class RunQueue:
try:
t2 = os.stat(stampfile2)[stat.ST_MTIME]
if t1 < t2:
bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile,stampfile2))
iscurrent = False
except:
bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2 ,stampfile))
iscurrent = False
return iscurrent
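The added debug lines above fire while comparing a task's stamp against the stamps of its dependencies. Stripped of RunQueue bookkeeping, the up-to-date check being instrumented boils down to an mtime comparison along these lines (illustrative sketch, not the BitBake code):

import os
import stat

def stamp_is_current(stampfile, dep_stampfiles):
    # The stamp must exist, and must not be older than any dependency's stamp.
    if not os.access(stampfile, os.F_OK):
        return False
    t1 = os.stat(stampfile)[stat.ST_MTIME]
    for dep in dep_stampfiles:
        try:
            t2 = os.stat(dep)[stat.ST_MTIME]
        except OSError:
            return False   # unreadable/missing dependency stamp: treat as out of date
        if t1 < t2:
            return False   # a dependency was rebuilt after this task last ran
    return True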
@@ -907,7 +921,7 @@ class RunQueue:
fn = self.taskData.fn_index[self.runq_fnid[task]]
taskname = self.runq_task[task]
-if self.check_stamp(task):
+if self.check_stamp_task(task):
bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
self.runq_running[task] = 1
self.task_complete(task)
@@ -916,6 +930,8 @@ class RunQueue:
continue
bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.active_builds + 1, len(self.runq_fnid), task, self.get_user_idstring(task)))
+sys.stdout.flush()
+sys.stderr.flush()
try:
pid = os.fork()
except OSError, e:
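The sys.stdout.flush()/sys.stderr.flush() calls added just before os.fork() fix the duplicate console output noted in the ChangeLog: anything still sitting in the parent's stdio buffers is copied into the child by fork() and eventually written by both processes. A standalone illustration of the effect, not BitBake code:

import os
import sys

sys.stdout.write("about to fork")   # no newline, so this can sit in the stdio buffer

# Without these flushes the buffered text above is duplicated into the child
# and printed twice, once per process, when each buffer is finally flushed.
sys.stdout.flush()
sys.stderr.flush()

pid = os.fork()
if pid == 0:
    sys.stdout.write(" (child)\n")
    sys.stdout.flush()
    os._exit(0)
os.waitpid(pid, 0)
sys.stdout.write(" (parent)\n")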
@@ -930,7 +946,8 @@ class RunQueue:
newsi = os.open('/dev/null', os.O_RDWR)
os.dup2(newsi, sys.stdin.fileno())
self.cooker.configuration.cmd = taskname[3:]
-try:
+bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self, self.cooker.configuration.data)
+try:
self.cooker.tryBuild(fn)
except bb.build.EventException:
bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
@@ -1023,3 +1040,13 @@ class RunQueue:
self.runq_weight[task],
self.runq_depends[task],
self.runq_revdeps[task]))
+def check_stamp_fn(fn, taskname, d):
+rq = bb.data.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", d)
+fnid = rq.taskData.getfn_id(fn)
+taskid = rq.get_task_id(fnid, taskname)
+if taskid is not None:
+return rq.check_stamp_task(taskid)
+return None
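check_stamp_fn() is the hook exported for packaged-staging mentioned in the ChangeLog: it looks up the runqueue task for a given recipe file and task name, via the live RunQueue instance stashed in __RUNQUEUE_DO_NOT_USE_EXTERNALLY above, and reports whether that task's stamp is current. A hedged sketch of a metadata-side caller; the wrapper name and the choice of do_populate_staging as the task are illustrative:

import bb.runqueue

def staging_is_current(fn, d):
    # fn: the recipe file providing the package, d: the metadata datastore.
    # Returns True/False, or None when the live runqueue knows no such task.
    return bb.runqueue.check_stamp_fn(fn, "do_populate_staging", d)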

lib/bb/taskdata.py

@@ -91,6 +91,16 @@ class TaskData:
return self.fn_index.index(name)
+def gettask_ids(self, fnid):
+"""
+Return an array of the ID numbers matching a given fnid.
+"""
+ids = []
+if fnid in self.tasks_lookup:
+for task in self.tasks_lookup[fnid]:
+ids.append(self.tasks_lookup[fnid][task])
+return ids
def gettask_id(self, fn, task, create = True):
"""
Return an ID number for the task matching fn and task.