2006-11-29 22:52:37 +00:00
#!/usr/bin/env python
2006-11-16 15:02:15 +00:00
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake ' RunQueue ' implementation
Handles preparation and execution of a queue of tasks
"""
2007-09-02 14:10:08 +00:00
# Copyright (C) 2006-2007 Richard Purdie
2007-01-08 23:53:01 +00:00
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
2010-06-10 17:35:31 +00:00
import os
import sys
2010-08-16 15:37:29 +00:00
import subprocess
2007-04-01 15:04:49 +00:00
import signal
2008-03-03 22:01:45 +00:00
import stat
2010-01-21 23:46:20 +00:00
import fcntl
2010-08-19 10:36:29 +00:00
import copy
2010-06-10 15:05:52 +00:00
import logging
2010-06-10 17:35:31 +00:00
import bb
2010-06-10 15:05:52 +00:00
from bb import msg , data , event
bblogger = logging . getLogger ( " BitBake " )
2010-06-10 17:35:31 +00:00
logger = logging . getLogger ( " BitBake.RunQueue " )
2006-11-16 15:02:15 +00:00
2010-09-27 14:57:13 +00:00
try :
import cPickle as pickle
except ImportError :
import pickle
2010-06-10 17:35:31 +00:00
logger . info ( " Importing cPickle failed. Falling back to a very slow implementation. " )
2010-09-27 14:57:13 +00:00
2007-04-01 15:04:49 +00:00
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # Counters for the lifecycle of the queued tasks
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def taskFailed(self):
        # A failed task is no longer active
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number=1):
        self.active -= number
        self.completed += number

    def taskSkipped(self, number=1):
        # NOTE: skipping raises the active count as well as the skipped
        # count, mirroring the original accounting.
        self.active += number
        self.skipped += number

    def taskActive(self):
        self.active += 1
2010-03-24 23:56:12 +00:00
# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
runQueueChildProcess = 10
2010-01-20 18:46:02 +00:00
2010-07-22 17:54:58 +00:00
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        # BUGFIX: the per-task arrays live on RunQueueData (rqdata), not on
        # the RunQueue itself -- next_buildable_tasks() below already counts
        # tasks via self.rqdata.runq_fnid, so use the same source here.
        numTasks = len(self.rqdata.runq_fnid)

        self.prio_map = []
        self.prio_map.extend(range(numTasks))

    def next_buildable_tasks(self):
        """
        Yield each task id that is buildable and not already running, in
        priority-map order.
        """
        for tasknum in range(len(self.rqdata.runq_fnid)):
            taskid = self.prio_map[tasknum]
            if self.rq.runq_running[taskid] == 1:
                continue
            if self.rq.runq_buildable[taskid] == 1:
                yield taskid

    def next(self):
        """
        Return the id of the task we should build next, or None when no
        task is buildable or the maximum number of tasks is already active.
        """
        if self.rq.stats.active < self.rq.number_tasks:
            return next(self.next_buildable_tasks(), None)
2008-01-06 16:51:51 +00:00
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        self.rq = runqueue
        self.rqdata = rqdata

        weights = self.rqdata.runq_weight
        # Heaviest tasks first; equal weights break ties on the higher task
        # id, which reproduces the historical ascending-sort-then-reverse
        # ordering exactly.
        self.prio_map = sorted(range(len(weights)),
                               key=lambda taskid: (weights[taskid], taskid),
                               reverse=True)
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so that once a
    given .bb file starts to build, it is completed as quickly as possible.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)

        # FIXME - whilst this groups all fnids together it does not reorder
        # the fnid groups optimally.
        remaining = self.prio_map[:]
        ordered = []
        while remaining:
            head = remaining.pop(0)
            ordered.append(head)
            head_fnid = self.rqdata.runq_fnid[head]
            # Pull every other task from the same file forward, keeping the
            # relative order they already had in the priority map.
            ordered.extend(tid for tid in remaining
                           if self.rqdata.runq_fnid[tid] == head_fnid)
            remaining = [tid for tid in remaining
                         if self.rqdata.runq_fnid[tid] != head_fnid]
        self.prio_map = ordered
2010-08-18 10:30:53 +00:00
class RunQueueData :
2006-11-16 15:02:15 +00:00
"""
BitBake Run Queue implementation
"""
2010-08-18 10:30:53 +00:00
def __init__ ( self , rq , cooker , cfgData , dataCache , taskData , targets ) :
2007-04-01 15:04:49 +00:00
self . cooker = cooker
self . dataCache = dataCache
self . taskData = taskData
self . targets = targets
2010-08-18 10:30:53 +00:00
self . rq = rq
2007-04-01 15:04:49 +00:00
2008-05-05 09:21:49 +00:00
self . stampwhitelist = bb . data . getVar ( " BB_STAMP_WHITELIST " , cfgData , 1 ) or " "
2010-08-18 10:30:53 +00:00
self . multi_provider_whitelist = ( bb . data . getVar ( " MULTI_PROVIDER_WHITELIST " , cfgData , 1 ) or " " ) . split ( )
2010-07-22 18:27:10 +00:00
self . schedulers = set ( obj for obj in globals ( ) . itervalues ( )
if type ( obj ) is type and issubclass ( obj , RunQueueScheduler ) )
user_schedulers = bb . data . getVar ( " BB_SCHEDULERS " , cfgData , True )
if user_schedulers :
for sched in user_schedulers . split ( ) :
if not " . " in sched :
bb . note ( " Ignoring scheduler ' %s ' from BB_SCHEDULERS: not an import " % sched )
continue
modname , name = sched . rsplit ( " . " , 1 )
try :
module = __import__ ( modname , fromlist = ( name , ) )
except ImportError , exc :
logger . critical ( " Unable to import scheduler ' %s ' from ' %s ' : %s " % ( name , modname , exc ) )
raise SystemExit ( 1 )
else :
self . schedulers . add ( getattr ( module , name ) )
2010-08-18 10:30:53 +00:00
self . reset ( )
2006-11-16 15:02:15 +00:00
2010-08-18 10:30:53 +00:00
def reset ( self ) :
2006-11-16 15:02:15 +00:00
self . runq_fnid = [ ]
self . runq_task = [ ]
self . runq_depends = [ ]
self . runq_revdeps = [ ]
2010-08-31 13:49:43 +00:00
self . runq_hash = [ ]
2010-01-20 18:46:02 +00:00
# Historical note (upstream commit 00eaf76fdc32eb515995b47dfa69eb90ca904b37,
# "bitbake: runqueue.py: improve printing dependent tasks"):
# Print names instead of Task-IDs (and not mentioning they're task ids).
# Previously we printed e.g.:
#   Dependency loop #1 found:
#     Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (depends: Set([88, 282, 92, 87]))
# Now we say:
#   Dependency loop #1 found:
#     Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (dependent Task-IDs ['busybox-native, do_patch', 'update-rc.d, do_populate_staging', 'busybox-native, do_populate_staging', 'shasum-native.bb, do_populate_staging', 'busybox-native, do_unpack'])
# Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
# Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
def runq_depends_names ( self , ids ) :
import re
ret = [ ]
for id in self . runq_depends [ ids ] :
nam = os . path . basename ( self . get_user_idstring ( id ) )
nam = re . sub ( " _[^,]*, " , " , " , nam )
ret . extend ( [ nam ] )
return ret
2007-04-01 15:04:49 +00:00
def get_user_idstring ( self , task ) :
fn = self . taskData . fn_index [ self . runq_fnid [ task ] ]
2006-11-16 15:02:15 +00:00
taskname = self . runq_task [ task ]
return " %s , %s " % ( fn , taskname )
2008-05-13 07:53:18 +00:00
def get_task_id ( self , fnid , taskname ) :
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2008-05-13 07:53:18 +00:00
if self . runq_fnid [ listid ] == fnid and self . runq_task [ listid ] == taskname :
return listid
return None
2008-01-06 16:51:51 +00:00
def circular_depchains_handler ( self , tasks ) :
"""
Some tasks aren ' t buildable, likely due to circular dependency issues.
Identify the circular dependencies and print them in a user readable format .
"""
from copy import deepcopy
valid_chains = [ ]
explored_deps = { }
msgs = [ ]
def chain_reorder ( chain ) :
"""
Reorder a dependency chain so the lowest task id is first
"""
lowest = 0
new_chain = [ ]
2010-11-19 20:39:22 +00:00
for entry in xrange ( len ( chain ) ) :
2008-01-06 16:51:51 +00:00
if chain [ entry ] < chain [ lowest ] :
lowest = entry
new_chain . extend ( chain [ lowest : ] )
new_chain . extend ( chain [ : lowest ] )
return new_chain
def chain_compare_equal ( chain1 , chain2 ) :
"""
Compare two dependency chains and see if they ' re the same
"""
if len ( chain1 ) != len ( chain2 ) :
return False
2010-11-19 20:39:22 +00:00
for index in xrange ( len ( chain1 ) ) :
2008-01-06 16:51:51 +00:00
if chain1 [ index ] != chain2 [ index ] :
return False
return True
2010-03-24 23:56:12 +00:00
2008-01-06 16:51:51 +00:00
def chain_array_contains ( chain , chain_array ) :
"""
Return True if chain_array contains chain
"""
for ch in chain_array :
if chain_compare_equal ( ch , chain ) :
return True
return False
def find_chains ( taskid , prev_chain ) :
prev_chain . append ( taskid )
total_deps = [ ]
total_deps . extend ( self . runq_revdeps [ taskid ] )
for revdep in self . runq_revdeps [ taskid ] :
if revdep in prev_chain :
idx = prev_chain . index ( revdep )
# To prevent duplicates, reorder the chain to start with the lowest taskid
# and search through an array of those we've already printed
chain = prev_chain [ idx : ]
new_chain = chain_reorder ( chain )
if not chain_array_contains ( new_chain , valid_chains ) :
valid_chains . append ( new_chain )
msgs . append ( " Dependency loop # %d found: \n " % len ( valid_chains ) )
for dep in new_chain :
bitbake: runqueue.py: improve printing dependent tasks
Print names instead of Task-IDs (and not mentioning they're task ids).
Previously we printed e.g.:
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (depends: Set([88, 282, 92, 87]))
Now we say
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (dependent Task-IDs ['busybox-native, do_patch', 'update-rc.d, do_populate_staging', 'busybox-native, do_populate_staging', 'shasum-native.bb, do_populate_staging', 'busybox-native, do_unpack'])
(Bitbake rev: 00eaf76fdc32eb515995b47dfa69eb90ca904b37)
Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
2010-02-08 17:50:34 +00:00
msgs . append ( " Task %s ( %s ) (dependent Tasks %s ) \n " % ( dep , self . get_user_idstring ( dep ) , self . runq_depends_names ( dep ) ) )
2008-01-06 16:51:51 +00:00
msgs . append ( " \n " )
if len ( valid_chains ) > 10 :
msgs . append ( " Aborted dependency loops search after 10 matches. \n " )
return msgs
continue
scan = False
if revdep not in explored_deps :
scan = True
elif revdep in explored_deps [ revdep ] :
scan = True
else :
for dep in prev_chain :
if dep in explored_deps [ revdep ] :
scan = True
if scan :
find_chains ( revdep , deepcopy ( prev_chain ) )
for dep in explored_deps [ revdep ] :
if dep not in total_deps :
total_deps . append ( dep )
explored_deps [ taskid ] = total_deps
for task in tasks :
find_chains ( task , [ ] )
return msgs
def calculate_task_weights ( self , endpoints ) :
"""
2010-03-24 23:56:12 +00:00
Calculate a number representing the " weight " of each task . Heavier weighted tasks
2008-01-06 16:51:51 +00:00
have more dependencies and hence should be executed sooner for maximum speed .
2010-12-22 15:41:32 +00:00
This function also sanity checks the task list finding tasks that are not
2008-01-06 16:51:51 +00:00
possible to execute due to circular dependencies .
"""
numTasks = len ( self . runq_fnid )
weight = [ ]
deps_left = [ ]
task_done = [ ]
2010-11-19 20:39:22 +00:00
for listid in xrange ( numTasks ) :
2008-01-06 16:51:51 +00:00
task_done . append ( False )
weight . append ( 0 )
deps_left . append ( len ( self . runq_revdeps [ listid ] ) )
for listid in endpoints :
weight [ listid ] = 1
task_done [ listid ] = True
2010-04-12 00:03:55 +00:00
while True :
2008-01-06 16:51:51 +00:00
next_points = [ ]
for listid in endpoints :
for revdep in self . runq_depends [ listid ] :
weight [ revdep ] = weight [ revdep ] + weight [ listid ]
deps_left [ revdep ] = deps_left [ revdep ] - 1
if deps_left [ revdep ] == 0 :
next_points . append ( revdep )
task_done [ revdep ] = True
endpoints = next_points
if len ( next_points ) == 0 :
2010-03-24 23:56:12 +00:00
break
2008-01-06 16:51:51 +00:00
# Circular dependency sanity check
problem_tasks = [ ]
2010-11-19 20:39:22 +00:00
for task in xrange ( numTasks ) :
2008-01-06 16:51:51 +00:00
if task_done [ task ] is False or deps_left [ task ] != 0 :
problem_tasks . append ( task )
2010-12-17 21:56:08 +00:00
logger . debug ( 2 , " Task %s ( %s ) is not buildable " , task , self . get_user_idstring ( task ) )
logger . debug ( 2 , " (Complete marker was %s and the remaining dependency count was %s ) \n " , task_done [ task ] , deps_left [ task ] )
2008-01-06 16:51:51 +00:00
if problem_tasks :
message = " Unbuildable tasks were found. \n "
message = message + " These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks. \n \n "
message = message + " Identifying dependency loops (this may take a short while)... \n "
2010-06-10 17:35:31 +00:00
logger . error ( message )
2008-01-06 16:51:51 +00:00
msgs = self . circular_depchains_handler ( problem_tasks )
message = " \n "
for msg in msgs :
message = message + msg
bb . msg . fatal ( bb . msg . domain . RunQueue , message )
return weight
2010-08-18 10:30:53 +00:00
def prepare ( self ) :
2006-11-16 15:02:15 +00:00
"""
2010-03-24 23:56:12 +00:00
Turn a set of taskData into a RunQueue and compute data needed
2006-11-16 15:02:15 +00:00
to optimise the execution order .
"""
runq_build = [ ]
2008-03-14 11:44:34 +00:00
recursive_tdepends = { }
2009-07-21 18:44:23 +00:00
runq_recrdepends = [ ]
tdepends_fnid = { }
2006-11-16 15:02:15 +00:00
2007-04-01 15:04:49 +00:00
taskData = self . taskData
2007-05-22 11:50:37 +00:00
if len ( taskData . tasks_name ) == 0 :
# Nothing to do
2010-08-24 23:58:23 +00:00
return 0
2007-05-22 11:50:37 +00:00
2010-06-10 17:35:31 +00:00
logger . info ( " Preparing runqueue " )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Step A - Work out a list of tasks to run
#
2009-07-21 18:44:23 +00:00
# Taskdata gives us a list of possible providers for every build and run
2010-03-24 23:56:12 +00:00
# target ordered by priority. It also gives information on each of those
2009-07-21 18:44:23 +00:00
# providers.
2008-01-06 16:51:51 +00:00
#
2010-03-24 23:56:12 +00:00
# To create the actual list of tasks to execute we fix the list of
# providers and then resolve the dependencies into task IDs. This
# process is repeated for each type of dependency (tdepends, deptask,
2008-01-06 16:51:51 +00:00
# rdeptast, recrdeptask, idepends).
2009-07-21 21:38:53 +00:00
def add_build_dependencies ( depids , tasknames , depends ) :
for depid in depids :
# Won't be in build_targets if ASSUME_PROVIDED
if depid not in taskData . build_targets :
continue
depdata = taskData . build_targets [ depid ] [ 0 ]
if depdata is None :
continue
dep = taskData . fn_index [ depdata ]
for taskname in tasknames :
taskid = taskData . gettask_id ( dep , taskname , False )
if taskid is not None :
depends . append ( taskid )
def add_runtime_dependencies ( depids , tasknames , depends ) :
for depid in depids :
if depid not in taskData . run_targets :
continue
depdata = taskData . run_targets [ depid ] [ 0 ]
if depdata is None :
continue
dep = taskData . fn_index [ depdata ]
for taskname in tasknames :
taskid = taskData . gettask_id ( dep , taskname , False )
if taskid is not None :
depends . append ( taskid )
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( taskData . tasks_name ) ) :
2009-07-21 18:44:23 +00:00
depends = [ ]
recrdepends = [ ]
2006-11-16 15:02:15 +00:00
fnid = taskData . tasks_fnid [ task ]
fn = taskData . fn_index [ fnid ]
2007-04-01 15:04:49 +00:00
task_deps = self . dataCache . task_deps [ fn ]
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . debug ( 2 , " Processing %s : %s " , fn , taskData . tasks_name [ task ] )
2009-07-21 18:44:23 +00:00
2006-11-16 15:02:15 +00:00
if fnid not in taskData . failed_fnids :
2010-03-24 23:56:12 +00:00
# Resolve task internal dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. addtask before X after Y
2006-11-16 15:02:15 +00:00
depends = taskData . tasks_tdepends [ task ]
2010-03-24 23:56:12 +00:00
# Resolve 'deptask' dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[deptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS)
2006-11-16 15:02:15 +00:00
if ' deptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' deptask ' ] :
2007-10-30 12:03:07 +00:00
tasknames = task_deps [ ' deptask ' ] [ taskData . tasks_name [ task ] ] . split ( )
2009-07-21 21:38:53 +00:00
add_build_dependencies ( taskData . depids [ fnid ] , tasknames , depends )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve 'rdeptask' dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[rdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all RDEPENDS)
2006-11-16 15:02:15 +00:00
if ' rdeptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' rdeptask ' ] :
taskname = task_deps [ ' rdeptask ' ] [ taskData . tasks_name [ task ] ]
2009-07-21 21:38:53 +00:00
add_runtime_dependencies ( taskData . rdepids [ fnid ] , [ taskname ] , depends )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve inter-task dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[depends] = "targetname:do_someothertask"
# (makes sure sometask runs after targetname's someothertask)
2009-07-21 18:44:23 +00:00
if fnid not in tdepends_fnid :
tdepends_fnid [ fnid ] = set ( )
2007-04-01 15:04:49 +00:00
idepends = taskData . tasks_idepends [ task ]
2008-03-14 11:44:34 +00:00
for ( depid , idependtask ) in idepends :
2007-04-01 15:04:49 +00:00
if depid in taskData . build_targets :
2007-08-05 22:43:24 +00:00
# Won't be in build_targets if ASSUME_PROVIDED
2007-04-01 15:04:49 +00:00
depdata = taskData . build_targets [ depid ] [ 0 ]
2007-08-16 09:55:21 +00:00
if depdata is not None :
2007-04-01 15:04:49 +00:00
dep = taskData . fn_index [ depdata ]
2010-08-06 23:19:12 +00:00
taskid = taskData . gettask_id ( dep , idependtask , False )
if taskid is None :
bb . msg . fatal ( bb . msg . domain . RunQueue , " Task %s in %s depends upon nonexistant task %s in %s " % ( taskData . tasks_name [ task ] , fn , idependtask , dep ) )
2009-07-21 18:44:23 +00:00
depends . append ( taskid )
if depdata != fnid :
tdepends_fnid [ fnid ] . add ( taskid )
# Resolve recursive 'recrdeptask' dependencies (A)
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
2009-07-21 18:44:23 +00:00
# We cover the recursive part of the dependencies below
2006-11-16 15:02:15 +00:00
if ' recrdeptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' recrdeptask ' ] :
2007-01-08 23:53:01 +00:00
for taskname in task_deps [ ' recrdeptask ' ] [ taskData . tasks_name [ task ] ] . split ( ) :
2009-07-21 18:44:23 +00:00
recrdepends . append ( taskname )
2009-07-21 21:38:53 +00:00
add_build_dependencies ( taskData . depids [ fnid ] , [ taskname ] , depends )
add_runtime_dependencies ( taskData . rdepids [ fnid ] , [ taskname ] , depends )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Rmove all self references
2006-11-16 15:02:15 +00:00
if task in depends :
newdep = [ ]
2010-06-10 17:35:31 +00:00
logger . debug ( 2 , " Task %s ( %s %s ) contains self reference! %s " , task , taskData . fn_index [ taskData . tasks_fnid [ task ] ] , taskData . tasks_name [ task ] , depends )
2006-11-16 15:02:15 +00:00
for dep in depends :
2010-03-24 23:56:12 +00:00
if task != dep :
newdep . append ( dep )
2006-11-16 15:02:15 +00:00
depends = newdep
self . runq_fnid . append ( taskData . tasks_fnid [ task ] )
self . runq_task . append ( taskData . tasks_name [ task ] )
2009-05-12 15:53:22 +00:00
self . runq_depends . append ( set ( depends ) )
self . runq_revdeps . append ( set ( ) )
2010-08-31 13:49:43 +00:00
self . runq_hash . append ( " " )
2006-11-16 15:02:15 +00:00
runq_build . append ( 0 )
2009-07-21 18:44:23 +00:00
runq_recrdepends . append ( recrdepends )
#
# Build a list of recursive cumulative dependencies for each fnid
# We do this by fnid, since if A depends on some task in B
2010-03-24 23:56:12 +00:00
# we're interested in later tasks B's fnid might have but B itself
2009-07-21 18:44:23 +00:00
# doesn't depend on
#
# Algorithm is O(tasks) + O(tasks)*O(fnids)
#
reccumdepends = { }
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2009-07-21 18:44:23 +00:00
fnid = self . runq_fnid [ task ]
if fnid not in reccumdepends :
2009-07-21 21:32:35 +00:00
if fnid in tdepends_fnid :
2009-07-29 13:08:05 +00:00
reccumdepends [ fnid ] = tdepends_fnid [ fnid ]
else :
reccumdepends [ fnid ] = set ( )
2009-07-21 21:32:35 +00:00
reccumdepends [ fnid ] . update ( self . runq_depends [ task ] )
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2009-07-21 18:44:23 +00:00
taskfnid = self . runq_fnid [ task ]
for fnid in reccumdepends :
if task in reccumdepends [ fnid ] :
reccumdepends [ fnid ] . add ( task )
if taskfnid in reccumdepends :
reccumdepends [ fnid ] . update ( reccumdepends [ taskfnid ] )
# Resolve recursive 'recrdeptask' dependencies (B)
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2009-07-21 18:44:23 +00:00
if len ( runq_recrdepends [ task ] ) > 0 :
taskfnid = self . runq_fnid [ task ]
for dep in reccumdepends [ taskfnid ] :
2010-03-24 23:56:12 +00:00
# Ignore self references
2009-07-21 21:32:35 +00:00
if dep == task :
continue
2009-07-21 18:44:23 +00:00
for taskname in runq_recrdepends [ task ] :
if taskData . tasks_name [ dep ] == taskname :
self . runq_depends [ task ] . add ( dep )
2008-01-06 16:51:51 +00:00
# Step B - Mark all active tasks
#
# Start with the tasks we were asked to run and mark all dependencies
# as active too. If the task is to be 'forced', clear its stamp. Once
# all active tasks are marked, prune the ones we don't need.
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Marking Active Tasks " )
2006-11-16 15:02:15 +00:00
def mark_active ( listid , depth ) :
"""
Mark an item as active along with its depends
( calls itself recursively )
"""
if runq_build [ listid ] == 1 :
return
runq_build [ listid ] = 1
depends = self . runq_depends [ listid ]
for depend in depends :
mark_active ( depend , depth + 1 )
2008-03-03 22:01:45 +00:00
self . target_pairs = [ ]
2007-04-01 15:04:49 +00:00
for target in self . targets :
2006-11-16 15:02:15 +00:00
targetid = taskData . getbuild_id ( target [ 0 ] )
if targetid not in taskData . build_targets :
continue
2007-02-21 20:15:13 +00:00
if targetid in taskData . failed_deps :
continue
2006-11-16 15:02:15 +00:00
fnid = taskData . build_targets [ targetid ] [ 0 ]
2008-03-03 22:01:45 +00:00
fn = taskData . fn_index [ fnid ]
self . target_pairs . append ( ( fn , target [ 1 ] ) )
2007-01-08 23:53:01 +00:00
# Remove stamps for targets if force mode active
2007-04-01 15:04:49 +00:00
if self . cooker . configuration . force :
2010-06-10 17:35:31 +00:00
logger . verbose ( " Remove stamp %s , %s " , target [ 1 ] , fn )
2007-04-01 15:04:49 +00:00
bb . build . del_stamp ( target [ 1 ] , self . dataCache , fn )
2007-01-08 23:53:01 +00:00
2006-11-16 15:02:15 +00:00
if fnid in taskData . failed_fnids :
continue
2008-01-06 16:51:51 +00:00
if target [ 1 ] not in taskData . tasks_lookup [ fnid ] :
bb . msg . fatal ( bb . msg . domain . RunQueue , " Task %s does not exist for target %s " % ( target [ 1 ] , target [ 0 ] ) )
2006-11-16 15:02:15 +00:00
listid = taskData . tasks_lookup [ fnid ] [ target [ 1 ] ]
mark_active ( listid , 1 )
2008-01-06 16:51:51 +00:00
# Step C - Prune all inactive tasks
#
# Once all active tasks are marked, prune the ones we don't need.
2006-11-16 15:02:15 +00:00
maps = [ ]
delcount = 0
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
if runq_build [ listid - delcount ] == 1 :
maps . append ( listid - delcount )
else :
del self . runq_fnid [ listid - delcount ]
del self . runq_task [ listid - delcount ]
del self . runq_depends [ listid - delcount ]
del runq_build [ listid - delcount ]
del self . runq_revdeps [ listid - delcount ]
2010-08-31 13:49:43 +00:00
del self . runq_hash [ listid - delcount ]
2006-11-16 15:02:15 +00:00
delcount = delcount + 1
maps . append ( - 1 )
2008-01-06 16:51:51 +00:00
#
# Step D - Sanity checks and computation
#
# Check to make sure we still have tasks to run
2006-11-16 15:02:15 +00:00
if len ( self . runq_fnid ) == 0 :
if not taskData . abort :
2008-02-11 20:33:43 +00:00
bb . msg . fatal ( bb . msg . domain . RunQueue , " All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above. " )
2009-07-21 18:44:23 +00:00
else :
2008-02-11 20:33:43 +00:00
bb . msg . fatal ( bb . msg . domain . RunQueue , " No active tasks and not in --continue mode?! Please report this bug. " )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Pruned %s inactive tasks, %s left " , delcount , len ( self . runq_fnid ) )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Remap the dependencies to account for the deleted tasks
# Check we didn't delete a task we depend on
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
newdeps = [ ]
origdeps = self . runq_depends [ listid ]
for origdep in origdeps :
if maps [ origdep ] == - 1 :
bb . msg . fatal ( bb . msg . domain . RunQueue , " Invalid mapping - Should never happen! " )
newdeps . append ( maps [ origdep ] )
2009-05-12 15:53:22 +00:00
self . runq_depends [ listid ] = set ( newdeps )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Assign Weightings " )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Generate a list of reverse dependencies to ease future calculations
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
for dep in self . runq_depends [ listid ] :
self . runq_revdeps [ dep ] . add ( listid )
2008-01-06 16:51:51 +00:00
# Identify tasks at the end of dependency chains
# Error on circular dependency loops (length two)
2006-11-16 15:02:15 +00:00
endpoints = [ ]
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
revdeps = self . runq_revdeps [ listid ]
if len ( revdeps ) == 0 :
endpoints . append ( listid )
for dep in revdeps :
if dep in self . runq_depends [ listid ] :
#self.dump_data(taskData)
2010-04-12 00:03:55 +00:00
bb . msg . fatal ( bb . msg . domain . RunQueue , " Task %s ( %s ) has circular dependency on %s ( %s ) " % ( taskData . fn_index [ self . runq_fnid [ dep ] ] , self . runq_task [ dep ] , taskData . fn_index [ self . runq_fnid [ listid ] ] , self . runq_task [ listid ] ) )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Compute totals (have %s endpoint(s)) " , len ( endpoints ) )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Calculate task weights
2008-01-06 16:51:51 +00:00
# Check of higher length circular dependencies
self . runq_weight = self . calculate_task_weights ( endpoints )
# Sanity Check - Check for multiple tasks building the same provider
2007-09-02 14:10:08 +00:00
prov_list = { }
seen_fn = [ ]
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2007-09-02 14:10:08 +00:00
fn = taskData . fn_index [ self . runq_fnid [ task ] ]
if fn in seen_fn :
continue
seen_fn . append ( fn )
for prov in self . dataCache . fn_provides [ fn ] :
if prov not in prov_list :
prov_list [ prov ] = [ fn ]
2010-03-24 23:56:12 +00:00
elif fn not in prov_list [ prov ] :
2007-09-02 14:10:08 +00:00
prov_list [ prov ] . append ( fn )
error = False
for prov in prov_list :
if len ( prov_list [ prov ] ) > 1 and prov not in self . multi_provider_whitelist :
error = True
2010-06-10 17:35:31 +00:00
logger . error ( " Multiple .bb files are due to be built which each provide %s ( %s ). \n This usually means one provides something the other doesn ' t and should. " , prov , " " . join ( prov_list [ prov ] ) )
2007-09-02 14:10:08 +00:00
2008-05-04 23:22:24 +00:00
# Create a whitelist usable by the stamp checks
stampfnwhitelist = [ ]
for entry in self . stampwhitelist . split ( ) :
entryid = self . taskData . getbuild_id ( entry )
if entryid not in self . taskData . build_targets :
continue
fnid = self . taskData . build_targets [ entryid ] [ 0 ]
fn = self . taskData . fn_index [ fnid ]
stampfnwhitelist . append ( fn )
self . stampfnwhitelist = stampfnwhitelist
2006-11-16 15:02:15 +00:00
#self.dump_data(taskData)
2010-08-19 10:36:29 +00:00
# Interate over the task list looking for tasks with a 'setscene' function
self . runq_setscene = [ ]
for task in range ( len ( self . runq_fnid ) ) :
setscene = taskData . gettask_id ( self . taskData . fn_index [ self . runq_fnid [ task ] ] , self . runq_task [ task ] + " _setscene " , False )
if not setscene :
continue
2010-08-19 21:35:33 +00:00
#bb.note("Found setscene for %s %s" % (self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task]))
2010-08-19 10:36:29 +00:00
self . runq_setscene . append ( task )
2010-08-31 13:49:43 +00:00
# Interate over the task list and call into the siggen code
dealtwith = set ( )
todeal = set ( range ( len ( self . runq_fnid ) ) )
while len ( todeal ) > 0 :
for task in todeal . copy ( ) :
if len ( self . runq_depends [ task ] - dealtwith ) == 0 :
dealtwith . add ( task )
todeal . remove ( task )
procdep = [ ]
for dep in self . runq_depends [ task ] :
procdep . append ( self . taskData . fn_index [ self . runq_fnid [ dep ] ] + " . " + self . runq_task [ dep ] )
self . runq_hash [ task ] = bb . parse . siggen . get_taskhash ( self . taskData . fn_index [ self . runq_fnid [ task ] ] , self . runq_task [ task ] , procdep , self . dataCache )
2010-09-27 14:57:13 +00:00
hashdata = { }
hashdata [ " hashes " ] = { }
hashdata [ " deps " ] = { }
for task in range ( len ( self . runq_fnid ) ) :
hashdata [ " hashes " ] [ self . taskData . fn_index [ self . runq_fnid [ task ] ] + " . " + self . runq_task [ task ] ] = self . runq_hash [ task ]
deps = [ ]
for dep in self . runq_depends [ task ] :
deps . append ( self . taskData . fn_index [ self . runq_fnid [ dep ] ] + " . " + self . runq_task [ dep ] )
hashdata [ " deps " ] [ self . taskData . fn_index [ self . runq_fnid [ task ] ] + " . " + self . runq_task [ task ] ] = deps
2010-10-11 11:52:57 +00:00
hashdata [ " msg-debug " ] = self . cooker . configuration . debug
hashdata [ " msg-debug-domains " ] = self . cooker . configuration . debug_domains
2010-10-12 16:46:47 +00:00
hashdata [ " verbose " ] = self . cooker . configuration . verbose
2010-10-11 11:52:57 +00:00
2010-12-08 00:08:04 +00:00
self . hashdata = hashdata
2010-09-27 14:57:13 +00:00
2010-08-24 23:58:23 +00:00
return len ( self . runq_fnid )
2010-08-18 10:30:53 +00:00
def dump_data ( self , taskQueue ) :
"""
Dump some debug information on the internal data structures
"""
2010-06-10 17:35:31 +00:00
logger . debug ( 3 , " run_tasks: " )
2010-08-18 10:30:53 +00:00
for task in range ( len ( self . rqdata . runq_task ) ) :
2010-06-10 17:35:31 +00:00
logger . debug ( 3 , " ( %s ) %s - %s : %s Deps %s RevDeps %s " % ( task ,
2010-08-18 10:30:53 +00:00
taskQueue . fn_index [ self . rqdata . runq_fnid [ task ] ] ,
self . rqdata . runq_task [ task ] ,
self . rqdata . runq_weight [ task ] ,
self . rqdata . runq_depends [ task ] ,
self . rqdata . runq_revdeps [ task ] ) )
2010-06-10 17:35:31 +00:00
logger . debug ( 3 , " sorted_tasks: " )
2010-08-18 10:30:53 +00:00
for task1 in range ( len ( self . rqdata . runq_task ) ) :
if task1 in self . prio_map :
task = self . prio_map [ task1 ]
2010-06-10 17:35:31 +00:00
logger . debug ( 3 , " ( %s ) %s - %s : %s Deps %s RevDeps %s " % ( task ,
2010-08-18 10:30:53 +00:00
taskQueue . fn_index [ self . rqdata . runq_fnid [ task ] ] ,
self . rqdata . runq_task [ task ] ,
self . rqdata . runq_weight [ task ] ,
self . rqdata . runq_depends [ task ] ,
self . rqdata . runq_revdeps [ task ] ) )
class RunQueue:
    """
    Controls execution of a prepared RunQueueData task queue.

    Operates as a state machine driven by repeated calls to
    execute_runqueue(): prepare -> scene init -> scene run ->
    run init -> running -> cleanup -> complete/failed.
    """

    def __init__(self, cooker, cfgData, dataCache, taskData, targets):
        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets)

        # Stamp policy controls which dependency stamps are compared:
        # "perfile" (default), full dependency tree, or explicit whitelist
        self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, True) or "perfile"
        # Optional function name used to validate setscene task hashes
        self.hashvalidate = bb.data.getVar("BB_HASHCHECK_FUNCTION", cfgData, True) or None

        self.state = runQueuePrepare

    def check_stamps(self):
        """
        Classify every task as current or not-current based on its stamp
        file and the stamps of its dependencies, walking the dependency
        graph from the leaves upwards.

        Returns the list of task ids whose stamps are current.
        """
        unchecked = {}
        current = []
        notcurrent = []
        buildable = []

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True

        stampwhitelist = []
        if self.stamppolicy == "whitelist":
            stampwhitelist = self.rqdata.stampfnwhitelist

        for task in range(len(self.rqdata.runq_fnid)):
            unchecked[task] = ""
            if len(self.rqdata.runq_depends[task]) == 0:
                buildable.append(task)

        def check_buildable(self, task, buildable):
            # A reverse dependency becomes checkable once none of its
            # own dependencies remain unchecked
            for revdep in self.rqdata.runq_revdeps[task]:
                alldeps = 1
                for dep in self.rqdata.runq_depends[revdep]:
                    if dep in unchecked:
                        alldeps = 0
                if alldeps == 1:
                    if revdep in unchecked:
                        buildable.append(revdep)

        for task in range(len(self.rqdata.runq_fnid)):
            if task not in unchecked:
                continue
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]
            stampfile = "%s.%s" % (self.rqdata.dataCache.stamp[fn], taskname)
            # If the stamp is missing its not current
            if not os.access(stampfile, os.F_OK):
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue
            # If its a 'nostamp' task, it's not current.  (Was previously
            # "task in taskdep['nostamp']", comparing a numeric task id
            # against a list of task names, so it never matched.)
            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue

        while len(buildable) > 0:
            nextbuildable = []
            for task in buildable:
                if task in unchecked:
                    # Was previously self.taskData, an attribute this class
                    # does not have; taskData lives on self.rqdata
                    fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                    taskname = self.rqdata.runq_task[task]
                    stampfile = "%s.%s" % (self.rqdata.dataCache.stamp[fn], taskname)
                    iscurrent = True

                    t1 = os.stat(stampfile)[stat.ST_MTIME]
                    for dep in self.rqdata.runq_depends[task]:
                        if iscurrent:
                            fn2 = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[dep]]
                            taskname2 = self.rqdata.runq_task[dep]
                            stampfile2 = "%s.%s" % (self.rqdata.dataCache.stamp[fn2], taskname2)
                            if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                                if dep in notcurrent:
                                    iscurrent = False
                                else:
                                    t2 = os.stat(stampfile2)[stat.ST_MTIME]
                                    if t1 < t2:
                                        iscurrent = False
                    del unchecked[task]
                    if iscurrent:
                        current.append(task)
                    else:
                        notcurrent.append(task)

                    check_buildable(self, task, nextbuildable)

            buildable = nextbuildable

        if len(unchecked) > 0:
            bb.msg.fatal(bb.msg.domain.RunQueue, "check_stamps fatal internal error")
        return current

    def check_stamp_task(self, task, taskname=None):
        """
        Check whether the stamp for the given task is current, i.e. whether
        the task can be skipped.  Returns True when current.
        """
        def get_timestamp(f):
            # Returns the stamp mtime, or None if the file is unreadable
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except OSError:
                return None

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True

        stampwhitelist = []
        if self.stamppolicy == "whitelist":
            stampwhitelist = self.rqdata.stampfnwhitelist

        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
        if taskname is None:
            taskname = self.rqdata.runq_task[task]

        stampfile = bb.parse.siggen.stampfile(self.rqdata.dataCache.stamp[fn], taskname, self.rqdata.runq_hash[task])

        # If the stamp is missing its not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If its a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCache.task_deps[fn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        # A setscene task with a stamp present is always current
        if taskname.endswith("_setscene"):
            return True

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runq_depends[task]:
            if iscurrent:
                fn2 = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[dep]]
                taskname2 = self.rqdata.runq_task[dep]
                stampfile2 = bb.parse.siggen.stampfile(self.rqdata.dataCache.stamp[fn2], taskname2, self.rqdata.runq_hash[dep])
                stampfile3 = bb.parse.siggen.stampfile(self.rqdata.dataCache.stamp[fn2], taskname2 + "_setscene", self.rqdata.runq_hash[dep])
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                # A newer setscene stamp means the dependency was provided
                # via setscene, so its regular stamp can be ignored
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, "Stampfile %s does not exist", stampfile2)
                        iscurrent = False
                    if t1 < t2:
                        logger.debug(2, "Stampfile %s < %s", stampfile, stampfile2)
                        iscurrent = False

        return iscurrent

    def execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        Returns False when finished, otherwise a truthy retval asking the
        caller to loop (0.5 means idle/poll again).
        """
        retval = 0.5

        if self.state == runQueuePrepare:
            self.rqexe = RunQueueExecuteDummy(self)
            # prepare() returns the number of runnable tasks; previously
            # compared with "is 0" which relies on small-int interning
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit

        if self.state == runQueueSceneInit:
            if self.cooker.configuration.dump_signatures:
                self.dump_signatures()
            else:
                self.rqexe = RunQueueExecuteScenequeue(self)

        if self.state == runQueueSceneRun:
            retval = self.rqexe.execute()

        if self.state == runQueueRunInit:
            logger.info("Executing RunQueue Tasks")
            self.rqexe = RunQueueExecuteTasks(self)
            self.state = runQueueRunning

        if self.state == runQueueRunning:
            retval = self.rqexe.execute()

        if self.state == runQueueCleanUp:
            self.rqexe.finish()

        if self.state == runQueueFailed:
            if not self.rqdata.taskData.tryaltconfigs:
                raise bb.runqueue.TaskFailure(self.rqexe.failed_fnids)
            # Drop the failed providers and rebuild the runqueue data
            for fnid in self.rqexe.failed_fnids:
                self.rqdata.taskData.fail_fnid(fnid)
            self.rqdata.reset()

        if self.state == runQueueComplete:
            # All done
            logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            return False

        if self.state == runQueueChildProcess:
            print("Child process, eeek, shouldn't happen!")
            return False

        # Loop
        return retval

    def finish_runqueue(self, now=False):
        """
        Request shutdown of the running executor; when now is True the
        remaining tasks are terminated immediately.
        """
        if now:
            self.rqexe.finish_now()
        else:
            self.rqexe.finish()

    def dump_signatures(self):
        """
        Reparse every recipe in the queue so the signature generator has
        full dependency data, then dump the signatures.
        """
        self.state = runQueueComplete
        done = set()
        bb.note("Reparsing files to collect dependency data")
        for task in range(len(self.rqdata.runq_fnid)):
            if self.rqdata.runq_fnid[task] not in done:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                # Loaded for its side effect of populating signature data
                bb.cache.Cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)
                done.add(self.rqdata.runq_fnid[task])

        bb.parse.siggen.dump_sigs(self.rqdata.dataCache)

        return
2010-08-18 16:13:06 +00:00
class RunQueueExecute:
    """
    Base class for runqueue executors.

    Holds the per-task state arrays, the table of forked worker processes
    and the machinery to fork workers and reap their exit codes.
    """
    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Maximum number of concurrent worker processes and scheduler name
        self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", self.cfgData, 1) or 1)
        self.scheduler = bb.data.getVar("BB_SCHEDULER", self.cfgData, 1) or "speed"

        self.runq_buildable = []  # per-task flag: dependencies satisfied
        self.runq_running = []    # per-task flag: task has been started
        self.runq_complete = []   # per-task flag: task finished successfully
        self.build_pids = {}      # pid -> task id of forked workers
        self.build_pipes = {}     # pid -> runQueuePipe carrying worker events
        self.failed_fnids = []

    def runqueue_process_waitpid(self):
        """
        Return None if there are no processes awaiting result collection, otherwise
        collect the process exit codes and close the information pipe.
        """
        result = os.waitpid(-1, os.WNOHANG)
        # (0, 0) means no child has exited yet; previously compared with
        # "is 0" which relies on CPython small-int interning
        if result[0] == 0 and result[1] == 0:
            return None
        task = self.build_pids[result[0]]
        del self.build_pids[result[0]]
        self.build_pipes[result[0]].close()
        del self.build_pipes[result[0]]
        if result[1] != 0:
            # High byte of the waitpid status is the exit code
            self.task_fail(task, result[1] >> 8)
        else:
            self.task_complete(task)

    def finish_now(self):
        """Terminate all remaining workers immediately and drain their pipes."""
        if self.stats.active:
            logger.info("Sending SIGTERM to remaining %s tasks", self.stats.active)
            for k, v in self.build_pids.iteritems():
                try:
                    # Negative pid kills the whole process group; the
                    # worker made itself a group leader in fork_off_task
                    os.kill(-k, signal.SIGTERM)
                except OSError:
                    # Process may already have exited
                    pass
        for pipe in self.build_pipes:
            self.build_pipes[pipe].read()

    def finish(self):
        """Begin an orderly shutdown, waiting for active tasks to complete."""
        self.rq.state = runQueueCleanUp

        for pipe in self.build_pipes:
            self.build_pipes[pipe].read()

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.runqueue_process_waitpid()
            return

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def fork_off_task(self, fn, task, taskname):
        """
        Fork a worker process to execute taskname for recipe fn.

        Returns (pid, pipein, pipeout) in the parent; the child never
        returns from this call (it terminates via os._exit).
        """
        the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)

        env = bb.data.export_vars(the_data)
        env = bb.data.export_envvars(env, the_data)

        taskdep = self.rqdata.dataCache.task_deps[fn]
        if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
            # Guard against FAKEROOTENV being unset, matching FAKEROOTDIRS
            envvars = (the_data.getVar("FAKEROOTENV", True) or "").split()
            for var in envvars:
                # Split on the first '=' only so values may contain '='
                comps = var.split("=", 1)
                env[comps[0]] = comps[1]
            fakedirs = (the_data.getVar("FAKEROOTDIRS", True) or "").split()
            for p in fakedirs:
                bb.mkdirhier(p)
            logger.debug(2, "Running %s:%s under fakeroot, state dir is %s", fn, taskname, fakedirs)

        env['PATH'] = self.cooker.configuration.initial_path

        # Replace the process environment wholesale for the child;
        # restored in the parent after the fork
        envbackup = os.environ.copy()
        for e in envbackup:
            os.unsetenv(e)
        for e in env:
            os.putenv(e, env[e])

        sys.stdout.flush()
        sys.stderr.flush()
        try:
            pipeinfd, pipeoutfd = os.pipe()
            pipein = os.fdopen(pipeinfd, 'rb', 4096)
            pipeout = os.fdopen(pipeoutfd, 'wb', 4096)
            pid = os.fork()
        except OSError as e:
            bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))

        if pid == 0:
            # Child (worker) process
            pipein.close()

            # Save out the PID so that the event can include it the
            # events
            bb.event.worker_pid = os.getpid()
            bb.event.worker_pipe = pipeout
            bb.event.useStdout = False

            # Child processes should send their messages to the UI
            # process via the server process, not print them
            # themselves
            bblogger.handlers = [bb.event.LogHandler()]

            self.rq.state = runQueueChildProcess
            # Make the child the process group leader
            os.setpgid(0, 0)
            # No stdin
            newsi = os.open(os.devnull, os.O_RDWR)
            os.dup2(newsi, sys.stdin.fileno())

            if taskname.endswith("_setscene"):
                the_data.setVarFlag(taskname, "quieterrors", "1")

            bb.data.setVar("BB_WORKERCONTEXT", "1", the_data)
            # Publish the task hash data to the worker's datastore so the
            # task (and its signature handling) can see it
            bb.parse.siggen.set_taskdata(self.rqdata.hashdata["hashes"], self.rqdata.hashdata["deps"])
            for h in self.rqdata.hashdata["hashes"]:
                bb.data.setVar("BBHASH_%s" % h, self.rqdata.hashdata["hashes"][h], the_data)
            for h in self.rqdata.hashdata["deps"]:
                bb.data.setVar("BBHASHDEPS_%s" % h, self.rqdata.hashdata["deps"][h], the_data)
            bb.data.setVar("BB_TASKHASH", self.rqdata.runq_hash[task], the_data)

            ret = 0
            try:
                if not self.cooker.configuration.dry_run:
                    ret = bb.build.exec_task(fn, taskname, the_data)
                os._exit(ret)
            except:
                # Deliberately broad: the child must never escape this
                # function by any path other than os._exit
                os._exit(1)

        # Parent: restore the original environment
        for e in env:
            os.unsetenv(e)
        for e in envbackup:
            os.putenv(e, envbackup[e])

        return pid, pipein, pipeout
2010-08-18 16:13:06 +00:00
2010-08-24 23:58:23 +00:00
class RunQueueExecuteDummy(RunQueueExecute):
    """
    Placeholder executor installed while the runqueue data is still being
    prepared; it carries empty stats and can only complete.
    """
    def __init__(self, rq):
        # Deliberately does not call RunQueueExecute.__init__: no tasks
        # will ever be executed through this object
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        self.rq.state = runQueueComplete
        return
2010-08-18 16:13:06 +00:00
class RunQueueExecuteTasks(RunQueueExecute):
    """
    Executor for the main (non-setscene) task queue.
    """
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runq_fnid))

        # Mark initial buildable tasks
        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            if len(self.rqdata.runq_depends[task]) == 0:
                self.runq_buildable.append(1)
            else:
                self.runq_buildable.append(0)
            # A task whose reverse dependencies were all covered by the
            # setscene pass is itself covered
            if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(task)

        # Propagate setscene coverage until a fixed point is reached
        found = True
        while found:
            found = False
            for task in xrange(self.stats.total):
                if task in self.rq.scenequeue_covered:
                    continue
                if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
                    self.rq.scenequeue_covered.add(task)
                    found = True

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)

        for task in self.rq.scenequeue_covered:
            self.task_skip(task)

        event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)

        # Select the configured scheduler, aborting on an unknown name
        for scheduler in self.rqdata.schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'.  Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in self.rqdata.schedulers)))

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete[task] = 1
        for revdep in self.rqdata.runq_revdeps[task]:
            if self.runq_running[revdep] == 1:
                continue
            if self.runq_buildable[revdep] == 1:
                continue
            alldeps = 1
            for dep in self.rqdata.runq_depends[revdep]:
                if self.runq_complete[dep] != 1:
                    alldeps = 0
            if alldeps == 1:
                self.runq_buildable[revdep] = 1
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
                taskname = self.rqdata.runq_task[revdep]
                logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname)

    def task_complete(self, task):
        """Record a successful task and notify listeners."""
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        fnid = self.rqdata.runq_fnid[task]
        self.failed_fnids.append(fnid)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        if self.rqdata.taskData.abort:
            self.rq.state = runQueueCleanUp

    def task_skip(self, task):
        """Mark a task as not needing execution (stamp current / covered)."""
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()

        Forks off workers for every buildable task the scheduler offers,
        then reaps finished workers.  Returns True to be called again,
        or 0.5 when idle and waiting on active tasks.
        """
        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        # The scheduler returns None once no further task should start
        for task in iter(self.sched.next, None):
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]

            if self.rq.check_stamp_task(task, taskname):
                logger.debug(2, "Stamp current task %s (%s)", task, self.rqdata.get_user_idstring(task))
                self.task_skip(task)
                return True

            # Fire exactly one started event per task; previously an
            # unconditional fire here duplicated the ones below
            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks just need their stamp written
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running[task] = 1
                self.stats.taskActive()
                bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

            pid, pipein, pipeout = self.fork_off_task(fn, task, taskname)

            self.build_pids[pid] = task
            self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
            self.runq_running[task] = 1
            self.stats.taskActive()

        for pipe in self.build_pipes:
            self.build_pipes[pipe].read()

        if self.stats.active > 0:
            if self.runqueue_process_waitpid() is None:
                return 0.5
            return True

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in xrange(self.stats.total):
            if self.runq_buildable[task] == 0:
                logger.error("Task %s never buildable!", task)
            if self.runq_running[task] == 0:
                logger.error("Task %s never ran!", task)
            if self.runq_complete[task] == 0:
                logger.error("Task %s never completed!", task)

        self.rq.state = runQueueComplete
        return True
2010-08-19 10:36:29 +00:00
class RunQueueExecuteScenequeue ( RunQueueExecute ) :
    def __init__(self, rq):
        """
        Build the setscene execution queue.

        Collapses the full runqueue dependency graph down to one that only
        contains the setscene tasks, then (optionally) asks the configured
        hash validation function which setscene tasks are worth attempting.
        """
        RunQueueExecute.__init__(self, rq)

        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene))

        endpoints = {}
        sq_revdeps = []
        sq_revdeps_new = []
        sq_revdeps_squash = []

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        # State arrays are indexed by setscene-queue position, not by the
        # original runqueue task id
        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            self.runq_buildable.append(0)

        # Start from a copy of the full reverse-dependency graph; tasks with
        # no reverse dependencies (and no setscene variant) are the initial
        # endpoints of the walk
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints[task] = None

        # Each dependency of a setscene task is an endpoint attributed to
        # that setscene task
        for task in self.rqdata.runq_setscene:
            for dep in self.rqdata.runq_depends[task]:
                endpoints[dep] = task

        def process_endpoints(endpoints):
            # Walk from the endpoints towards the roots, accumulating for
            # every node the set of setscene tasks that (transitively)
            # depend on it; recurses until no new endpoints appear
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks.add(task)
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)

        # Squash the collapsed graph: remap reverse dependencies of each
        # setscene task from runqueue ids to setscene-queue indices
        for task in xrange(len(self.rqdata.runq_fnid)):
            if task in self.rqdata.runq_setscene:
                deps = set()
                for dep in sq_revdeps_new[task]:
                    deps.add(self.rqdata.runq_setscene.index(dep))
                sq_revdeps_squash.append(deps)
            elif len(sq_revdeps_new[task]) != 0:
                bb.msg.fatal(bb.msg.domain.RunQueue, "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")

        self.sq_deps = []
        self.sq_revdeps = sq_revdeps_squash
        # Working copy consumed by scenequeue_updatecounters as tasks finish
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        # Invert sq_revdeps to get forward dependencies
        for task in xrange(len(self.sq_revdeps)):
            self.sq_deps.append(set())
        for task in xrange(len(self.sq_revdeps)):
            for dep in self.sq_revdeps[task]:
                self.sq_deps[dep].add(task)

        # Setscene tasks with no reverse dependencies can start immediately
        for task in xrange(len(self.sq_revdeps)):
            if len(self.sq_revdeps[task]) == 0:
                self.runq_buildable[task] = 1

        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            for task in xrange(len(self.sq_revdeps)):
                realtask = self.rqdata.runq_setscene[task]
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                taskname = self.rqdata.runq_task[realtask]
                taskdep = self.rqdata.dataCache.task_deps[fn]
                # noexec tasks are skipped outright; just write their stamp
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(task)
                    self.task_skip(task)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
                    continue
                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
                sq_hash.append(self.rqdata.runq_hash[realtask])
                sq_taskname.append(taskname)
                sq_task.append(task)
            # Invoke the user-supplied BB_HASHCHECK_FUNCTION; it returns the
            # indices (into the sq_* lists) of tasks whose hashes are valid
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.configuration.data }
            valid = bb.utils.better_eval(call, locs)

            # Map returned indices back to setscene-queue task numbers
            valid_new = []
            for v in valid:
                valid_new.append(sq_task[v])

            for task in xrange(len(self.sq_revdeps)):
                if task not in valid_new and task not in noexec:
                    logger.debug(2, "No package found so skipping setscene task %s" % (self.rqdata.get_user_idstring(self.rqdata.runq_setscene[task])))
                    self.task_failoutright(task)

        logger.info("Executing SetScene Tasks")

        self.rq.state = runQueueSceneRun
def scenequeue_updatecounters(self, task):
    """
    Propagate completion of 'task' to its dependents: remove it from
    each dependent's pending reverse-dependency set, and mark any
    dependent whose pending set drains to empty as buildable.
    """
    for dependent in self.sq_deps[task]:
        remaining = self.sq_revdeps2[dependent]
        remaining.remove(task)
        if not remaining:
            self.runq_buildable[dependent] = 1
def task_completeoutright(self, task):
    """
    Mark a setscene task as completed.

    Adds the task to the covered set and updates the reverse
    dependencies so that any task whose dependencies are now all
    complete becomes buildable.
    """
    realtask = self.rqdata.runq_setscene[task]
    logger.debug(1, 'Found task %s which could be accelerated',
                 self.rqdata.get_user_idstring(realtask))
    self.scenequeue_covered.add(task)
    self.scenequeue_updatecounters(task)
def task_complete(self, task):
    """Count the task as completed, then run the shared completion path."""
    self.stats.taskCompleted()
    self.task_completeoutright(task)
def task_fail(self, task, result):
    """
    Mark a setscene task as failed.

    Fires a runQueueTaskFailed event carrying the exit code, records
    the task as not covered, and unblocks its dependents.

    task -- index into self.rqdata.runq_setscene
    result -- exit code of the failing task
    """
    self.stats.taskFailed()
    # Removed an unused 'index' lookup of self.rqdata.runq_setscene[task]
    # that was never referenced afterwards.
    bb.event.fire(runQueueTaskFailed(task, self.stats, result, self), self.cfgData)
    self.scenequeue_notcovered.add(task)
    self.scenequeue_updatecounters(task)
def task_failoutright(self, task):
    """
    Mark a setscene task as failed outright (no usable prebuilt
    result), without firing a failure event.

    The task is flagged running and buildable so the scheduler will
    not pick it again, counted as completed/skipped in the stats,
    recorded as not covered, and its dependents are unblocked.
    """
    self.runq_running[task] = 1
    self.runq_buildable[task] = 1
    self.stats.taskCompleted()
    self.stats.taskSkipped()
    # Removed an unused 'index' lookup of self.rqdata.runq_setscene[task]
    # that was never referenced afterwards.
    self.scenequeue_notcovered.add(task)
    self.scenequeue_updatecounters(task)
def task_skip(self, task):
    """
    Skip a setscene task whose stamp is already current, treating it
    as covered: mark it running and buildable, run the shared
    completion path, and update the statistics.
    """
    self.runq_running[task] = self.runq_buildable[task] = 1
    self.task_completeoutright(task)
    self.stats.taskCompleted()
    self.stats.taskSkipped()
def execute(self):
    """
    Run the tasks in a queue prepared by prepare_runqueue

    Returns True while there is (or may be) work to do, or 0.5 to ask
    the caller to poll again shortly while forked tasks are running.
    """
    task = None
    if self.stats.active < self.number_tasks:
        # Find the next buildable, not-yet-running setscene task
        for nexttask in xrange(self.stats.total):
            if self.runq_buildable[nexttask] == 1 and self.runq_running[nexttask] != 1:
                task = nexttask
                break

    if task is not None:
        realtask = self.rqdata.runq_setscene[task]
        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
        taskname = self.rqdata.runq_task[realtask] + "_setscene"

        if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask]):
            # The real task's stamp is already current, so running the
            # setscene variant would be pointless.
            # Fix: log the user idstring of realtask (previously logged the
            # scenequeue index 'task', giving a misleading message) and
            # correct the "varient" typo.
            logger.debug(2, "Stamp for underlying task %s (%s) is current so skipping setscene variant" % (task, self.rqdata.get_user_idstring(realtask)))
            self.task_failoutright(task)
            return True

        if self.cooker.configuration.force:
            # Explicitly requested targets are always rebuilt under --force,
            # so their setscene variants must not be used.
            for target in self.rqdata.target_pairs:
                if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                    self.task_failoutright(task)
                    return True

        if self.rq.check_stamp_task(realtask, taskname):
            logger.debug(2, "Setscene stamp current task %s (%s) so skip it and its dependencies" % (task, self.rqdata.get_user_idstring(realtask)))
            self.task_skip(task)
            return True

        logger.info("Running setscene task %d of %d (%s:%s)" % (self.stats.completed + self.stats.active + self.stats.failed + 1,
                                                                self.stats.total, fn, taskname))

        pid, pipein, pipeout = self.fork_off_task(fn, realtask, taskname)

        self.build_pids[pid] = task
        self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
        self.runq_running[task] = 1
        self.stats.taskActive()
        if self.stats.active < self.number_tasks:
            return True

    # Drain any pending output/events from the running workers
    for pipe in self.build_pipes:
        self.build_pipes[pipe].read()

    if self.stats.active > 0:
        if self.runqueue_process_waitpid() is None:
            return 0.5
        return True

    # All setscene tasks handled: convert scenequeue_covered task numbers
    # into full taskgraph ids for the main run queue
    oldcovered = self.scenequeue_covered
    self.rq.scenequeue_covered = set()
    for task in oldcovered:
        self.rq.scenequeue_covered.add(self.rqdata.runq_setscene[task])

    bb.debug(1, "We can skip tasks %s" % self.rq.scenequeue_covered)

    self.rq.state = runQueueRunInit
    return True
class TaskFailure(Exception):
    """Raised when a task in a runqueue fails."""

    def __init__(self, x):
        # Store the supplied value as the exception arguments.
        self.args = x
class runQueueExitWait(bb.event.Event):
    """Event fired while waiting for task processes to exit."""

    def __init__(self, remain):
        # Number of still-active tasks being waited on.
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)
class runQueueEvent(bb.event.Event):
    """Base class for runQueue events, carrying the task id, its
    user-visible id string and the current queue statistics."""

    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = rq.rqdata.get_user_idstring(task)
        self.stats = stats
        bb.event.Event.__init__(self)
class runQueueTaskStarted(runQueueEvent):
    """Event notifying that a task was started."""

    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # True when the started task is a noexec (stamp-only) task.
        self.noexec = noexec
class runQueueTaskFailed(runQueueEvent):
    """Event notifying that a task failed."""

    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        # Exit code of the failed task's process.
        self.exitcode = exitcode
class runQueueTaskCompleted(runQueueEvent):
    """Event notifying that a task completed."""
#def check_stamp_fn(fn, taskname, d):
#    rq = bb.data.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", d)
#    fn = bb.data.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY2", d)
#    fnid = rq.rqdata.taskData.getfn_id(fn)
#    taskid = rq.get_task_id(fnid, taskname)
#    if taskid is not None:
#        return rq.check_stamp_task(taskid)
#    return None
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d):
        # Keep the read end; the write end belongs to the worker.
        self.fd = pipein
        pipeout.close()
        # Make the read end non-blocking so read() never stalls the server.
        flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
        fcntl.fcntl(self.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        self.queue = ""
        self.d = d

    def read(self):
        """
        Pull any available data from the pipe and fire each complete
        '</event>'-terminated message. Returns True when new data was
        consumed, False otherwise.
        """
        before = len(self.queue)
        try:
            self.queue += self.fd.read(1024)
        except IOError:
            # Nothing available on the non-blocking fd right now.
            pass
        after = len(self.queue)
        marker = "</event>"
        pos = self.queue.find(marker)
        while pos != -1:
            split = pos + len(marker)
            bb.event.fire_from_worker(self.queue[:split], self.d)
            self.queue = self.queue[split:]
            pos = self.queue.find(marker)
        return after > before

    def close(self):
        """Drain remaining messages, warn about partial data, close the fd."""
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.fd.close()