2006-11-29 22:52:37 +00:00
#!/usr/bin/env python
2006-11-16 15:02:15 +00:00
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake ' RunQueue ' implementation
Handles preparation and execution of a queue of tasks
"""
2007-09-02 14:10:08 +00:00
# Copyright (C) 2006-2007 Richard Purdie
2007-01-08 23:53:01 +00:00
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
2011-01-10 12:48:49 +00:00
import copy
2010-06-10 17:35:31 +00:00
import os
import sys
2007-04-01 15:04:49 +00:00
import signal
2008-03-03 22:01:45 +00:00
import stat
2010-01-21 23:46:20 +00:00
import fcntl
2010-06-10 15:05:52 +00:00
import logging
2010-06-10 17:35:31 +00:00
import bb
2010-06-10 15:05:52 +00:00
from bb import msg , data , event
V5 Disk space monitoring
Monitor disk availability and take action when the free disk space or
amount of free inode is running low, it is enabled when BB_DISKMON_DIRS
is set.
* Variable meanings(from meta-yocto/conf/local.conf.sample):
# Set the directories to monitor for disk usage, if more than one
# directories are mounted in the same device, then only one directory
# would be monitored since the monitor is based on the device.
# The format is:
# "action,directory,minimum_space,minimum_free_inode"
#
# The "action" must be set and should be one of:
# ABORT: Immediately abort
# STOPTASKS: The new tasks can't be executed any more, will stop the build
# when the running tasks have been done.
# WARN: show warnings (see BB_DISKMON_WARNINTERVAL for more information)
#
# The "directory" must be set, any directory is OK.
#
# Either "minimum_space" or "minimum_free_inode" (or both of them)
# should be set, otherwise the monitor would not be enabled,
# the unit can be G, M, K or none, but do NOT use GB, MB or KB
# (B is not needed).
#BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
#
# Set disk space and inode interval (only works when the action is "WARN",
# the unit can be G, M, or K, but do NOT use the GB, MB or KB
# (B is not needed), the format is:
# "disk_space_interval, disk_inode_interval", the default value is
# "50M,5K" which means that it would warn when the free space is
# lower than the minimum space(or inode), and would repeat the action
# when the disk space reduces 50M (or the amount of inode reduces 5k)
# again.
#BB_DISKMON_WARNINTERVAL = "50M,5K"
[YOCTO #1589]
(Bitbake rev: 4d173d441d2beb8e6492b6b1842682f8cf32e6cc)
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
2012-02-26 08:48:15 +00:00
from bb import monitordisk
2010-06-10 15:05:52 +00:00
# Module-level loggers: "BitBake" is the root BitBake logger and
# "BitBake.RunQueue" is the child logger used throughout this module.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
2006-11-16 15:02:15 +00:00
2007-04-01 15:04:49 +00:00
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # Counters for finished tasks, by outcome.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        # Number of tasks currently executing.
        self.active = 0
        # Total number of tasks in the runqueue.
        self.total = total

    def copy(self):
        """Return a snapshot of the current counters."""
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        """Record that one active task failed."""
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number=1):
        """Record that 'number' active tasks completed successfully."""
        self.active -= number
        self.completed += number

    def taskSkipped(self, number=1):
        """Record that 'number' tasks were skipped.

        NOTE(review): skipped tasks are *added* to the active count here;
        the caller is expected to mark them completed shortly afterwards.
        """
        self.active += number
        self.skipped += number

    def taskActive(self):
        """Record that one more task has started executing."""
        self.active += 1
2010-03-24 23:56:12 +00:00
# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
runQueueChildProcess = 10
2010-01-20 18:46:02 +00:00
2010-07-22 17:54:58 +00:00
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata

        # Identity priority map: task ids in their natural order.
        self.prio_map = list(range(len(self.rqdata.runq_fnid)))

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        (None if nothing is currently buildable).
        """
        for tasknum in range(len(self.rqdata.runq_fnid)):
            taskid = self.prio_map[tasknum]
            if self.rq.runq_running[taskid] == 1:
                continue
            if self.rq.runq_buildable[taskid] == 1:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[taskid]]
                taskname = self.rqdata.runq_task[taskid]
                stamp = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
                # Never run two tasks which would share the same stamp file
                # at the same time.
                if stamp in self.rq.build_stamps.values():
                    continue
                return taskid

    def next(self):
        """
        Return the id of the task we should build next, or None if no
        further task can be started right now.
        """
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()
2008-01-06 16:51:51 +00:00
class RunQueueSchedulerSpeed ( RunQueueScheduler ) :
"""
A scheduler optimised for speed . The priority map is sorted by task weight ,
heavier weighted tasks ( tasks needed by the most other tasks ) are run first .
"""
2010-07-22 17:54:58 +00:00
name = " speed "
2010-08-18 10:30:53 +00:00
def __init__ ( self , runqueue , rqdata ) :
2008-01-06 16:51:51 +00:00
"""
The priority map is sorted by task weight .
"""
self . rq = runqueue
2010-08-18 10:30:53 +00:00
self . rqdata = rqdata
2008-01-06 16:51:51 +00:00
2011-01-10 12:48:49 +00:00
sortweight = sorted ( copy . deepcopy ( self . rqdata . runq_weight ) )
copyweight = copy . deepcopy ( self . rqdata . runq_weight )
2008-01-06 16:51:51 +00:00
self . prio_map = [ ]
for weight in sortweight :
idx = copyweight . index ( weight )
self . prio_map . append ( idx )
copyweight [ idx ] = - 1
self . prio_map . reverse ( )
class RunQueueSchedulerCompletion ( RunQueueSchedulerSpeed ) :
"""
2010-03-24 23:56:12 +00:00
A scheduler optimised to complete . bb files are quickly as possible . The
priority map is sorted by task weight , but then reordered so once a given
2008-01-06 16:51:51 +00:00
. bb file starts to build , its completed as quickly as possible . This works
2010-03-24 23:56:12 +00:00
well where disk space is at a premium and classes like OE ' s rm_work are in
2008-01-06 16:51:51 +00:00
force .
"""
2010-07-22 17:54:58 +00:00
name = " completion "
2010-08-18 10:30:53 +00:00
def __init__ ( self , runqueue , rqdata ) :
RunQueueSchedulerSpeed . __init__ ( self , runqueue , rqdata )
2008-01-06 16:51:51 +00:00
#FIXME - whilst this groups all fnids together it does not reorder the
#fnid groups optimally.
2010-03-24 23:56:12 +00:00
2011-01-10 12:48:49 +00:00
basemap = copy . deepcopy ( self . prio_map )
2008-01-06 16:51:51 +00:00
self . prio_map = [ ]
while ( len ( basemap ) > 0 ) :
entry = basemap . pop ( 0 )
self . prio_map . append ( entry )
2010-08-18 10:30:53 +00:00
fnid = self . rqdata . runq_fnid [ entry ]
2008-01-06 16:51:51 +00:00
todel = [ ]
for entry in basemap :
2010-08-18 10:30:53 +00:00
entry_fnid = self . rqdata . runq_fnid [ entry ]
2008-01-06 16:51:51 +00:00
if entry_fnid == fnid :
todel . append ( basemap . index ( entry ) )
self . prio_map . append ( entry )
todel . reverse ( )
for idx in todel :
del basemap [ idx ]
2010-08-18 10:30:53 +00:00
class RunQueueData :
2006-11-16 15:02:15 +00:00
"""
BitBake Run Queue implementation
"""
2010-08-18 10:30:53 +00:00
def __init__ ( self , rq , cooker , cfgData , dataCache , taskData , targets ) :
2007-04-01 15:04:49 +00:00
self . cooker = cooker
self . dataCache = dataCache
self . taskData = taskData
self . targets = targets
2010-08-18 10:30:53 +00:00
self . rq = rq
2012-02-13 11:41:31 +00:00
self . warn_multi_bb = False
2007-04-01 15:04:49 +00:00
2012-03-03 10:41:41 +00:00
self . stampwhitelist = cfgData . getVar ( " BB_STAMP_WHITELIST " , True ) or " "
self . multi_provider_whitelist = ( cfgData . getVar ( " MULTI_PROVIDER_WHITELIST " , True ) or " " ) . split ( )
2010-08-18 10:30:53 +00:00
self . reset ( )
2006-11-16 15:02:15 +00:00
2010-08-18 10:30:53 +00:00
def reset ( self ) :
2006-11-16 15:02:15 +00:00
self . runq_fnid = [ ]
self . runq_task = [ ]
self . runq_depends = [ ]
self . runq_revdeps = [ ]
2010-08-31 13:49:43 +00:00
self . runq_hash = [ ]
2010-01-20 18:46:02 +00:00
bitbake: runqueue.py: improve printing dependent tasks
Print names instead of Task-IDs (and not mentioning they're task ids).
Previously we printed e.g.:
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (depends: Set([88, 282, 92, 87]))
Now we say
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (dependent Task-IDs ['busybox-native, do_patch', 'update-rc.d, do_populate_staging', 'busybox-native, do_populate_staging', 'shasum-native.bb, do_populate_staging', 'busybox-native, do_unpack'])
(Bitbake rev: 00eaf76fdc32eb515995b47dfa69eb90ca904b37)
Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
2010-02-08 17:50:34 +00:00
def runq_depends_names ( self , ids ) :
import re
ret = [ ]
for id in self . runq_depends [ ids ] :
nam = os . path . basename ( self . get_user_idstring ( id ) )
nam = re . sub ( " _[^,]*, " , " , " , nam )
ret . extend ( [ nam ] )
return ret
2011-02-28 14:28:25 +00:00
def get_user_idstring ( self , task , task_name_suffix = " " ) :
2007-04-01 15:04:49 +00:00
fn = self . taskData . fn_index [ self . runq_fnid [ task ] ]
2011-02-28 14:28:25 +00:00
taskname = self . runq_task [ task ] + task_name_suffix
2006-11-16 15:02:15 +00:00
return " %s , %s " % ( fn , taskname )
2008-05-13 07:53:18 +00:00
def get_task_id ( self , fnid , taskname ) :
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2008-05-13 07:53:18 +00:00
if self . runq_fnid [ listid ] == fnid and self . runq_task [ listid ] == taskname :
return listid
return None
2008-01-06 16:51:51 +00:00
def circular_depchains_handler ( self , tasks ) :
"""
Some tasks aren ' t buildable, likely due to circular dependency issues.
Identify the circular dependencies and print them in a user readable format .
"""
from copy import deepcopy
valid_chains = [ ]
explored_deps = { }
msgs = [ ]
def chain_reorder ( chain ) :
"""
Reorder a dependency chain so the lowest task id is first
"""
lowest = 0
new_chain = [ ]
2010-11-19 20:39:22 +00:00
for entry in xrange ( len ( chain ) ) :
2008-01-06 16:51:51 +00:00
if chain [ entry ] < chain [ lowest ] :
lowest = entry
new_chain . extend ( chain [ lowest : ] )
new_chain . extend ( chain [ : lowest ] )
return new_chain
def chain_compare_equal ( chain1 , chain2 ) :
"""
Compare two dependency chains and see if they ' re the same
"""
if len ( chain1 ) != len ( chain2 ) :
return False
2010-11-19 20:39:22 +00:00
for index in xrange ( len ( chain1 ) ) :
2008-01-06 16:51:51 +00:00
if chain1 [ index ] != chain2 [ index ] :
return False
return True
2010-03-24 23:56:12 +00:00
2008-01-06 16:51:51 +00:00
def chain_array_contains ( chain , chain_array ) :
"""
Return True if chain_array contains chain
"""
for ch in chain_array :
if chain_compare_equal ( ch , chain ) :
return True
return False
def find_chains ( taskid , prev_chain ) :
prev_chain . append ( taskid )
total_deps = [ ]
total_deps . extend ( self . runq_revdeps [ taskid ] )
for revdep in self . runq_revdeps [ taskid ] :
if revdep in prev_chain :
idx = prev_chain . index ( revdep )
# To prevent duplicates, reorder the chain to start with the lowest taskid
# and search through an array of those we've already printed
chain = prev_chain [ idx : ]
new_chain = chain_reorder ( chain )
if not chain_array_contains ( new_chain , valid_chains ) :
valid_chains . append ( new_chain )
msgs . append ( " Dependency loop # %d found: \n " % len ( valid_chains ) )
for dep in new_chain :
bitbake: runqueue.py: improve printing dependent tasks
Print names instead of Task-IDs (and not mentioning they're task ids).
Previously we printed e.g.:
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (depends: Set([88, 282, 92, 87]))
Now we say
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (dependent Task-IDs ['busybox-native, do_patch', 'update-rc.d, do_populate_staging', 'busybox-native, do_populate_staging', 'shasum-native.bb, do_populate_staging', 'busybox-native, do_unpack'])
(Bitbake rev: 00eaf76fdc32eb515995b47dfa69eb90ca904b37)
Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
2010-02-08 17:50:34 +00:00
msgs . append ( " Task %s ( %s ) (dependent Tasks %s ) \n " % ( dep , self . get_user_idstring ( dep ) , self . runq_depends_names ( dep ) ) )
2008-01-06 16:51:51 +00:00
msgs . append ( " \n " )
if len ( valid_chains ) > 10 :
msgs . append ( " Aborted dependency loops search after 10 matches. \n " )
return msgs
continue
scan = False
if revdep not in explored_deps :
scan = True
elif revdep in explored_deps [ revdep ] :
scan = True
else :
for dep in prev_chain :
if dep in explored_deps [ revdep ] :
scan = True
if scan :
2011-01-10 12:48:49 +00:00
find_chains ( revdep , copy . deepcopy ( prev_chain ) )
2008-01-06 16:51:51 +00:00
for dep in explored_deps [ revdep ] :
if dep not in total_deps :
total_deps . append ( dep )
explored_deps [ taskid ] = total_deps
for task in tasks :
find_chains ( task , [ ] )
return msgs
def calculate_task_weights ( self , endpoints ) :
"""
2010-03-24 23:56:12 +00:00
Calculate a number representing the " weight " of each task . Heavier weighted tasks
2008-01-06 16:51:51 +00:00
have more dependencies and hence should be executed sooner for maximum speed .
2010-12-22 15:41:32 +00:00
This function also sanity checks the task list finding tasks that are not
2008-01-06 16:51:51 +00:00
possible to execute due to circular dependencies .
"""
numTasks = len ( self . runq_fnid )
weight = [ ]
deps_left = [ ]
task_done = [ ]
2010-11-19 20:39:22 +00:00
for listid in xrange ( numTasks ) :
2008-01-06 16:51:51 +00:00
task_done . append ( False )
weight . append ( 0 )
deps_left . append ( len ( self . runq_revdeps [ listid ] ) )
for listid in endpoints :
weight [ listid ] = 1
task_done [ listid ] = True
2010-04-12 00:03:55 +00:00
while True :
2008-01-06 16:51:51 +00:00
next_points = [ ]
for listid in endpoints :
for revdep in self . runq_depends [ listid ] :
weight [ revdep ] = weight [ revdep ] + weight [ listid ]
deps_left [ revdep ] = deps_left [ revdep ] - 1
if deps_left [ revdep ] == 0 :
next_points . append ( revdep )
task_done [ revdep ] = True
endpoints = next_points
if len ( next_points ) == 0 :
2010-03-24 23:56:12 +00:00
break
2008-01-06 16:51:51 +00:00
# Circular dependency sanity check
problem_tasks = [ ]
2010-11-19 20:39:22 +00:00
for task in xrange ( numTasks ) :
2008-01-06 16:51:51 +00:00
if task_done [ task ] is False or deps_left [ task ] != 0 :
problem_tasks . append ( task )
2010-12-17 21:56:08 +00:00
logger . debug ( 2 , " Task %s ( %s ) is not buildable " , task , self . get_user_idstring ( task ) )
logger . debug ( 2 , " (Complete marker was %s and the remaining dependency count was %s ) \n " , task_done [ task ] , deps_left [ task ] )
2008-01-06 16:51:51 +00:00
if problem_tasks :
message = " Unbuildable tasks were found. \n "
message = message + " These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks. \n \n "
message = message + " Identifying dependency loops (this may take a short while)... \n "
2010-06-10 17:35:31 +00:00
logger . error ( message )
2008-01-06 16:51:51 +00:00
msgs = self . circular_depchains_handler ( problem_tasks )
message = " \n "
for msg in msgs :
message = message + msg
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , message )
2008-01-06 16:51:51 +00:00
return weight
2010-08-18 10:30:53 +00:00
def prepare ( self ) :
2006-11-16 15:02:15 +00:00
"""
2010-03-24 23:56:12 +00:00
Turn a set of taskData into a RunQueue and compute data needed
2006-11-16 15:02:15 +00:00
to optimise the execution order .
"""
runq_build = [ ]
2008-03-14 11:44:34 +00:00
recursive_tdepends = { }
2009-07-21 18:44:23 +00:00
runq_recrdepends = [ ]
tdepends_fnid = { }
2006-11-16 15:02:15 +00:00
2007-04-01 15:04:49 +00:00
taskData = self . taskData
2007-05-22 11:50:37 +00:00
if len ( taskData . tasks_name ) == 0 :
# Nothing to do
2010-08-24 23:58:23 +00:00
return 0
2007-05-22 11:50:37 +00:00
2010-06-10 17:35:31 +00:00
logger . info ( " Preparing runqueue " )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Step A - Work out a list of tasks to run
#
2009-07-21 18:44:23 +00:00
# Taskdata gives us a list of possible providers for every build and run
2010-03-24 23:56:12 +00:00
# target ordered by priority. It also gives information on each of those
2009-07-21 18:44:23 +00:00
# providers.
2008-01-06 16:51:51 +00:00
#
2010-03-24 23:56:12 +00:00
# To create the actual list of tasks to execute we fix the list of
# providers and then resolve the dependencies into task IDs. This
# process is repeated for each type of dependency (tdepends, deptask,
2008-01-06 16:51:51 +00:00
# rdeptast, recrdeptask, idepends).
2009-07-21 21:38:53 +00:00
def add_build_dependencies ( depids , tasknames , depends ) :
for depid in depids :
# Won't be in build_targets if ASSUME_PROVIDED
if depid not in taskData . build_targets :
continue
depdata = taskData . build_targets [ depid ] [ 0 ]
if depdata is None :
continue
dep = taskData . fn_index [ depdata ]
for taskname in tasknames :
taskid = taskData . gettask_id ( dep , taskname , False )
if taskid is not None :
depends . append ( taskid )
def add_runtime_dependencies ( depids , tasknames , depends ) :
for depid in depids :
if depid not in taskData . run_targets :
continue
depdata = taskData . run_targets [ depid ] [ 0 ]
if depdata is None :
continue
dep = taskData . fn_index [ depdata ]
for taskname in tasknames :
taskid = taskData . gettask_id ( dep , taskname , False )
if taskid is not None :
depends . append ( taskid )
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( taskData . tasks_name ) ) :
2009-07-21 18:44:23 +00:00
depends = [ ]
recrdepends = [ ]
2006-11-16 15:02:15 +00:00
fnid = taskData . tasks_fnid [ task ]
fn = taskData . fn_index [ fnid ]
2007-04-01 15:04:49 +00:00
task_deps = self . dataCache . task_deps [ fn ]
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . debug ( 2 , " Processing %s : %s " , fn , taskData . tasks_name [ task ] )
2009-07-21 18:44:23 +00:00
2006-11-16 15:02:15 +00:00
if fnid not in taskData . failed_fnids :
2010-03-24 23:56:12 +00:00
# Resolve task internal dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. addtask before X after Y
2006-11-16 15:02:15 +00:00
depends = taskData . tasks_tdepends [ task ]
2010-03-24 23:56:12 +00:00
# Resolve 'deptask' dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[deptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS)
2006-11-16 15:02:15 +00:00
if ' deptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' deptask ' ] :
2007-10-30 12:03:07 +00:00
tasknames = task_deps [ ' deptask ' ] [ taskData . tasks_name [ task ] ] . split ( )
2009-07-21 21:38:53 +00:00
add_build_dependencies ( taskData . depids [ fnid ] , tasknames , depends )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve 'rdeptask' dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[rdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all RDEPENDS)
2006-11-16 15:02:15 +00:00
if ' rdeptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' rdeptask ' ] :
taskname = task_deps [ ' rdeptask ' ] [ taskData . tasks_name [ task ] ]
2009-07-21 21:38:53 +00:00
add_runtime_dependencies ( taskData . rdepids [ fnid ] , [ taskname ] , depends )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve inter-task dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[depends] = "targetname:do_someothertask"
# (makes sure sometask runs after targetname's someothertask)
2009-07-21 18:44:23 +00:00
if fnid not in tdepends_fnid :
tdepends_fnid [ fnid ] = set ( )
2007-04-01 15:04:49 +00:00
idepends = taskData . tasks_idepends [ task ]
2008-03-14 11:44:34 +00:00
for ( depid , idependtask ) in idepends :
2007-04-01 15:04:49 +00:00
if depid in taskData . build_targets :
2007-08-05 22:43:24 +00:00
# Won't be in build_targets if ASSUME_PROVIDED
2007-04-01 15:04:49 +00:00
depdata = taskData . build_targets [ depid ] [ 0 ]
2007-08-16 09:55:21 +00:00
if depdata is not None :
2007-04-01 15:04:49 +00:00
dep = taskData . fn_index [ depdata ]
2010-08-06 23:19:12 +00:00
taskid = taskData . gettask_id ( dep , idependtask , False )
if taskid is None :
2012-03-19 13:58:14 +00:00
bb . msg . fatal ( " RunQueue " , " Task %s in %s depends upon non-existent task %s in %s " % ( taskData . tasks_name [ task ] , fn , idependtask , dep ) )
2009-07-21 18:44:23 +00:00
depends . append ( taskid )
if depdata != fnid :
tdepends_fnid [ fnid ] . add ( taskid )
# Resolve recursive 'recrdeptask' dependencies (A)
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
2009-07-21 18:44:23 +00:00
# We cover the recursive part of the dependencies below
2006-11-16 15:02:15 +00:00
if ' recrdeptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' recrdeptask ' ] :
2007-01-08 23:53:01 +00:00
for taskname in task_deps [ ' recrdeptask ' ] [ taskData . tasks_name [ task ] ] . split ( ) :
2009-07-21 18:44:23 +00:00
recrdepends . append ( taskname )
2009-07-21 21:38:53 +00:00
add_build_dependencies ( taskData . depids [ fnid ] , [ taskname ] , depends )
add_runtime_dependencies ( taskData . rdepids [ fnid ] , [ taskname ] , depends )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Rmove all self references
2006-11-16 15:02:15 +00:00
if task in depends :
newdep = [ ]
2010-06-10 17:35:31 +00:00
logger . debug ( 2 , " Task %s ( %s %s ) contains self reference! %s " , task , taskData . fn_index [ taskData . tasks_fnid [ task ] ] , taskData . tasks_name [ task ] , depends )
2006-11-16 15:02:15 +00:00
for dep in depends :
2010-03-24 23:56:12 +00:00
if task != dep :
newdep . append ( dep )
2006-11-16 15:02:15 +00:00
depends = newdep
self . runq_fnid . append ( taskData . tasks_fnid [ task ] )
self . runq_task . append ( taskData . tasks_name [ task ] )
2009-05-12 15:53:22 +00:00
self . runq_depends . append ( set ( depends ) )
self . runq_revdeps . append ( set ( ) )
2010-08-31 13:49:43 +00:00
self . runq_hash . append ( " " )
2006-11-16 15:02:15 +00:00
runq_build . append ( 0 )
2009-07-21 18:44:23 +00:00
runq_recrdepends . append ( recrdepends )
#
# Build a list of recursive cumulative dependencies for each fnid
# We do this by fnid, since if A depends on some task in B
2010-03-24 23:56:12 +00:00
# we're interested in later tasks B's fnid might have but B itself
2009-07-21 18:44:23 +00:00
# doesn't depend on
#
# Algorithm is O(tasks) + O(tasks)*O(fnids)
#
reccumdepends = { }
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2009-07-21 18:44:23 +00:00
fnid = self . runq_fnid [ task ]
if fnid not in reccumdepends :
2009-07-21 21:32:35 +00:00
if fnid in tdepends_fnid :
2009-07-29 13:08:05 +00:00
reccumdepends [ fnid ] = tdepends_fnid [ fnid ]
else :
reccumdepends [ fnid ] = set ( )
2009-07-21 21:32:35 +00:00
reccumdepends [ fnid ] . update ( self . runq_depends [ task ] )
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2009-07-21 18:44:23 +00:00
taskfnid = self . runq_fnid [ task ]
for fnid in reccumdepends :
if task in reccumdepends [ fnid ] :
reccumdepends [ fnid ] . add ( task )
if taskfnid in reccumdepends :
reccumdepends [ fnid ] . update ( reccumdepends [ taskfnid ] )
# Resolve recursive 'recrdeptask' dependencies (B)
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2009-07-21 18:44:23 +00:00
if len ( runq_recrdepends [ task ] ) > 0 :
taskfnid = self . runq_fnid [ task ]
for dep in reccumdepends [ taskfnid ] :
2010-03-24 23:56:12 +00:00
# Ignore self references
2009-07-21 21:32:35 +00:00
if dep == task :
continue
2009-07-21 18:44:23 +00:00
for taskname in runq_recrdepends [ task ] :
if taskData . tasks_name [ dep ] == taskname :
self . runq_depends [ task ] . add ( dep )
2008-01-06 16:51:51 +00:00
# Step B - Mark all active tasks
#
# Start with the tasks we were asked to run and mark all dependencies
# as active too. If the task is to be 'forced', clear its stamp. Once
# all active tasks are marked, prune the ones we don't need.
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Marking Active Tasks " )
2006-11-16 15:02:15 +00:00
def mark_active ( listid , depth ) :
"""
Mark an item as active along with its depends
( calls itself recursively )
"""
if runq_build [ listid ] == 1 :
return
runq_build [ listid ] = 1
depends = self . runq_depends [ listid ]
for depend in depends :
mark_active ( depend , depth + 1 )
2008-03-03 22:01:45 +00:00
self . target_pairs = [ ]
2007-04-01 15:04:49 +00:00
for target in self . targets :
2006-11-16 15:02:15 +00:00
targetid = taskData . getbuild_id ( target [ 0 ] )
if targetid not in taskData . build_targets :
continue
2007-02-21 20:15:13 +00:00
if targetid in taskData . failed_deps :
continue
2006-11-16 15:02:15 +00:00
fnid = taskData . build_targets [ targetid ] [ 0 ]
2008-03-03 22:01:45 +00:00
fn = taskData . fn_index [ fnid ]
self . target_pairs . append ( ( fn , target [ 1 ] ) )
2007-01-08 23:53:01 +00:00
2006-11-16 15:02:15 +00:00
if fnid in taskData . failed_fnids :
continue
2008-01-06 16:51:51 +00:00
if target [ 1 ] not in taskData . tasks_lookup [ fnid ] :
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " Task %s does not exist for target %s " % ( target [ 1 ] , target [ 0 ] ) )
2008-01-06 16:51:51 +00:00
2006-11-16 15:02:15 +00:00
listid = taskData . tasks_lookup [ fnid ] [ target [ 1 ] ]
mark_active ( listid , 1 )
2008-01-06 16:51:51 +00:00
# Step C - Prune all inactive tasks
#
# Once all active tasks are marked, prune the ones we don't need.
2006-11-16 15:02:15 +00:00
maps = [ ]
delcount = 0
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
if runq_build [ listid - delcount ] == 1 :
maps . append ( listid - delcount )
else :
del self . runq_fnid [ listid - delcount ]
del self . runq_task [ listid - delcount ]
del self . runq_depends [ listid - delcount ]
del runq_build [ listid - delcount ]
del self . runq_revdeps [ listid - delcount ]
2010-08-31 13:49:43 +00:00
del self . runq_hash [ listid - delcount ]
2006-11-16 15:02:15 +00:00
delcount = delcount + 1
maps . append ( - 1 )
2008-01-06 16:51:51 +00:00
#
# Step D - Sanity checks and computation
#
# Check to make sure we still have tasks to run
2006-11-16 15:02:15 +00:00
if len ( self . runq_fnid ) == 0 :
if not taskData . abort :
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above. " )
2009-07-21 18:44:23 +00:00
else :
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " No active tasks and not in --continue mode?! Please report this bug. " )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Pruned %s inactive tasks, %s left " , delcount , len ( self . runq_fnid ) )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Remap the dependencies to account for the deleted tasks
# Check we didn't delete a task we depend on
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
newdeps = [ ]
origdeps = self . runq_depends [ listid ]
for origdep in origdeps :
if maps [ origdep ] == - 1 :
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " Invalid mapping - Should never happen! " )
2006-11-16 15:02:15 +00:00
newdeps . append ( maps [ origdep ] )
2009-05-12 15:53:22 +00:00
self . runq_depends [ listid ] = set ( newdeps )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Assign Weightings " )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Generate a list of reverse dependencies to ease future calculations
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
for dep in self . runq_depends [ listid ] :
self . runq_revdeps [ dep ] . add ( listid )
2008-01-06 16:51:51 +00:00
# Identify tasks at the end of dependency chains
# Error on circular dependency loops (length two)
2006-11-16 15:02:15 +00:00
endpoints = [ ]
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
revdeps = self . runq_revdeps [ listid ]
if len ( revdeps ) == 0 :
endpoints . append ( listid )
for dep in revdeps :
if dep in self . runq_depends [ listid ] :
#self.dump_data(taskData)
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " Task %s ( %s ) has circular dependency on %s ( %s ) " % ( taskData . fn_index [ self . runq_fnid [ dep ] ] , self . runq_task [ dep ] , taskData . fn_index [ self . runq_fnid [ listid ] ] , self . runq_task [ listid ] ) )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Compute totals (have %s endpoint(s)) " , len ( endpoints ) )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Calculate task weights
2008-01-06 16:51:51 +00:00
# Check of higher length circular dependencies
self . runq_weight = self . calculate_task_weights ( endpoints )
# Sanity Check - Check for multiple tasks building the same provider
2007-09-02 14:10:08 +00:00
prov_list = { }
seen_fn = [ ]
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2007-09-02 14:10:08 +00:00
fn = taskData . fn_index [ self . runq_fnid [ task ] ]
if fn in seen_fn :
continue
seen_fn . append ( fn )
for prov in self . dataCache . fn_provides [ fn ] :
if prov not in prov_list :
prov_list [ prov ] = [ fn ]
2010-03-24 23:56:12 +00:00
elif fn not in prov_list [ prov ] :
2007-09-02 14:10:08 +00:00
prov_list [ prov ] . append ( fn )
for prov in prov_list :
if len ( prov_list [ prov ] ) > 1 and prov not in self . multi_provider_whitelist :
2012-02-13 11:41:31 +00:00
msg = " Multiple .bb files are due to be built which each provide %s ( %s ). " % ( prov , " " . join ( prov_list [ prov ] ) )
if self . warn_multi_bb :
logger . warn ( msg )
else :
msg + = " \n This usually means one provides something the other doesn ' t and should. "
logger . error ( msg )
2007-09-02 14:10:08 +00:00
2008-05-04 23:22:24 +00:00
# Create a whitelist usable by the stamp checks
stampfnwhitelist = [ ]
for entry in self . stampwhitelist . split ( ) :
entryid = self . taskData . getbuild_id ( entry )
if entryid not in self . taskData . build_targets :
continue
fnid = self . taskData . build_targets [ entryid ] [ 0 ]
fn = self . taskData . fn_index [ fnid ]
stampfnwhitelist . append ( fn )
self . stampfnwhitelist = stampfnwhitelist
2010-08-19 10:36:29 +00:00
# Interate over the task list looking for tasks with a 'setscene' function
self . runq_setscene = [ ]
for task in range ( len ( self . runq_fnid ) ) :
setscene = taskData . gettask_id ( self . taskData . fn_index [ self . runq_fnid [ task ] ] , self . runq_task [ task ] + " _setscene " , False )
if not setscene :
continue
self . runq_setscene . append ( task )
2010-08-31 13:49:43 +00:00
# Interate over the task list and call into the siggen code
dealtwith = set ( )
todeal = set ( range ( len ( self . runq_fnid ) ) )
while len ( todeal ) > 0 :
for task in todeal . copy ( ) :
if len ( self . runq_depends [ task ] - dealtwith ) == 0 :
dealtwith . add ( task )
todeal . remove ( task )
procdep = [ ]
for dep in self . runq_depends [ task ] :
procdep . append ( self . taskData . fn_index [ self . runq_fnid [ dep ] ] + " . " + self . runq_task [ dep ] )
self . runq_hash [ task ] = bb . parse . siggen . get_taskhash ( self . taskData . fn_index [ self . runq_fnid [ task ] ] , self . runq_task [ task ] , procdep , self . dataCache )
2011-01-10 22:32:46 +00:00
self . hashes = { }
self . hash_deps = { }
2011-01-10 12:48:49 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2011-01-10 22:32:46 +00:00
identifier = ' %s . %s ' % ( self . taskData . fn_index [ self . runq_fnid [ task ] ] ,
self . runq_task [ task ] )
self . hashes [ identifier ] = self . runq_hash [ task ]
2010-09-27 14:57:13 +00:00
deps = [ ]
for dep in self . runq_depends [ task ] :
2011-01-10 22:32:46 +00:00
depidentifier = ' %s . %s ' % ( self . taskData . fn_index [ self . runq_fnid [ dep ] ] ,
self . runq_task [ dep ] )
deps . append ( depidentifier )
self . hash_deps [ identifier ] = deps
2010-09-27 14:57:13 +00:00
2011-01-10 21:59:34 +00:00
# Remove stamps for targets if force mode active
if self . cooker . configuration . force :
for ( fn , target ) in self . target_pairs :
logger . verbose ( " Remove stamp %s , %s " , target , fn )
bb . build . del_stamp ( target , self . dataCache , fn )
2010-08-24 23:58:23 +00:00
return len ( self . runq_fnid )
2010-08-18 10:30:53 +00:00
def dump_data(self, taskQueue):
    """
    Dump some debug information on the internal data structures.

    Logs (at debug level 3) every runqueue entry with its filename,
    task name, weight, forward dependencies and reverse dependencies,
    first in raw index order and then in scheduler priority order.

    taskQueue: TaskData-like object providing fn_index to resolve
    runq_fnid entries back to filenames.
    """
    logger.debug(3, "run_tasks:")
    for task in xrange(len(self.rqdata.runq_task)):
        logger.debug(3, " (%s)%s - %s: %s   Deps %s RevDeps %s", task,
                     taskQueue.fn_index[self.rqdata.runq_fnid[task]],
                     self.rqdata.runq_task[task],
                     self.rqdata.runq_weight[task],
                     self.rqdata.runq_depends[task],
                     self.rqdata.runq_revdeps[task])

    logger.debug(3, "sorted_tasks:")
    for task1 in xrange(len(self.rqdata.runq_task)):
        # prio_map translates priority order back to task indices; only
        # entries present in the map are shown.
        if task1 in self.prio_map:
            task = self.prio_map[task1]
            logger.debug(3, " (%s)%s - %s: %s   Deps %s RevDeps %s", task,
                         taskQueue.fn_index[self.rqdata.runq_fnid[task]],
                         self.rqdata.runq_task[task],
                         self.rqdata.runq_weight[task],
                         self.rqdata.runq_depends[task],
                         self.rqdata.runq_revdeps[task])
2010-08-18 10:30:53 +00:00
class RunQueue:
    """
    Top level runqueue state machine.

    Drives a RunQueueData through preparation, setscene execution,
    real task execution and cleanup via the runQueue* state constants.
    """
    def __init__(self, cooker, cfgData, dataCache, taskData, targets):
        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets)

        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None
        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION", True) or None

        self.state = runQueuePrepare

        # For disk space monitoring (configured via BB_DISKMON_DIRS /
        # BB_DISKMON_WARNINTERVAL); checked once per state-machine
        # iteration while tasks are executing.
        self.dm = monitordisk.diskMonitor(cfgData)

        self.rqexe = None

    def check_stamps(self):
        """
        Walk the whole runqueue and classify each task's stamp as current
        or not, propagating staleness through the dependency graph.

        Returns the list of task ids whose stamps are up to date.
        """
        unchecked = {}
        current = []
        notcurrent = []
        buildable = []

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True

        stampwhitelist = []
        if self.stamppolicy == "whitelist":
            stampwhitelist = self.rqdata.stampfnwhitelist

        for task in xrange(len(self.rqdata.runq_fnid)):
            unchecked[task] = ""
            if len(self.rqdata.runq_depends[task]) == 0:
                buildable.append(task)

        def check_buildable(self, task, buildable):
            # A reverse dependency becomes checkable once all of its
            # dependencies have left the unchecked set.
            for revdep in self.rqdata.runq_revdeps[task]:
                alldeps = 1
                for dep in self.rqdata.runq_depends[revdep]:
                    if dep in unchecked:
                        alldeps = 0
                if alldeps == 1:
                    if revdep in unchecked:
                        buildable.append(revdep)

        for task in xrange(len(self.rqdata.runq_fnid)):
            if task not in unchecked:
                continue
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]
            stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
            # If the stamp is missing its not current
            if not os.access(stampfile, os.F_OK):
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue
            # If its a 'nostamp' task, it's not current
            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'nostamp' in taskdep and task in taskdep['nostamp']:
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue

        while (len(buildable) > 0):
            nextbuildable = []
            for task in buildable:
                if task in unchecked:
                    # NOTE(review): original read self.taskData here, but
                    # RunQueue has no such attribute (TaskData lives on
                    # rqdata) - corrected to self.rqdata.taskData.
                    fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                    taskname = self.rqdata.runq_task[task]
                    stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
                    iscurrent = True

                    t1 = os.stat(stampfile)[stat.ST_MTIME]
                    for dep in self.rqdata.runq_depends[task]:
                        if iscurrent:
                            fn2 = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[dep]]
                            taskname2 = self.rqdata.runq_task[dep]
                            stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2)
                            if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                                if dep in notcurrent:
                                    iscurrent = False
                                else:
                                    t2 = os.stat(stampfile2)[stat.ST_MTIME]
                                    if t1 < t2:
                                        iscurrent = False
                    del unchecked[task]
                    if iscurrent:
                        current.append(task)
                    else:
                        notcurrent.append(task)

                check_buildable(self, task, nextbuildable)

            buildable = nextbuildable

        #for task in xrange(len(self.rqdata.runq_fnid)):
        #    fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
        #    taskname = self.rqdata.runq_task[task]
        #    print "%s %s.%s" % (task, taskname, fn)
        #print "Unchecked: %s" % unchecked
        #print "Current: %s" % current
        #print "Not current: %s" % notcurrent
        if len(unchecked) > 0:
            bb.msg.fatal("RunQueue", "check_stamps fatal internal error")
        return current

    def check_stamp_task(self, task, taskname = None, recurse = False):
        """
        Check whether a single task's stamp is current relative to the
        stamps of its dependencies.

        task: runqueue task index
        taskname: stamp to check (defaults to the task's own name)
        recurse: when True, also require every dependency's stamp chain
                 to be current

        Returns True when the stamp is valid.
        """
        def get_timestamp(f):
            # Missing or unreadable stamps are treated as "no timestamp".
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                return None

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True

        stampwhitelist = []
        if self.stamppolicy == "whitelist":
            stampwhitelist = self.rqdata.stampfnwhitelist

        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
        if taskname is None:
            taskname = self.rqdata.runq_task[task]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)

        # If the stamp is missing its not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If its a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCache.task_deps[fn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        # Setscene stamps only need to exist; their deps aren't compared.
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runq_depends[task]:
            if iscurrent:
                fn2 = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[dep]]
                taskname2 = self.rqdata.runq_task[dep]
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCache, fn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                # A newer setscene stamp supersedes the real task stamp.
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                    if t1 < t2:
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                    if recurse and iscurrent:
                        iscurrent = self.check_stamp_task(dep, recurse = True)

        return iscurrent

    def execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        Returns False when the state machine has finished, otherwise a
        retval for the caller's idle loop (0.5 = poll again shortly).
        """
        retval = 0.5

        if self.state is runQueuePrepare:
            self.rqexe = RunQueueExecuteDummy(self)
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit

        if self.state is runQueueSceneInit:
            if self.cooker.configuration.dump_signatures:
                self.dump_signatures()
            else:
                self.rqexe = RunQueueExecuteScenequeue(self)

        # Abort/stop/warn according to BB_DISKMON_DIRS while executing.
        if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp]:
            self.dm.check(self)

        if self.state is runQueueSceneRun:
            retval = self.rqexe.execute()

        if self.state is runQueueRunInit:
            logger.info("Executing RunQueue Tasks")
            self.rqexe = RunQueueExecuteTasks(self)
            self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            self.rqexe.finish()

        if self.state is runQueueComplete or self.state is runQueueFailed:
            if self.rqexe.stats.failed:
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            else:
                # Let's avoid the word "failed" if nothing actually did
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            if not self.rqdata.taskData.tryaltconfigs:
                raise bb.runqueue.TaskFailure(self.rqexe.failed_fnids)
            for fnid in self.rqexe.failed_fnids:
                self.rqdata.taskData.fail_fnid(fnid)
            self.rqdata.reset()

        if self.state is runQueueComplete:
            # All done
            return False

        if self.state is runQueueChildProcess:
            print("Child process, eeek, shouldn't happen!")
            return False

        # Loop
        return retval

    def finish_runqueue(self, now = False):
        """Stop execution; send SIGTERM immediately when now is True."""
        if not self.rqexe:
            return

        if now:
            self.rqexe.finish_now()
        else:
            self.rqexe.finish()

    def dump_signatures(self):
        """
        Reparse every recipe in the runqueue so the signature generator
        has full dependency data, then dump all signatures.
        """
        self.state = runQueueComplete
        done = set()
        bb.note("Reparsing files to collect dependency data")
        for task in range(len(self.rqdata.runq_fnid)):
            if self.rqdata.runq_fnid[task] not in done:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                # Loading the full data has the side effect of feeding
                # bb.parse.siggen; the returned datastore is not needed.
                the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)
                done.add(self.rqdata.runq_fnid[task])

        bb.parse.siggen.dump_sigs(self.rqdata.dataCache)

        return
2010-08-18 16:13:06 +00:00
class RunQueueExecute:
    """
    Base class for runqueue executors: tracks per-task state, reaps
    finished worker processes and forks off new task workers.
    """
    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"

        self.runq_buildable = []
        self.runq_running = []
        self.runq_complete = []
        self.build_pids = {}
        self.build_pipes = {}
        self.build_stamps = {}
        self.failed_fnids = []

    def runqueue_process_waitpid(self):
        """
        Return None if there are no processes awaiting result collection, otherwise
        collect the process exit codes and close the information pipe.
        """
        pid, status = os.waitpid(-1, os.WNOHANG)
        if pid == 0 or os.WIFSTOPPED(status):
            return None

        if os.WIFEXITED(status):
            status = os.WEXITSTATUS(status)
        elif os.WIFSIGNALED(status):
            # Per shell conventions for $?, when a process exits due to
            # a signal, we return an exit code of 128 + SIGNUM
            status = 128 + os.WTERMSIG(status)

        task = self.build_pids[pid]
        del self.build_pids[pid]
        self.build_pipes[pid].close()
        del self.build_pipes[pid]
        # self.build_stamps[pid] may not exist when use shared work directory.
        if pid in self.build_stamps:
            del self.build_stamps[pid]
        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):
        """Terminate all remaining workers immediately with SIGTERM."""
        if self.stats.active:
            logger.info("Sending SIGTERM to remaining %s tasks", self.stats.active)
            for k, v in self.build_pids.iteritems():
                try:
                    # Negative pid: signal the worker's whole process group.
                    os.kill(-k, signal.SIGTERM)
                    os.waitpid(-1, 0)
                except:
                    pass
        for pipe in self.build_pipes:
            self.build_pipes[pipe].read()

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        """Drain pipes and let running workers complete before stopping."""
        self.rq.state = runQueueCleanUp

        for pipe in self.build_pipes:
            self.build_pipes[pipe].read()

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.runqueue_process_waitpid()
            return

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def fork_off_task(self, fn, task, taskname, quieterrors=False):
        """
        Fork a child process to execute one task.

        Returns (pid, pipein, pipeout); the child never returns (it
        calls os._exit()).
        """
        # We need to setup the environment BEFORE the fork, since
        # a fork() or exec*() activates PSEUDO...

        envbackup = {}
        fakeenv = {}
        umask = None

        taskdep = self.rqdata.dataCache.task_deps[fn]
        if 'umask' in taskdep and taskname in taskdep['umask']:
            # umask might come in as a number or text string..
            try:
                umask = int(taskdep['umask'][taskname], 8)
            except TypeError:
                umask = taskdep['umask'][taskname]

        if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
            envvars = (self.rqdata.dataCache.fakerootenv[fn] or "").split()
            for key, value in (var.split('=') for var in envvars):
                envbackup[key] = os.environ.get(key)
                os.environ[key] = value
                fakeenv[key] = value

            fakedirs = (self.rqdata.dataCache.fakerootdirs[fn] or "").split()
            for p in fakedirs:
                bb.utils.mkdirhier(p)
            logger.debug(2, 'Running %s:%s under fakeroot, fakedirs: %s' %
                            (fn, taskname, ', '.join(fakedirs)))
        else:
            envvars = (self.rqdata.dataCache.fakerootnoenv[fn] or "").split()
            for key, value in (var.split('=') for var in envvars):
                envbackup[key] = os.environ.get(key)
                os.environ[key] = value
                fakeenv[key] = value

        sys.stdout.flush()
        sys.stderr.flush()
        try:
            pipein, pipeout = os.pipe()
            pipein = os.fdopen(pipein, 'rb', 4096)
            pipeout = os.fdopen(pipeout, 'wb', 0)
            pid = os.fork()
        except OSError as e:
            bb.msg.fatal("RunQueue", "fork failed: %d (%s)" % (e.errno, e.strerror))

        if pid == 0:
            pipein.close()

            # Save out the PID so that the event can include it the
            # events
            bb.event.worker_pid = os.getpid()
            bb.event.worker_pipe = pipeout

            self.rq.state = runQueueChildProcess
            # Make the child the process group leader
            os.setpgid(0, 0)
            # No stdin
            newsi = os.open(os.devnull, os.O_RDWR)
            os.dup2(newsi, sys.stdin.fileno())

            if umask:
                os.umask(umask)

            self.cooker.configuration.data.setVar("BB_WORKERCONTEXT", "1")
            self.cooker.configuration.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self)
            self.cooker.configuration.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY2", fn)
            bb.parse.siggen.set_taskdata(self.rqdata.hashes, self.rqdata.hash_deps)
            ret = 0
            try:
                the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)
                the_data.setVar('BB_TASKHASH', self.rqdata.runq_hash[task])
                for h in self.rqdata.hashes:
                    the_data.setVar("BBHASH_%s" % h, self.rqdata.hashes[h])
                for h in self.rqdata.hash_deps:
                    the_data.setVar("BBHASHDEPS_%s" % h, self.rqdata.hash_deps[h])

                # exported_vars() returns a generator which *cannot* be passed to os.environ.update()
                # successfully. We also need to unset anything from the environment which shouldn't be there
                exports = bb.data.exported_vars(the_data)
                bb.utils.empty_environment()
                for e, v in exports:
                    os.environ[e] = v
                for e in fakeenv:
                    os.environ[e] = fakeenv[e]
                    the_data.setVar(e, fakeenv[e])

                if quieterrors:
                    the_data.setVarFlag(taskname, "quieterrors", "1")

            except Exception as exc:
                if not quieterrors:
                    logger.critical(str(exc))
                os._exit(1)
            try:
                if not self.cooker.configuration.dry_run:
                    ret = bb.build.exec_task(fn, taskname, the_data)
                os._exit(ret)
            except:
                # Any failure in the child must exit, never propagate back
                # into the parent's control flow.
                os._exit(1)
        else:
            # Parent: restore any environment we clobbered pre-fork.
            for key, value in envbackup.iteritems():
                if value is None:
                    del os.environ[key]
                else:
                    os.environ[key] = value

        return pid, pipein, pipeout
2010-08-18 16:13:06 +00:00
2010-08-24 23:58:23 +00:00
class RunQueueExecuteDummy(RunQueueExecute):
    """Placeholder executor used before any real executor exists."""

    def __init__(self, rq):
        # Deliberately skip RunQueueExecute.__init__: no tasks will run.
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        # Nothing was ever started, so finishing is trivially complete.
        self.rq.state = runQueueComplete
        return
2010-08-24 23:58:23 +00:00
2010-08-18 16:13:06 +00:00
class RunQueueExecuteTasks ( RunQueueExecute ) :
def __init__(self, rq):
    """
    Set up the real task executor: compute which tasks are covered by
    setscene results, honour BB_SETSCENE_VERIFY_FUNCTION, and select a
    scheduler.
    """
    RunQueueExecute.__init__(self, rq)

    self.stats = RunQueueStats(len(self.rqdata.runq_fnid))

    # Mark initial buildable tasks
    for task in xrange(self.stats.total):
        self.runq_running.append(0)
        self.runq_complete.append(0)
        if len(self.rqdata.runq_depends[task]) == 0:
            self.runq_buildable.append(1)
        else:
            self.runq_buildable.append(0)
        if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
            self.rq.scenequeue_covered.add(task)

    # Iterate to a fixed point: a task is covered when all its reverse
    # dependencies are covered and belong to the same recipe.
    found = True
    while found:
        found = False
        for task in xrange(self.stats.total):
            if task in self.rq.scenequeue_covered:
                continue
            logger.debug(1, 'Considering %s (%s): %s' % (task, self.rqdata.get_user_idstring(task), str(self.rqdata.runq_revdeps[task])))

            if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
                ok = True
                for revdep in self.rqdata.runq_revdeps[task]:
                    if self.rqdata.runq_fnid[task] != self.rqdata.runq_fnid[revdep]:
                        logger.debug(1, 'Found "bad" dep %s (%s) for %s (%s)' % (revdep, self.rqdata.get_user_idstring(revdep), task, self.rqdata.get_user_idstring(task)))
                        ok = False
                        break
                if ok:
                    found = True
                    self.rq.scenequeue_covered.add(task)

    logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

    # Allow the metadata to elect for setscene tasks to run anyway
    covered_remove = set()
    if self.rq.setsceneverify:
        call = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d)"
        locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : self.rqdata.runq_task, "fnids" : self.rqdata.runq_fnid, "fns" : self.rqdata.taskData.fn_index, "d" : self.cooker.configuration.data }
        covered_remove = bb.utils.better_eval(call, locs)

    for task in covered_remove:
        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
        taskname = self.rqdata.runq_task[task] + '_setscene'
        bb.build.del_stamp(taskname, self.rqdata.dataCache, fn)
        logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        self.rq.scenequeue_covered.remove(task)

    logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)

    event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)

    schedulers = self.get_schedulers()
    for scheduler in schedulers:
        if self.scheduler == scheduler.name:
            self.sched = scheduler(self, self.rqdata)
            logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
            break
    else:
        bb.fatal("Invalid scheduler '%s'.  Available schedulers: %s" %
                 (self.scheduler, ", ".join(obj.name for obj in schedulers)))
def get_schedulers(self):
    """
    Return the set of available scheduler classes.

    Collects every RunQueueScheduler subclass defined in this module,
    then adds any user-supplied classes named in BB_SCHEDULERS as
    importable dotted paths (e.g. "mymodule.MyScheduler").

    Exits with SystemExit(1) if a user scheduler module cannot be imported.
    """
    schedulers = set(obj for obj in globals().values()
                     if type(obj) is type and
                        issubclass(obj, RunQueueScheduler))

    user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
    if user_schedulers:
        for sched in user_schedulers.split():
            # A bare name cannot be imported; only dotted paths are accepted.
            if "." not in sched:
                bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                continue

            modname, name = sched.rsplit(".", 1)
            try:
                module = __import__(modname, fromlist=(name,))
            except ImportError as exc:
                logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                raise SystemExit(1)
            else:
                schedulers.add(getattr(module, name))

    return schedulers
2010-08-18 16:13:06 +00:00
2010-08-19 21:35:33 +00:00
def task_completeoutright(self, task):
    """
    Mark a task as completed

    Look at the reverse dependencies and mark any task with
    completed dependencies as buildable
    """
    self.runq_complete[task] = 1
    for revdep in self.rqdata.runq_revdeps[task]:
        # Already running or already flagged buildable: nothing to do.
        if self.runq_running[revdep] == 1 or self.runq_buildable[revdep] == 1:
            continue
        # Becomes buildable only once every one of its dependencies completed.
        if all(self.runq_complete[dep] == 1 for dep in self.rqdata.runq_depends[revdep]):
            self.runq_buildable[revdep] = 1
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
            taskname = self.rqdata.runq_task[revdep]
            logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname)
2010-08-19 21:35:33 +00:00
def task_complete(self, task):
    """Bump completion stats, announce the completion, then propagate it."""
    self.stats.taskCompleted()
    completion = runQueueTaskCompleted(task, self.stats, self.rq)
    bb.event.fire(completion, self.cfgData)
    self.task_completeoutright(task)
2010-01-20 18:46:02 +00:00
def task_fail(self, task, exitcode):
    """Record a task failure and update the state engine.

    Remembers the failing recipe, fires a runQueueTaskFailed event and,
    when the abort flag is set on the task data, moves the runqueue into
    its cleanup state.
    """
    self.stats.taskFailed()
    self.failed_fnids.append(self.rqdata.runq_fnid[task])
    bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
    if self.rqdata.taskData.abort:
        self.rq.state = runQueueCleanUp
2010-08-18 16:21:40 +00:00
def task_skip(self, task):
    """Skip a task: flag it running and buildable, then count it as
    completed-and-skipped while propagating completion to dependents."""
    for state in (self.runq_running, self.runq_buildable):
        state[task] = 1
    self.task_completeoutright(task)
    self.stats.taskCompleted()
    self.stats.taskSkipped()
2010-08-18 16:13:06 +00:00
def execute(self):
    """
    Run the tasks in a queue prepared by rqdata.prepare()

    One step of the runqueue state machine: start at most one new task
    per call, then service worker pipes.  Returns True to be called
    again immediately, or 0.5 to be called again after a short wait.
    """
    if self.stats.total == 0:
        # nothing to do
        self.rq.state = runQueueCleanUp

    task = self.sched.next()
    if task is not None:
        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
        taskname = self.rqdata.runq_task[task]

        if task in self.rq.scenequeue_covered:
            # The setscene pass already provided this task's output.
            logger.debug(2, "Setscene covered task %s (%s)", task,
                         self.rqdata.get_user_idstring(task))
            self.task_skip(task)
            return True

        if self.rq.check_stamp_task(task, taskname):
            # Stamp says the task output is already up to date.
            logger.debug(2, "Stamp current task %s (%s)", task,
                         self.rqdata.get_user_idstring(task))
            self.task_skip(task)
            return True

        taskdep = self.rqdata.dataCache.task_deps[fn]
        if 'noexec' in taskdep and taskname in taskdep['noexec']:
            # noexec tasks are never forked off: fire the start event,
            # write the stamp and mark the task complete directly.
            startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                             noexec=True)
            bb.event.fire(startevent, self.cfgData)
            self.runq_running[task] = 1
            self.stats.taskActive()
            bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
            self.task_complete(task)
            return True
        else:
            startevent = runQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

        # Fork a worker process for the task and track its pid, its
        # event pipe and the stamp file it is expected to produce.
        pid, pipein, pipeout = self.fork_off_task(fn, task, taskname)

        self.build_pids[pid] = task
        self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
        self.build_stamps[pid] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
        self.runq_running[task] = 1
        self.stats.taskActive()
        # Room for more parallel tasks: come straight back for another.
        if self.stats.active < self.number_tasks:
            return True

    # Drain any pending events from the workers' pipes.
    for pipe in self.build_pipes:
        self.build_pipes[pipe].read()

    if self.stats.active > 0:
        # No child exited yet: ask to be polled again after a delay.
        if self.runqueue_process_waitpid() is None:
            return 0.5
        return True

    if len(self.failed_fnids) != 0:
        self.rq.state = runQueueFailed
        return True

    # Sanity Checks
    for task in xrange(self.stats.total):
        if self.runq_buildable[task] == 0:
            logger.error("Task %s never buildable!", task)
        if self.runq_running[task] == 0:
            logger.error("Task %s never ran!", task)
        if self.runq_complete[task] == 0:
            logger.error("Task %s never completed!", task)
    self.rq.state = runQueueComplete
    return True
2010-08-19 10:36:29 +00:00
class RunQueueExecuteScenequeue(RunQueueExecute):
    """
    Runs the _setscene variants of tasks to work out which real tasks can
    be skipped ("covered") because their output can be reused.
    """
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        # Setscene-queue indices proven reusable / proven not reusable.
        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene))

        endpoints = {}
        sq_revdeps = []
        sq_revdeps_new = []
        sq_revdeps_squash = []

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            self.runq_buildable.append(0)

        # Seed the walk: copy the full reverse-dependency sets and treat
        # leaf tasks (no revdeps) that are not setscene tasks as endpoints.
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints[task] = set()

        # Each dependency of a setscene task is an endpoint carrying that
        # setscene task as the value to propagate downwards.
        for task in self.rqdata.runq_setscene:
            for dep in self.rqdata.runq_depends[task]:
                if dep not in endpoints:
                    endpoints[dep] = set()
                endpoints[dep].add(task)

        def process_endpoints(endpoints):
            # Recursively push accumulated setscene revdeps down through
            # the task graph, consuming sq_revdeps edges as we go, until
            # everything has been attributed to a setscene task.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    # A dep becomes a new endpoint once all its original
                    # revdeps are consumed or it has gathered new ones.
                    if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)

        # Collapse: express each setscene task's reverse dependencies as
        # indices within runq_setscene itself.
        for task in xrange(len(self.rqdata.runq_fnid)):
            if task in self.rqdata.runq_setscene:
                deps = set()
                for dep in sq_revdeps_new[task]:
                    deps.add(self.rqdata.runq_setscene.index(dep))
                sq_revdeps_squash.append(deps)
            elif len(sq_revdeps_new[task]) != 0:
                # Non-setscene tasks must have had everything propagated away.
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")

        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for task in self.rqdata.runq_setscene:
            realid = self.rqdata.taskData.gettask_id(self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]], self.rqdata.runq_task[task] + "_setscene", False)
            idepends = self.rqdata.taskData.tasks_idepends[realid]
            for (depid, idependtask) in idepends:
                if depid not in self.rqdata.taskData.build_targets:
                    continue
                depdata = self.rqdata.taskData.build_targets[depid][0]
                if depdata is None:
                    continue
                dep = self.rqdata.taskData.fn_index[depdata]
                taskid = self.rqdata.get_task_id(self.rqdata.taskData.getfn_id(dep), idependtask.replace("_setscene", ""))
                if taskid is None:
                    bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (self.rqdata.taskData.tasks_name[realid], dep, idependtask))
                sq_revdeps_squash[self.rqdata.runq_setscene.index(task)].add(self.rqdata.runq_setscene.index(taskid))
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[self.rqdata.runq_setscene.index(taskid)] = set()

        #for task in xrange(len(sq_revdeps_squash)):
        #    print "Task %s: %s.%s is %s " % (task, self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[self.rqdata.runq_setscene[task]]], self.rqdata.runq_task[self.rqdata.runq_setscene[task]] + "_setscene", sq_revdeps_squash[task])

        # Build the forward-dependency view and a consumable copy of the
        # reverse dependencies for scenequeue_updatecounters().
        self.sq_deps = []
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for task in xrange(len(self.sq_revdeps)):
            self.sq_deps.append(set())
        for task in xrange(len(self.sq_revdeps)):
            for dep in self.sq_revdeps[task]:
                self.sq_deps[dep].add(task)

        for task in xrange(len(self.sq_revdeps)):
            if len(self.sq_revdeps[task]) == 0:
                self.runq_buildable[task] = 1

        # Optionally let a metadata hook decide which setscene tasks have
        # valid prebuilt output available.
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for task in xrange(len(self.sq_revdeps)):
                realtask = self.rqdata.runq_setscene[task]
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                taskname = self.rqdata.runq_task[realtask]
                taskdep = self.rqdata.dataCache.task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    # noexec tasks just need a stamp; nothing to validate.
                    noexec.append(task)
                    self.task_skip(task)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
                    continue

                if self.rq.check_stamp_task(realtask, taskname + "_setscene"):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
                sq_hash.append(self.rqdata.runq_hash[realtask])
                sq_taskname.append(taskname)
                sq_task.append(task)
            # The hook returns indices into the sq_* lists of valid tasks.
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.configuration.data }
            valid = bb.utils.better_eval(call, locs)

            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for task in xrange(len(self.sq_revdeps)):
                if task not in valid_new and task not in noexec:
                    realtask = self.rqdata.runq_setscene[task]
                    logger.debug(2, 'No package found, so skipping setscene task %s',
                                 self.rqdata.get_user_idstring(realtask))
                    self.task_failoutright(task)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

    def scenequeue_updatecounters(self, task):
        # A setscene task finished (either way): consume it from each
        # dependent's pending-revdeps set; dependents with none left
        # become buildable.
        for dep in self.sq_deps[task]:
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable[dep] = 1

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        index = self.rqdata.runq_setscene[task]
        logger.debug(1, 'Found task %s which could be accelerated',
                     self.rqdata.get_user_idstring(index))

        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def task_complete(self, task):
        # Successful setscene run: count it and record it as covered.
        self.stats.taskCompleted()
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        # Setscene failure is not fatal: the real task will run instead.
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task)

    def task_failoutright(self, task):
        # Mark a setscene task as not-covered without running it.
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.stats.taskCompleted()
        self.stats.taskSkipped()
        index = self.rqdata.runq_setscene[task]
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task)

    def task_skip(self, task):
        # Mark a setscene task as covered without running it.
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """
        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in xrange(self.stats.total):
                if self.runq_buildable[nexttask] == 1 and self.runq_running[nexttask] != 1:
                    task = nexttask
                    break
        if task is not None:
            realtask = self.rqdata.runq_setscene[task]
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
            taskname = self.rqdata.runq_task[realtask] + "_setscene"

            if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask], recurse = True):
                # The real task is already up to date; no point running
                # the setscene variant.
                logger.debug(2, 'Stamp for underlying task %s(%s) is current, so skipping setscene variant',
                             task, self.rqdata.get_user_idstring(realtask))
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                # Forced targets must run for real, never be covered.
                for target in self.rqdata.target_pairs:
                    if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                        self.task_failoutright(task)
                        return True

            if self.rq.check_stamp_task(realtask, taskname):
                logger.debug(2, 'Setscene stamp current task %s(%s), so skip it and its dependencies',
                             task, self.rqdata.get_user_idstring(realtask))
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            # Note: the worker runs the real task id with the _setscene name.
            pid, pipein, pipeout = self.fork_off_task(fn, realtask, taskname)

            self.build_pids[pid] = task
            self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
            self.runq_running[task] = 1
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        for pipe in self.build_pipes:
            self.build_pipes[pipe].read()

        if self.stats.active > 0:
            # No child exited yet: ask to be polled again after a delay.
            if self.runqueue_process_waitpid() is None:
                return 0.5
            return True

        # Convert scenequeue_covered task numbers into full taskgraph ids
        oldcovered = self.scenequeue_covered
        self.rq.scenequeue_covered = set()
        for task in oldcovered:
            self.rq.scenequeue_covered.add(self.rqdata.runq_setscene[task])

        logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))

        self.rq.state = runQueueRunInit
        return True

    def fork_off_task(self, fn, task, taskname):
        # Setscene failures are expected and retried as real tasks, so
        # suppress the worker's error output.
        return RunQueueExecute.fork_off_task(self, fn, task, taskname, quieterrors=True)
2010-01-20 18:46:02 +00:00
class TaskFailure(Exception):
    """Raised when a task in a runqueue fails."""

    def __init__(self, x):
        # Store the failure payload directly as the exception args.
        self.args = x
class runQueueExitWait(bb.event.Event):
    """Event fired while waiting for task processes to exit."""

    def __init__(self, remain):
        # remain is the number of still-active tasks being waited on.
        self.message = "Waiting for %s active tasks to finish" % remain
        self.remain = remain
        bb.event.Event.__init__(self)
class runQueueEvent(bb.event.Event):
    """Base class for runqueue events.

    Carries the task id, a user-readable task string and a snapshot of
    the runqueue statistics at the time the event was created.
    """

    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = rq.rqdata.get_user_idstring(task)
        # Snapshot the stats so later mutation doesn't change the event.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2012-02-27 18:54:11 +00:00
class sceneQueueEvent(runQueueEvent):
    """Base class for scenequeue (setscene) events."""

    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # Re-derive the task string from the underlying real task,
        # flagged with its _setscene suffix.
        self.taskstring = rq.rqdata.get_user_idstring(rq.rqdata.runq_setscene[task], "_setscene")
2010-01-20 18:46:02 +00:00
class runQueueTaskStarted(runQueueEvent):
    """Event notifying that a task was started."""

    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # True when the task is a stamp-only (noexec) task.
        self.noexec = noexec
2010-01-20 18:46:02 +00:00
2012-02-27 18:54:11 +00:00
class sceneQueueTaskStarted(sceneQueueEvent):
    """Event notifying that a setscene task was started."""

    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        # True when the task is a stamp-only (noexec) task.
        self.noexec = noexec
2010-01-20 18:46:02 +00:00
class runQueueTaskFailed(runQueueEvent):
    """Event notifying that a task failed."""

    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        # Exit code of the failed task's worker process.
        self.exitcode = exitcode
2010-01-20 18:46:02 +00:00
2012-02-27 18:54:11 +00:00
class sceneQueueTaskFailed(sceneQueueEvent):
    """Event notifying that a setscene task failed."""

    def __init__(self, task, stats, exitcode, rq):
        sceneQueueEvent.__init__(self, task, stats, rq)
        # Exit code of the failed setscene task's worker process.
        self.exitcode = exitcode
2011-02-28 14:28:25 +00:00
2010-01-20 18:46:02 +00:00
class runQueueTaskCompleted(runQueueEvent):
    """Event notifying that a task completed successfully."""
2011-01-10 22:36:23 +00:00
def check_stamp_fn(fn, taskname, d):
    """Check the stamp for taskname in the currently executing recipe.

    The active RunQueueExecute and the real recipe filename are stashed
    in the datastore under private keys; the fn argument is ignored in
    favour of the stashed value.  Returns the stamp check result, or
    None when the task cannot be resolved.
    """
    rqexe = d.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY")
    fn = d.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY2")
    taskid = rqexe.rqdata.get_task_id(rqexe.rqdata.taskData.getfn_id(fn), taskname)
    if taskid is None:
        return None
    return rqexe.rq.check_stamp_task(taskid)
2008-05-13 07:53:18 +00:00
2010-01-20 18:46:02 +00:00
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d):
        self.input = pipein
        # The write end belongs to the worker; the server only reads.
        pipeout.close()
        # Reads must never block the server's main loop.
        flags = fcntl.fcntl(self.input, fcntl.F_GETFL)
        fcntl.fcntl(self.input, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        self.queue = ""
        self.d = d

    def read(self):
        """Pull pending data off the pipe and fire any complete events.

        Returns True when new data arrived during this call.
        """
        before = len(self.queue)
        try:
            self.queue += self.input.read(102400)
        except (OSError, IOError):
            # Non-blocking read with nothing available.
            pass
        got_data = len(self.queue) > before
        # Dispatch every complete "...</event>" chunk in the buffer.
        while True:
            index = self.queue.find("</event>")
            if index == -1:
                break
            bb.event.fire_from_worker(self.queue[:index + 8], self.d)
            self.queue = self.queue[index + 8:]
        return got_data

    def close(self):
        """Drain the pipe, then close the server-side descriptor."""
        while self.read():
            continue
        if self.queue:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()