2006-11-29 22:52:37 +00:00
#!/usr/bin/env python
2006-11-16 15:02:15 +00:00
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake ' RunQueue ' implementation
Handles preparation and execution of a queue of tasks
"""
2007-09-02 14:10:08 +00:00
# Copyright (C) 2006-2007 Richard Purdie
2007-01-08 23:53:01 +00:00
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
2011-01-10 12:48:49 +00:00
import copy
2010-06-10 17:35:31 +00:00
import os
import sys
2007-04-01 15:04:49 +00:00
import signal
2008-03-03 22:01:45 +00:00
import stat
2010-01-21 23:46:20 +00:00
import fcntl
2013-06-07 17:11:09 +00:00
import errno
2010-06-10 15:05:52 +00:00
import logging
2013-12-19 09:40:52 +00:00
import re
2010-06-10 17:35:31 +00:00
import bb
2010-06-10 15:05:52 +00:00
from bb import msg , data , event
V5 Disk space monitoring
Monitor disk availability and take action when the free disk space or the
number of free inodes runs low; it is enabled when BB_DISKMON_DIRS
is set.
* Variable meanings(from meta-yocto/conf/local.conf.sample):
# Set the directories to monitor for disk usage, if more than one
# directories are mounted in the same device, then only one directory
# would be monitored since the monitor is based on the device.
# The format is:
# "action,directory,minimum_space,minimum_free_inode"
#
# The "action" must be set and should be one of:
# ABORT: Immediately abort
# STOPTASKS: The new tasks can't be executed any more, will stop the build
# when the running tasks have been done.
# WARN: show warnings (see BB_DISKMON_WARNINTERVAL for more information)
#
# The "directory" must be set, any directory is OK.
#
# Either "minimum_space" or "minimum_free_inode" (or both of them)
# should be set, otherwise the monitor would not be enabled,
# the unit can be G, M, K or none, but do NOT use GB, MB or KB
# (B is not needed).
#BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
#
# Set disk space and inode interval (only works when the action is "WARN",
# the unit can be G, M, or K, but do NOT use the GB, MB or KB
# (B is not needed), the format is:
# "disk_space_interval, disk_inode_interval", the default value is
# "50M,5K" which means that it would warn when the free space is
# lower than the minimum space(or inode), and would repeat the action
# when the disk space reduces 50M (or the amount of inode reduces 5k)
# again.
#BB_DISKMON_WARNINTERVAL = "50M,5K"
[YOCTO #1589]
(Bitbake rev: 4d173d441d2beb8e6492b6b1842682f8cf32e6cc)
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
2012-02-26 08:48:15 +00:00
from bb import monitordisk
2013-06-07 17:11:09 +00:00
import subprocess
try :
import cPickle as pickle
except ImportError :
import pickle
2010-06-10 15:05:52 +00:00
# Module-level loggers: "BitBake" is the root build logger, with a child
# logger dedicated to runqueue messages.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a bare 32-hex-digit (md5-style) checksum, case-insensitive, that is
# not embedded inside a longer alphanumeric run.
__find_md5__ = re.compile(r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])')
2007-04-01 15:04:49 +00:00
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue:
    counts of completed, skipped, failed and currently active tasks out
    of a fixed total.
    """
    def __init__(self, total):
        # All counters start at zero; only the overall task total is fixed.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a duplicate carrying all current counter values."""
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        """A running task failed: it is no longer active."""
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number=1):
        """'number' running tasks finished successfully."""
        self.active -= number
        self.completed += number

    def taskSkipped(self, number=1):
        """'number' tasks were skipped.

        Skipped tasks are counted as becoming active; a later
        taskCompleted() call accounts for them finishing.
        """
        self.active += number
        self.skipped += number

    def taskActive(self):
        """One more task has started running."""
        self.active += 1
2010-03-24 23:56:12 +00:00
# These values indicate the next step due to be run in the
# runQueue state machine.  Each constant names one state; the runqueue
# stores the current value and advances it as the build proceeds.
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
2010-01-20 18:46:02 +00:00
2010-07-22 17:54:58 +00:00
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runq_fnid)

        # Identity priority map: task id == priority.
        self.prio_map = list(range(self.numTasks))

        # Cache each task's stamp file and seed the buildable list with
        # whatever the runqueue already marks as buildable.
        self.buildable = []
        self.stamps = {}
        for taskid in range(self.numTasks):
            fnid = self.rqdata.runq_fnid[taskid]
            fn = self.rqdata.taskData.fn_index[fnid]
            taskname = self.rqdata.runq_task[taskid]
            self.stamps[taskid] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
            if self.rq.runq_buildable[taskid] == 1:
                self.buildable.append(taskid)

        # Built lazily on first use by next_buildable_task().
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Prune anything that has started running since the last call.
        self.buildable = [tid for tid in self.buildable if self.rq.runq_running[tid] != 1]
        if not self.buildable:
            return None

        if len(self.buildable) == 1:
            taskid = self.buildable[0]
            # Only usable if no running task is producing the same stamp.
            if self.stamps[taskid] not in self.rq.build_stamps.itervalues():
                return taskid

        # Lazily build the inverse priority map (task id -> priority).
        if not self.rev_prio_map:
            self.rev_prio_map = list(range(self.numTasks))
            for pos in range(self.numTasks):
                self.rev_prio_map[self.prio_map[pos]] = pos

        best = None
        bestprio = None
        for taskid in self.buildable:
            prio = self.rev_prio_map[taskid]
            if bestprio is None or prio < bestprio:
                if self.stamps[taskid] in self.rq.build_stamps.itervalues():
                    # Stamp already being produced by a running task.
                    continue
                bestprio = prio
                best = taskid
        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()

    def newbuilable(self, task):
        # NB: the misspelling of "newbuildable" is kept deliberately -
        # external callers use this name.
        self.buildable.append(task)
2008-01-06 16:51:51 +00:00
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Sort task ids by weight with a stable ascending sort (equal weights
        # keep ascending task-id order), then reverse so the heaviest tasks
        # come first.  This yields exactly the same ordering as the previous
        # repeated list.index()/sentinel scan, but in O(n log n) instead of
        # O(n^2), and without deep-copying the weight list.
        weights = self.rqdata.runq_weight
        self.prio_map = sorted(range(len(weights)), key=weights.__getitem__)
        self.prio_map.reverse()
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible. This works
    well where disk space is at a premium and classes like OE's rm_work are in
    force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)

        #FIXME - whilst this groups all fnids together it does not reorder the
        #fnid groups optimally.

        # Group the speed-sorted priority map so that all tasks belonging to
        # the same recipe file (fnid) are scheduled consecutively, in the
        # order each file first appears.  A single O(n) grouping pass
        # replaces the previous O(n^2) pop/index/delete shuffle while
        # producing the identical ordering.
        groups = {}
        group_order = []
        for entry in self.prio_map:
            fnid = self.rqdata.runq_fnid[entry]
            if fnid not in groups:
                groups[fnid] = []
                group_order.append(fnid)
            groups[fnid].append(entry)

        self.prio_map = []
        for fnid in group_order:
            self.prio_map.extend(groups[fnid])
2010-08-18 10:30:53 +00:00
class RunQueueData :
2006-11-16 15:02:15 +00:00
"""
BitBake Run Queue implementation
"""
2010-08-18 10:30:53 +00:00
def __init__ ( self , rq , cooker , cfgData , dataCache , taskData , targets ) :
2007-04-01 15:04:49 +00:00
self . cooker = cooker
self . dataCache = dataCache
self . taskData = taskData
self . targets = targets
2010-08-18 10:30:53 +00:00
self . rq = rq
2012-02-13 11:41:31 +00:00
self . warn_multi_bb = False
2007-04-01 15:04:49 +00:00
2012-03-03 10:41:41 +00:00
self . stampwhitelist = cfgData . getVar ( " BB_STAMP_WHITELIST " , True ) or " "
self . multi_provider_whitelist = ( cfgData . getVar ( " MULTI_PROVIDER_WHITELIST " , True ) or " " ) . split ( )
2010-08-18 10:30:53 +00:00
self . reset ( )
2006-11-16 15:02:15 +00:00
2010-08-18 10:30:53 +00:00
    def reset(self):
        """
        Clear the per-run task arrays so prepare() can repopulate them.
        These are parallel lists indexed by runqueue task id.
        """
        self.runq_fnid = []      # recipe (file) id of each task
        self.runq_task = []      # task name of each task
        self.runq_depends = []   # set of task ids each task depends on
        self.runq_revdeps = []   # set of task ids depending on each task
        self.runq_hash = []      # per-task hash string (filled in later)
2010-01-20 18:46:02 +00:00
bitbake: runqueue.py: improve printing dependent tasks
Print names instead of Task-IDs (and not mentioning they're task ids).
Previously we printed e.g.:
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (depends: Set([88, 282, 92, 87]))
Now we say
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (dependent Task-IDs ['busybox-native, do_patch', 'update-rc.d, do_populate_staging', 'busybox-native, do_populate_staging', 'shasum-native.bb, do_populate_staging', 'busybox-native, do_unpack'])
(Bitbake rev: 00eaf76fdc32eb515995b47dfa69eb90ca904b37)
Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
2010-02-08 17:50:34 +00:00
def runq_depends_names ( self , ids ) :
import re
ret = [ ]
for id in self . runq_depends [ ids ] :
nam = os . path . basename ( self . get_user_idstring ( id ) )
nam = re . sub ( " _[^,]*, " , " , " , nam )
ret . extend ( [ nam ] )
return ret
2013-09-18 12:15:49 +00:00
    def get_task_name(self, task):
        """Return the task name (entry in runq_task) for runqueue task id 'task'."""
        return self.runq_task[task]
    def get_task_file(self, task):
        """Return the recipe file name for runqueue task id 'task'."""
        return self.taskData.fn_index[self.runq_fnid[task]]
2013-09-18 12:15:53 +00:00
    def get_task_hash(self, task):
        """Return the hash string stored for runqueue task id 'task'."""
        return self.runq_hash[task]
2011-02-28 14:28:25 +00:00
def get_user_idstring ( self , task , task_name_suffix = " " ) :
2007-04-01 15:04:49 +00:00
fn = self . taskData . fn_index [ self . runq_fnid [ task ] ]
2011-02-28 14:28:25 +00:00
taskname = self . runq_task [ task ] + task_name_suffix
2006-11-16 15:02:15 +00:00
return " %s , %s " % ( fn , taskname )
2008-05-13 07:53:18 +00:00
def get_task_id ( self , fnid , taskname ) :
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2008-05-13 07:53:18 +00:00
if self . runq_fnid [ listid ] == fnid and self . runq_task [ listid ] == taskname :
return listid
return None
2008-01-06 16:51:51 +00:00
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        'tasks' is a list of unbuildable task ids; returns a list of message
        strings describing up to 10 distinct dependency loops.
        """
        from copy import deepcopy  # NOTE(review): unused - copy.deepcopy (module import) is used below

        valid_chains = []    # loops already reported (each normalised by chain_reorder)
        explored_deps = {}   # task id -> all (transitive) reverse deps seen from it
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in xrange(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in xrange(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(taskid, prev_chain):
            # Depth-first walk over reverse dependencies; a revdep already on
            # the current path (prev_chain) means we have found a loop.
            prev_chain.append(taskid)
            total_deps = []
            total_deps.extend(self.runq_revdeps[taskid])
            for revdep in self.runq_revdeps[taskid]:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("Task %s (%s) (dependent Tasks %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends_names(dep)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                # Only recurse into a revdep if it is unexplored, or if the
                # current path could form a new loop through it.
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    # Copy the path so sibling branches are not polluted.
                    find_chains(revdep, copy.deepcopy(prev_chain))
                # Merge the revdep's accumulated reverse deps into ours.
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[taskid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
def calculate_task_weights ( self , endpoints ) :
"""
2010-03-24 23:56:12 +00:00
Calculate a number representing the " weight " of each task . Heavier weighted tasks
2008-01-06 16:51:51 +00:00
have more dependencies and hence should be executed sooner for maximum speed .
2010-12-22 15:41:32 +00:00
This function also sanity checks the task list finding tasks that are not
2008-01-06 16:51:51 +00:00
possible to execute due to circular dependencies .
"""
numTasks = len ( self . runq_fnid )
weight = [ ]
deps_left = [ ]
task_done = [ ]
2010-11-19 20:39:22 +00:00
for listid in xrange ( numTasks ) :
2008-01-06 16:51:51 +00:00
task_done . append ( False )
2014-04-13 10:45:03 +00:00
weight . append ( 1 )
2008-01-06 16:51:51 +00:00
deps_left . append ( len ( self . runq_revdeps [ listid ] ) )
for listid in endpoints :
2014-04-13 10:45:03 +00:00
weight [ listid ] = 10
2008-01-06 16:51:51 +00:00
task_done [ listid ] = True
2010-04-12 00:03:55 +00:00
while True :
2008-01-06 16:51:51 +00:00
next_points = [ ]
for listid in endpoints :
for revdep in self . runq_depends [ listid ] :
weight [ revdep ] = weight [ revdep ] + weight [ listid ]
deps_left [ revdep ] = deps_left [ revdep ] - 1
if deps_left [ revdep ] == 0 :
next_points . append ( revdep )
task_done [ revdep ] = True
endpoints = next_points
if len ( next_points ) == 0 :
2010-03-24 23:56:12 +00:00
break
2008-01-06 16:51:51 +00:00
# Circular dependency sanity check
problem_tasks = [ ]
2010-11-19 20:39:22 +00:00
for task in xrange ( numTasks ) :
2008-01-06 16:51:51 +00:00
if task_done [ task ] is False or deps_left [ task ] != 0 :
problem_tasks . append ( task )
2010-12-17 21:56:08 +00:00
logger . debug ( 2 , " Task %s ( %s ) is not buildable " , task , self . get_user_idstring ( task ) )
logger . debug ( 2 , " (Complete marker was %s and the remaining dependency count was %s ) \n " , task_done [ task ] , deps_left [ task ] )
2008-01-06 16:51:51 +00:00
if problem_tasks :
message = " Unbuildable tasks were found. \n "
message = message + " These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks. \n \n "
message = message + " Identifying dependency loops (this may take a short while)... \n "
2010-06-10 17:35:31 +00:00
logger . error ( message )
2008-01-06 16:51:51 +00:00
msgs = self . circular_depchains_handler ( problem_tasks )
message = " \n "
for msg in msgs :
message = message + msg
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , message )
2008-01-06 16:51:51 +00:00
return weight
2010-08-18 10:30:53 +00:00
def prepare ( self ) :
2006-11-16 15:02:15 +00:00
"""
2010-03-24 23:56:12 +00:00
Turn a set of taskData into a RunQueue and compute data needed
2006-11-16 15:02:15 +00:00
to optimise the execution order .
"""
runq_build = [ ]
2012-06-26 18:00:58 +00:00
recursivetasks = { }
2013-06-19 13:03:39 +00:00
recursiveitasks = { }
2012-07-04 16:39:11 +00:00
recursivetasksselfref = set ( )
2006-11-16 15:02:15 +00:00
2007-04-01 15:04:49 +00:00
taskData = self . taskData
2007-05-22 11:50:37 +00:00
if len ( taskData . tasks_name ) == 0 :
# Nothing to do
2010-08-24 23:58:23 +00:00
return 0
2007-05-22 11:50:37 +00:00
2014-11-05 15:08:24 +00:00
logger . info ( " Preparing RunQueue " )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Step A - Work out a list of tasks to run
#
2009-07-21 18:44:23 +00:00
# Taskdata gives us a list of possible providers for every build and run
2010-03-24 23:56:12 +00:00
# target ordered by priority. It also gives information on each of those
2009-07-21 18:44:23 +00:00
# providers.
2008-01-06 16:51:51 +00:00
#
2010-03-24 23:56:12 +00:00
# To create the actual list of tasks to execute we fix the list of
# providers and then resolve the dependencies into task IDs. This
# process is repeated for each type of dependency (tdepends, deptask,
2008-01-06 16:51:51 +00:00
# rdeptast, recrdeptask, idepends).
2009-07-21 21:38:53 +00:00
def add_build_dependencies ( depids , tasknames , depends ) :
for depid in depids :
# Won't be in build_targets if ASSUME_PROVIDED
if depid not in taskData . build_targets :
continue
depdata = taskData . build_targets [ depid ] [ 0 ]
if depdata is None :
continue
for taskname in tasknames :
2012-06-27 10:04:06 +00:00
taskid = taskData . gettask_id_fromfnid ( depdata , taskname )
2009-07-21 21:38:53 +00:00
if taskid is not None :
2012-06-27 10:04:48 +00:00
depends . add ( taskid )
2009-07-21 21:38:53 +00:00
def add_runtime_dependencies ( depids , tasknames , depends ) :
for depid in depids :
if depid not in taskData . run_targets :
continue
depdata = taskData . run_targets [ depid ] [ 0 ]
if depdata is None :
continue
for taskname in tasknames :
2012-06-27 10:04:06 +00:00
taskid = taskData . gettask_id_fromfnid ( depdata , taskname )
2009-07-21 21:38:53 +00:00
if taskid is not None :
2012-06-27 10:04:48 +00:00
depends . add ( taskid )
2009-07-21 21:38:53 +00:00
2012-06-26 18:00:58 +00:00
def add_resolved_dependencies ( depids , tasknames , depends ) :
for depid in depids :
for taskname in tasknames :
taskid = taskData . gettask_id_fromfnid ( depid , taskname )
if taskid is not None :
depends . add ( taskid )
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( taskData . tasks_name ) ) :
2012-06-27 10:04:48 +00:00
depends = set ( )
2006-11-16 15:02:15 +00:00
fnid = taskData . tasks_fnid [ task ]
fn = taskData . fn_index [ fnid ]
2007-04-01 15:04:49 +00:00
task_deps = self . dataCache . task_deps [ fn ]
2006-11-16 15:02:15 +00:00
2014-03-10 00:55:45 +00:00
#logger.debug(2, "Processing %s:%s", fn, taskData.tasks_name[task])
2009-07-21 18:44:23 +00:00
2006-11-16 15:02:15 +00:00
if fnid not in taskData . failed_fnids :
2010-03-24 23:56:12 +00:00
# Resolve task internal dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. addtask before X after Y
2012-06-27 10:04:48 +00:00
depends = set ( taskData . tasks_tdepends [ task ] )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve 'deptask' dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[deptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS)
2006-11-16 15:02:15 +00:00
if ' deptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' deptask ' ] :
2007-10-30 12:03:07 +00:00
tasknames = task_deps [ ' deptask ' ] [ taskData . tasks_name [ task ] ] . split ( )
2009-07-21 21:38:53 +00:00
add_build_dependencies ( taskData . depids [ fnid ] , tasknames , depends )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve 'rdeptask' dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[rdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all RDEPENDS)
2006-11-16 15:02:15 +00:00
if ' rdeptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' rdeptask ' ] :
2012-06-27 21:01:29 +00:00
tasknames = task_deps [ ' rdeptask ' ] [ taskData . tasks_name [ task ] ] . split ( )
add_runtime_dependencies ( taskData . rdepids [ fnid ] , tasknames , depends )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve inter-task dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[depends] = "targetname:do_someothertask"
# (makes sure sometask runs after targetname's someothertask)
2007-04-01 15:04:49 +00:00
idepends = taskData . tasks_idepends [ task ]
2008-03-14 11:44:34 +00:00
for ( depid , idependtask ) in idepends :
2012-09-23 13:14:24 +00:00
if depid in taskData . build_targets and not depid in taskData . failed_deps :
2007-08-05 22:43:24 +00:00
# Won't be in build_targets if ASSUME_PROVIDED
2007-04-01 15:04:49 +00:00
depdata = taskData . build_targets [ depid ] [ 0 ]
2007-08-16 09:55:21 +00:00
if depdata is not None :
2012-06-27 10:04:06 +00:00
taskid = taskData . gettask_id_fromfnid ( depdata , idependtask )
2010-08-06 23:19:12 +00:00
if taskid is None :
2013-03-13 20:01:51 +00:00
bb . msg . fatal ( " RunQueue " , " Task %s in %s depends upon non-existent task %s in %s " % ( taskData . tasks_name [ task ] , fn , idependtask , taskData . fn_index [ depdata ] ) )
2012-06-27 10:04:48 +00:00
depends . add ( taskid )
2012-06-22 11:51:29 +00:00
irdepends = taskData . tasks_irdepends [ task ]
for ( depid , idependtask ) in irdepends :
if depid in taskData . run_targets :
# Won't be in run_targets if ASSUME_PROVIDED
depdata = taskData . run_targets [ depid ] [ 0 ]
if depdata is not None :
2012-06-27 10:04:06 +00:00
taskid = taskData . gettask_id_fromfnid ( depdata , idependtask )
2012-06-22 11:51:29 +00:00
if taskid is None :
2013-03-13 20:01:51 +00:00
bb . msg . fatal ( " RunQueue " , " Task %s in %s rdepends upon non-existent task %s in %s " % ( taskData . tasks_name [ task ] , fn , idependtask , taskData . fn_index [ depdata ] ) )
2012-06-27 10:04:48 +00:00
depends . add ( taskid )
2009-07-21 18:44:23 +00:00
2012-06-26 18:00:58 +00:00
# Resolve recursive 'recrdeptask' dependencies (Part A)
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
2009-07-21 18:44:23 +00:00
# We cover the recursive part of the dependencies below
2006-11-16 15:02:15 +00:00
if ' recrdeptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' recrdeptask ' ] :
2012-06-26 18:00:58 +00:00
tasknames = task_deps [ ' recrdeptask ' ] [ taskData . tasks_name [ task ] ] . split ( )
recursivetasks [ task ] = tasknames
add_build_dependencies ( taskData . depids [ fnid ] , tasknames , depends )
add_runtime_dependencies ( taskData . rdepids [ fnid ] , tasknames , depends )
2012-07-04 16:39:11 +00:00
if taskData . tasks_name [ task ] in tasknames :
recursivetasksselfref . add ( task )
2006-11-16 15:02:15 +00:00
2013-06-19 13:03:39 +00:00
if ' recideptask ' in task_deps and taskData . tasks_name [ task ] in task_deps [ ' recideptask ' ] :
recursiveitasks [ task ] = [ ]
for t in task_deps [ ' recideptask ' ] [ taskData . tasks_name [ task ] ] . split ( ) :
newdep = taskData . gettask_id_fromfnid ( fnid , t )
recursiveitasks [ task ] . append ( newdep )
2006-11-16 15:02:15 +00:00
self . runq_fnid . append ( taskData . tasks_fnid [ task ] )
self . runq_task . append ( taskData . tasks_name [ task ] )
2012-06-27 10:04:48 +00:00
self . runq_depends . append ( depends )
2009-05-12 15:53:22 +00:00
self . runq_revdeps . append ( set ( ) )
2010-08-31 13:49:43 +00:00
self . runq_hash . append ( " " )
2006-11-16 15:02:15 +00:00
runq_build . append ( 0 )
2009-07-21 18:44:23 +00:00
2012-06-26 18:00:58 +00:00
# Resolve recursive 'recrdeptask' dependencies (Part B)
2009-07-21 18:44:23 +00:00
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
2012-06-26 18:00:58 +00:00
# We need to do this separately since we need all of self.runq_depends to be complete before this is processed
extradeps = { }
for task in recursivetasks :
2012-07-02 12:29:53 +00:00
extradeps [ task ] = set ( self . runq_depends [ task ] )
2012-06-26 18:00:58 +00:00
tasknames = recursivetasks [ task ]
seendeps = set ( )
seenfnid = [ ]
def generate_recdeps ( t ) :
newdeps = set ( )
add_resolved_dependencies ( [ taskData . tasks_fnid [ t ] ] , tasknames , newdeps )
extradeps [ task ] . update ( newdeps )
seendeps . add ( t )
newdeps . add ( t )
for i in newdeps :
for n in self . runq_depends [ i ] :
if n not in seendeps :
generate_recdeps ( n )
generate_recdeps ( task )
2013-06-19 13:03:39 +00:00
if task in recursiveitasks :
for dep in recursiveitasks [ task ] :
generate_recdeps ( dep )
2012-07-02 12:29:53 +00:00
# Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
for task in recursivetasks :
2012-07-04 16:39:11 +00:00
extradeps [ task ] . difference_update ( recursivetasksselfref )
2012-07-02 12:29:53 +00:00
2012-06-26 18:00:58 +00:00
for task in xrange ( len ( taskData . tasks_name ) ) :
# Add in extra dependencies
if task in extradeps :
2012-07-02 12:29:53 +00:00
self . runq_depends [ task ] = extradeps [ task ]
2012-06-26 18:00:58 +00:00
# Remove all self references
if task in self . runq_depends [ task ] :
logger . debug ( 2 , " Task %s ( %s %s ) contains self reference! %s " , task , taskData . fn_index [ taskData . tasks_fnid [ task ] ] , taskData . tasks_name [ task ] , self . runq_depends [ task ] )
self . runq_depends [ task ] . remove ( task )
2008-01-06 16:51:51 +00:00
# Step B - Mark all active tasks
#
# Start with the tasks we were asked to run and mark all dependencies
# as active too. If the task is to be 'forced', clear its stamp. Once
# all active tasks are marked, prune the ones we don't need.
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Marking Active Tasks " )
2006-11-16 15:02:15 +00:00
def mark_active ( listid , depth ) :
"""
Mark an item as active along with its depends
( calls itself recursively )
"""
if runq_build [ listid ] == 1 :
return
runq_build [ listid ] = 1
depends = self . runq_depends [ listid ]
for depend in depends :
mark_active ( depend , depth + 1 )
2008-03-03 22:01:45 +00:00
self . target_pairs = [ ]
2007-04-01 15:04:49 +00:00
for target in self . targets :
2006-11-16 15:02:15 +00:00
targetid = taskData . getbuild_id ( target [ 0 ] )
if targetid not in taskData . build_targets :
continue
2007-02-21 20:15:13 +00:00
if targetid in taskData . failed_deps :
continue
2006-11-16 15:02:15 +00:00
fnid = taskData . build_targets [ targetid ] [ 0 ]
2008-03-03 22:01:45 +00:00
fn = taskData . fn_index [ fnid ]
self . target_pairs . append ( ( fn , target [ 1 ] ) )
2007-01-08 23:53:01 +00:00
2006-11-16 15:02:15 +00:00
if fnid in taskData . failed_fnids :
continue
2008-01-06 16:51:51 +00:00
if target [ 1 ] not in taskData . tasks_lookup [ fnid ] :
2013-08-09 13:51:22 +00:00
import difflib
close_matches = difflib . get_close_matches ( target [ 1 ] , taskData . tasks_lookup [ fnid ] , cutoff = 0.7 )
if close_matches :
extra = " . Close matches: \n %s " % " \n " . join ( close_matches )
else :
extra = " "
bb . msg . fatal ( " RunQueue " , " Task %s does not exist for target %s %s " % ( target [ 1 ] , target [ 0 ] , extra ) )
2008-01-06 16:51:51 +00:00
2006-11-16 15:02:15 +00:00
listid = taskData . tasks_lookup [ fnid ] [ target [ 1 ] ]
mark_active ( listid , 1 )
2008-01-06 16:51:51 +00:00
# Step C - Prune all inactive tasks
#
# Once all active tasks are marked, prune the ones we don't need.
2006-11-16 15:02:15 +00:00
maps = [ ]
delcount = 0
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
if runq_build [ listid - delcount ] == 1 :
maps . append ( listid - delcount )
else :
del self . runq_fnid [ listid - delcount ]
del self . runq_task [ listid - delcount ]
del self . runq_depends [ listid - delcount ]
del runq_build [ listid - delcount ]
del self . runq_revdeps [ listid - delcount ]
2010-08-31 13:49:43 +00:00
del self . runq_hash [ listid - delcount ]
2006-11-16 15:02:15 +00:00
delcount = delcount + 1
maps . append ( - 1 )
2008-01-06 16:51:51 +00:00
#
# Step D - Sanity checks and computation
#
# Check to make sure we still have tasks to run
2006-11-16 15:02:15 +00:00
if len ( self . runq_fnid ) == 0 :
if not taskData . abort :
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above. " )
2009-07-21 18:44:23 +00:00
else :
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " No active tasks and not in --continue mode?! Please report this bug. " )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Pruned %s inactive tasks, %s left " , delcount , len ( self . runq_fnid ) )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Remap the dependencies to account for the deleted tasks
# Check we didn't delete a task we depend on
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
newdeps = [ ]
origdeps = self . runq_depends [ listid ]
for origdep in origdeps :
if maps [ origdep ] == - 1 :
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " Invalid mapping - Should never happen! " )
2006-11-16 15:02:15 +00:00
newdeps . append ( maps [ origdep ] )
2009-05-12 15:53:22 +00:00
self . runq_depends [ listid ] = set ( newdeps )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Assign Weightings " )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Generate a list of reverse dependencies to ease future calculations
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
for dep in self . runq_depends [ listid ] :
self . runq_revdeps [ dep ] . add ( listid )
2008-01-06 16:51:51 +00:00
# Identify tasks at the end of dependency chains
# Error on circular dependency loops (length two)
2006-11-16 15:02:15 +00:00
endpoints = [ ]
2010-11-19 20:39:22 +00:00
for listid in xrange ( len ( self . runq_fnid ) ) :
2006-11-16 15:02:15 +00:00
revdeps = self . runq_revdeps [ listid ]
if len ( revdeps ) == 0 :
endpoints . append ( listid )
for dep in revdeps :
if dep in self . runq_depends [ listid ] :
#self.dump_data(taskData)
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " Task %s ( %s ) has circular dependency on %s ( %s ) " % ( taskData . fn_index [ self . runq_fnid [ dep ] ] , self . runq_task [ dep ] , taskData . fn_index [ self . runq_fnid [ listid ] ] , self . runq_task [ listid ] ) )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Compute totals (have %s endpoint(s)) " , len ( endpoints ) )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Calculate task weights
2008-01-06 16:51:51 +00:00
# Check of higher length circular dependencies
self . runq_weight = self . calculate_task_weights ( endpoints )
# Sanity Check - Check for multiple tasks building the same provider
2007-09-02 14:10:08 +00:00
prov_list = { }
seen_fn = [ ]
2010-11-19 20:39:22 +00:00
for task in xrange ( len ( self . runq_fnid ) ) :
2007-09-02 14:10:08 +00:00
fn = taskData . fn_index [ self . runq_fnid [ task ] ]
if fn in seen_fn :
continue
seen_fn . append ( fn )
for prov in self . dataCache . fn_provides [ fn ] :
if prov not in prov_list :
prov_list [ prov ] = [ fn ]
2010-03-24 23:56:12 +00:00
elif fn not in prov_list [ prov ] :
2007-09-02 14:10:08 +00:00
prov_list [ prov ] . append ( fn )
for prov in prov_list :
if len ( prov_list [ prov ] ) > 1 and prov not in self . multi_provider_whitelist :
2013-09-02 13:47:21 +00:00
seen_pn = [ ]
# If two versions of the same PN are being built its fatal, we don't support it.
for fn in prov_list [ prov ] :
pn = self . dataCache . pkg_fn [ fn ]
if pn not in seen_pn :
seen_pn . append ( pn )
else :
bb . fatal ( " Multiple versions of %s are due to be built ( %s ). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_ %s to select the correct version or don ' t depend on multiple versions. " % ( pn , " " . join ( prov_list [ prov ] ) , pn ) )
2012-02-13 11:41:31 +00:00
msg = " Multiple .bb files are due to be built which each provide %s ( %s ). " % ( prov , " " . join ( prov_list [ prov ] ) )
if self . warn_multi_bb :
logger . warn ( msg )
else :
msg + = " \n This usually means one provides something the other doesn ' t and should. "
logger . error ( msg )
2007-09-02 14:10:08 +00:00
2008-05-04 23:22:24 +00:00
# Create a whitelist usable by the stamp checks
stampfnwhitelist = [ ]
for entry in self . stampwhitelist . split ( ) :
entryid = self . taskData . getbuild_id ( entry )
if entryid not in self . taskData . build_targets :
continue
fnid = self . taskData . build_targets [ entryid ] [ 0 ]
fn = self . taskData . fn_index [ fnid ]
stampfnwhitelist . append ( fn )
self . stampfnwhitelist = stampfnwhitelist
2012-09-17 22:43:17 +00:00
# Iterate over the task list looking for tasks with a 'setscene' function
2010-08-19 10:36:29 +00:00
self . runq_setscene = [ ]
2012-09-17 22:43:17 +00:00
if not self . cooker . configuration . nosetscene :
for task in range ( len ( self . runq_fnid ) ) :
setscene = taskData . gettask_id ( self . taskData . fn_index [ self . runq_fnid [ task ] ] , self . runq_task [ task ] + " _setscene " , False )
if not setscene :
continue
self . runq_setscene . append ( task )
2010-08-19 10:36:29 +00:00
2012-06-18 15:45:36 +00:00
def invalidate_task ( fn , taskname , error_nostamp ) :
taskdep = self . dataCache . task_deps [ fn ]
2013-07-24 10:31:04 +00:00
fnid = self . taskData . getfn_id ( fn )
if taskname not in taskData . tasks_lookup [ fnid ] :
logger . warn ( " Task %s does not exist, invalidating this task will have no effect " % taskname )
2012-06-18 15:45:36 +00:00
if ' nostamp ' in taskdep and taskname in taskdep [ ' nostamp ' ] :
if error_nostamp :
bb . fatal ( " Task %s is marked nostamp, cannot invalidate this task " % taskname )
else :
bb . debug ( 1 , " Task %s is marked nostamp, cannot invalidate this task " % taskname )
else :
logger . verbose ( " Invalidate task %s , %s " , taskname , fn )
bb . parse . siggen . invalidate_task ( taskname , self . dataCache , fn )
2012-06-18 15:45:35 +00:00
# Invalidate task if force mode active
if self . cooker . configuration . force :
for ( fn , target ) in self . target_pairs :
2012-06-18 15:45:36 +00:00
invalidate_task ( fn , target , False )
# Invalidate task if invalidate mode active
if self . cooker . configuration . invalidate_stamp :
for ( fn , target ) in self . target_pairs :
for st in self . cooker . configuration . invalidate_stamp . split ( ' , ' ) :
invalidate_task ( fn , " do_ %s " % st , True )
2012-06-18 15:45:35 +00:00
2014-08-12 08:53:16 +00:00
# Iterate over the task list and call into the siggen code
2010-08-31 13:49:43 +00:00
dealtwith = set ( )
todeal = set ( range ( len ( self . runq_fnid ) ) )
while len ( todeal ) > 0 :
for task in todeal . copy ( ) :
if len ( self . runq_depends [ task ] - dealtwith ) == 0 :
dealtwith . add ( task )
todeal . remove ( task )
procdep = [ ]
for dep in self . runq_depends [ task ] :
procdep . append ( self . taskData . fn_index [ self . runq_fnid [ dep ] ] + " . " + self . runq_task [ dep ] )
self . runq_hash [ task ] = bb . parse . siggen . get_taskhash ( self . taskData . fn_index [ self . runq_fnid [ task ] ] , self . runq_task [ task ] , procdep , self . dataCache )
2010-08-24 23:58:23 +00:00
return len ( self . runq_fnid )
2010-08-18 10:30:53 +00:00
def dump_data ( self , taskQueue ) :
"""
Dump some debug information on the internal data structures
"""
2010-06-10 17:35:31 +00:00
logger . debug ( 3 , " run_tasks: " )
2011-01-10 12:48:49 +00:00
for task in xrange ( len ( self . rqdata . runq_task ) ) :
logger . debug ( 3 , " ( %s ) %s - %s : %s Deps %s RevDeps %s " , task ,
taskQueue . fn_index [ self . rqdata . runq_fnid [ task ] ] ,
self . rqdata . runq_task [ task ] ,
self . rqdata . runq_weight [ task ] ,
self . rqdata . runq_depends [ task ] ,
self . rqdata . runq_revdeps [ task ] )
2010-08-18 10:30:53 +00:00
2010-06-10 17:35:31 +00:00
logger . debug ( 3 , " sorted_tasks: " )
2011-01-10 12:48:49 +00:00
for task1 in xrange ( len ( self . rqdata . runq_task ) ) :
2010-08-18 10:30:53 +00:00
if task1 in self . prio_map :
task = self . prio_map [ task1 ]
2011-01-10 12:48:49 +00:00
logger . debug ( 3 , " ( %s ) %s - %s : %s Deps %s RevDeps %s " , task ,
taskQueue . fn_index [ self . rqdata . runq_fnid [ task ] ] ,
self . rqdata . runq_task [ task ] ,
self . rqdata . runq_weight [ task ] ,
self . rqdata . runq_depends [ task ] ,
self . rqdata . runq_revdeps [ task ] )
2010-08-18 10:30:53 +00:00
class RunQueue:
    """
    Top-level runqueue object.

    Owns the prepared task data (self.rqdata), the bitbake-worker
    processes (regular and fakeroot variants) and the state machine
    driven by execute_runqueue().
    """
    def __init__(self, cooker, cfgData, dataCache, taskData, targets):
        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets)

        # Policy/hook configuration; each falls back to a sane default
        # when the variable is unset.
        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None
        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION", True) or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID", True) or None

        self.state = runQueuePrepare

        # For disk space monitor
        self.dm = monitordisk.diskMonitor(cfgData)

        # Worker processes and their pipes are started lazily.
        self.rqexe = None
        self.worker = None
        self.workerpipe = None
        self.fakeworker = None
        self.fakeworkerpipe = None
def _start_worker ( self , fakeroot = False , rqexec = None ) :
2013-06-07 17:11:49 +00:00
logger . debug ( 1 , " Starting bitbake-worker " )
2014-08-27 13:55:50 +00:00
magic = " decafbad "
if self . cooker . configuration . profile :
magic = " decafbadbad "
2013-06-07 17:13:04 +00:00
if fakeroot :
fakerootcmd = self . cfgData . getVar ( " FAKEROOTCMD " , True )
fakerootenv = ( self . cfgData . getVar ( " FAKEROOTBASEENV " , True ) or " " ) . split ( )
env = os . environ . copy ( )
for key , value in ( var . split ( ' = ' ) for var in fakerootenv ) :
env [ key ] = value
2014-08-27 13:55:50 +00:00
worker = subprocess . Popen ( [ fakerootcmd , " bitbake-worker " , magic ] , stdout = subprocess . PIPE , stdin = subprocess . PIPE , env = env )
2013-06-07 17:13:04 +00:00
else :
2014-08-27 13:55:50 +00:00
worker = subprocess . Popen ( [ " bitbake-worker " , magic ] , stdout = subprocess . PIPE , stdin = subprocess . PIPE )
2013-06-07 17:12:30 +00:00
bb . utils . nonblockingfd ( worker . stdout )
2014-03-09 17:00:17 +00:00
workerpipe = runQueuePipe ( worker . stdout , None , self . cfgData , self , rqexec )
2013-06-07 17:11:49 +00:00
workerdata = {
" taskdeps " : self . rqdata . dataCache . task_deps ,
" fakerootenv " : self . rqdata . dataCache . fakerootenv ,
" fakerootdirs " : self . rqdata . dataCache . fakerootdirs ,
" fakerootnoenv " : self . rqdata . dataCache . fakerootnoenv ,
2014-09-05 09:34:41 +00:00
" sigdata " : bb . parse . siggen . get_taskdata ( ) ,
2013-06-07 17:11:49 +00:00
" runq_hash " : self . rqdata . runq_hash ,
" logdefaultdebug " : bb . msg . loggerDefaultDebugLevel ,
" logdefaultverbose " : bb . msg . loggerDefaultVerbose ,
" logdefaultverboselogs " : bb . msg . loggerVerboseLogs ,
" logdefaultdomain " : bb . msg . loggerDefaultDomains ,
2013-06-07 17:13:04 +00:00
" prhost " : self . cooker . prhost ,
2013-09-02 17:26:28 +00:00
" buildname " : self . cfgData . getVar ( " BUILDNAME " , True ) ,
2013-09-06 15:53:20 +00:00
" date " : self . cfgData . getVar ( " DATE " , True ) ,
" time " : self . cfgData . getVar ( " TIME " , True ) ,
2013-06-07 17:11:49 +00:00
}
2013-06-07 17:12:30 +00:00
worker . stdin . write ( " <cookerconfig> " + pickle . dumps ( self . cooker . configuration ) + " </cookerconfig> " )
worker . stdin . write ( " <workerdata> " + pickle . dumps ( workerdata ) + " </workerdata> " )
worker . stdin . flush ( )
2013-06-07 17:11:49 +00:00
2013-06-07 17:12:30 +00:00
return worker , workerpipe
def _teardown_worker ( self , worker , workerpipe ) :
if not worker :
return
2013-06-07 17:11:49 +00:00
logger . debug ( 1 , " Teardown for bitbake-worker " )
2014-03-09 17:01:19 +00:00
try :
worker . stdin . write ( " <quit></quit> " )
worker . stdin . flush ( )
except IOError :
pass
2013-06-07 17:12:30 +00:00
while worker . returncode is None :
workerpipe . read ( )
worker . poll ( )
while workerpipe . read ( ) :
2013-06-07 17:11:49 +00:00
continue
2013-06-07 17:12:30 +00:00
workerpipe . close ( )
def start_worker ( self ) :
if self . worker :
2013-06-07 17:13:04 +00:00
self . teardown_workers ( )
2014-03-09 17:00:17 +00:00
self . teardown = False
2013-06-07 17:12:30 +00:00
self . worker , self . workerpipe = self . _start_worker ( )
2013-06-07 17:13:04 +00:00
def start_fakeworker ( self , rqexec ) :
if not self . fakeworker :
self . fakeworker , self . fakeworkerpipe = self . _start_worker ( True , rqexec )
def teardown_workers ( self ) :
2014-03-09 17:00:17 +00:00
self . teardown = True
2013-06-07 17:12:30 +00:00
self . _teardown_worker ( self . worker , self . workerpipe )
self . worker = None
self . workerpipe = None
2013-06-07 17:13:04 +00:00
self . _teardown_worker ( self . fakeworker , self . fakeworkerpipe )
self . fakeworker = None
self . fakeworkerpipe = None
def read_workers ( self ) :
self . workerpipe . read ( )
if self . fakeworkerpipe :
self . fakeworkerpipe . read ( )
2012-02-29 14:15:28 +00:00
2013-08-31 22:40:55 +00:00
def active_fds ( self ) :
fds = [ ]
if self . workerpipe :
fds . append ( self . workerpipe . input )
if self . fakeworkerpipe :
fds . append ( self . fakeworkerpipe . input )
return fds
2012-05-10 08:21:41 +00:00
def check_stamp_task ( self , task , taskname = None , recurse = False , cache = None ) :
2010-08-19 10:36:29 +00:00
def get_timestamp ( f ) :
try :
if not os . access ( f , os . F_OK ) :
return None
return os . stat ( f ) [ stat . ST_MTIME ]
except :
return None
2008-03-14 11:44:34 +00:00
if self . stamppolicy == " perfile " :
fulldeptree = False
else :
fulldeptree = True
2008-05-04 23:22:24 +00:00
stampwhitelist = [ ]
if self . stamppolicy == " whitelist " :
2010-08-18 10:30:53 +00:00
stampwhitelist = self . rqdata . stampfnwhitelist
2008-03-14 11:44:34 +00:00
2010-08-18 10:30:53 +00:00
fn = self . rqdata . taskData . fn_index [ self . rqdata . runq_fnid [ task ] ]
2010-07-06 16:47:43 +00:00
if taskname is None :
2010-08-18 10:30:53 +00:00
taskname = self . rqdata . runq_task [ task ]
2011-01-10 12:48:49 +00:00
2011-01-18 08:18:18 +00:00
stampfile = bb . build . stampfile ( taskname , self . rqdata . dataCache , fn )
2010-11-06 12:20:33 +00:00
2014-08-12 08:53:16 +00:00
# If the stamp is missing, it's not current
2008-03-14 11:44:34 +00:00
if not os . access ( stampfile , os . F_OK ) :
2010-12-17 21:46:41 +00:00
logger . debug ( 2 , " Stampfile %s not available " , stampfile )
2008-03-14 11:44:34 +00:00
return False
2014-08-12 08:53:16 +00:00
# If it's a 'nostamp' task, it's not current
2010-08-18 10:30:53 +00:00
taskdep = self . rqdata . dataCache . task_deps [ fn ]
2008-10-01 13:55:17 +00:00
if ' nostamp ' in taskdep and taskname in taskdep [ ' nostamp ' ] :
2011-01-10 12:48:49 +00:00
logger . debug ( 2 , " %s . %s is nostamp \n " , fn , taskname )
2008-03-14 11:44:34 +00:00
return False
2011-01-06 19:48:47 +00:00
if taskname != " do_setscene " and taskname . endswith ( " _setscene " ) :
2010-08-19 10:36:29 +00:00
return True
2012-05-10 08:21:41 +00:00
if cache is None :
cache = { }
2008-03-14 11:44:34 +00:00
iscurrent = True
2010-08-19 10:36:29 +00:00
t1 = get_timestamp ( stampfile )
2010-08-18 10:30:53 +00:00
for dep in self . rqdata . runq_depends [ task ] :
2008-03-14 11:44:34 +00:00
if iscurrent :
2010-08-18 10:30:53 +00:00
fn2 = self . rqdata . taskData . fn_index [ self . rqdata . runq_fnid [ dep ] ]
taskname2 = self . rqdata . runq_task [ dep ]
2011-01-18 08:18:18 +00:00
stampfile2 = bb . build . stampfile ( taskname2 , self . rqdata . dataCache , fn2 )
stampfile3 = bb . build . stampfile ( taskname2 + " _setscene " , self . rqdata . dataCache , fn2 )
2010-08-19 10:36:29 +00:00
t2 = get_timestamp ( stampfile2 )
2010-11-06 12:20:33 +00:00
t3 = get_timestamp ( stampfile3 )
2010-08-19 10:36:29 +00:00
if t3 and t3 > t2 :
continue
2008-05-04 23:22:24 +00:00
if fn == fn2 or ( fulldeptree and fn2 not in stampwhitelist ) :
2010-12-16 15:14:13 +00:00
if not t2 :
2011-01-10 12:48:49 +00:00
logger . debug ( 2 , ' Stampfile %s does not exist ' , stampfile2 )
2008-03-14 11:44:34 +00:00
iscurrent = False
2010-12-16 15:14:13 +00:00
if t1 < t2 :
2011-01-10 12:48:49 +00:00
logger . debug ( 2 , ' Stampfile %s < %s ' , stampfile , stampfile2 )
2010-12-16 15:14:13 +00:00
iscurrent = False
2012-03-16 10:46:05 +00:00
if recurse and iscurrent :
2012-07-25 18:40:38 +00:00
if dep in cache :
iscurrent = cache [ dep ]
if not iscurrent :
logger . debug ( 2 , ' Stampfile for dependency %s : %s invalid (cached) ' % ( fn2 , taskname2 ) )
else :
iscurrent = self . check_stamp_task ( dep , recurse = True , cache = cache )
cache [ dep ] = iscurrent
if recurse :
cache [ task ] = iscurrent
2008-03-14 11:44:34 +00:00
return iscurrent
2008-03-03 22:01:45 +00:00
2012-07-25 18:43:53 +00:00
def _execute_runqueue ( self ) :
2006-11-16 15:02:15 +00:00
"""
2010-08-18 10:30:53 +00:00
Run the tasks in a queue prepared by rqdata . prepare ( )
2006-11-16 15:02:15 +00:00
Upon failure , optionally try to recover the build using any alternate providers
( if the abort on failure configuration option isn ' t set)
"""
2013-08-31 22:40:55 +00:00
retval = True
2010-08-18 16:37:15 +00:00
2010-01-20 18:46:02 +00:00
if self . state is runQueuePrepare :
2010-08-24 23:58:23 +00:00
self . rqexe = RunQueueExecuteDummy ( self )
2011-03-08 19:07:24 +00:00
if self . rqdata . prepare ( ) == 0 :
2010-08-24 23:58:23 +00:00
self . state = runQueueComplete
else :
self . state = runQueueSceneInit
2010-08-19 10:36:29 +00:00
2013-09-18 12:15:48 +00:00
# we are ready to run, see if any UI client needs the dependency info
if bb . cooker . CookerFeatures . SEND_DEPENDS_TREE in self . cooker . featureset :
depgraph = self . cooker . buildDependTree ( self , self . rqdata . taskData )
bb . event . fire ( bb . event . DepTreeGenerated ( depgraph ) , self . cooker . data )
2010-08-19 10:36:29 +00:00
if self . state is runQueueSceneInit :
2014-03-26 13:46:54 +00:00
dump = self . cooker . configuration . dump_signatures
if dump :
if ' printdiff ' in dump :
invalidtasks = self . print_diffscenetasks ( )
2014-03-26 13:47:29 +00:00
self . dump_signatures ( dump )
2014-03-26 13:46:54 +00:00
if ' printdiff ' in dump :
self . write_diffscenetasks ( invalidtasks )
2013-12-19 09:40:52 +00:00
self . state = runQueueComplete
2010-08-31 13:49:43 +00:00
else :
2013-06-07 17:11:49 +00:00
self . start_worker ( )
2010-08-31 13:49:43 +00:00
self . rqexe = RunQueueExecuteScenequeue ( self )
2010-08-19 10:36:29 +00:00
V5 Disk space monitoring
Monitor disk availability and take action when the free disk space or
amount of free inode is running low, it is enabled when BB_DISKMON_DIRS
is set.
* Variable meanings(from meta-yocto/conf/local.conf.sample):
# Set the directories to monitor for disk usage, if more than one
# directories are mounted in the same device, then only one directory
# would be monitored since the monitor is based on the device.
# The format is:
# "action,directory,minimum_space,minimum_free_inode"
#
# The "action" must be set and should be one of:
# ABORT: Immediately abort
# STOPTASKS: The new tasks can't be executed any more, will stop the build
# when the running tasks have been done.
# WARN: show warnings (see BB_DISKMON_WARNINTERVAL for more information)
#
# The "directory" must be set, any directory is OK.
#
# Either "minimum_space" or "minimum_free_inode" (or both of them)
# should be set, otherwise the monitor would not be enabled,
# the unit can be G, M, K or none, but do NOT use GB, MB or KB
# (B is not needed).
#BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
#
# Set disk space and inode interval (only works when the action is "WARN",
# the unit can be G, M, or K, but do NOT use the GB, MB or KB
# (B is not needed), the format is:
# "disk_space_interval, disk_inode_interval", the default value is
# "50M,5K" which means that it would warn when the free space is
# lower than the minimum space(or inode), and would repeat the action
# when the disk space reduces 50M (or the amount of inode reduces 5k)
# again.
#BB_DISKMON_WARNINTERVAL = "50M,5K"
[YOCTO #1589]
(Bitbake rev: 4d173d441d2beb8e6492b6b1842682f8cf32e6cc)
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
2012-02-26 08:48:15 +00:00
if self . state in [ runQueueSceneRun , runQueueRunning , runQueueCleanUp ] :
self . dm . check ( self )
2010-08-19 10:36:29 +00:00
if self . state is runQueueSceneRun :
2010-09-13 15:57:13 +00:00
retval = self . rqexe . execute ( )
2010-01-20 18:46:02 +00:00
if self . state is runQueueRunInit :
2010-06-10 17:35:31 +00:00
logger . info ( " Executing RunQueue Tasks " )
2010-08-18 16:13:06 +00:00
self . rqexe = RunQueueExecuteTasks ( self )
self . state = runQueueRunning
2010-01-20 18:46:02 +00:00
if self . state is runQueueRunning :
2010-09-13 15:57:13 +00:00
retval = self . rqexe . execute ( )
2010-01-20 18:46:02 +00:00
if self . state is runQueueCleanUp :
2010-08-18 16:13:06 +00:00
self . rqexe . finish ( )
2010-01-20 18:46:02 +00:00
2014-07-21 08:35:53 +00:00
if ( self . state is runQueueComplete or self . state is runQueueFailed ) and self . rqexe :
2013-06-07 17:13:04 +00:00
self . teardown_workers ( )
2012-01-19 14:36:03 +00:00
if self . rqexe . stats . failed :
logger . info ( " Tasks Summary: Attempted %d tasks of which %d didn ' t need to be rerun and %d failed. " , self . rqexe . stats . completed + self . rqexe . stats . failed , self . rqexe . stats . skipped , self . rqexe . stats . failed )
else :
# Let's avoid the word "failed" if nothing actually did
logger . info ( " Tasks Summary: Attempted %d tasks of which %d didn ' t need to be rerun and all succeeded. " , self . rqexe . stats . completed , self . rqexe . stats . skipped )
2010-01-20 18:46:02 +00:00
if self . state is runQueueFailed :
2010-08-18 10:30:53 +00:00
if not self . rqdata . taskData . tryaltconfigs :
2010-08-19 21:35:33 +00:00
raise bb . runqueue . TaskFailure ( self . rqexe . failed_fnids )
for fnid in self . rqexe . failed_fnids :
2010-08-18 10:30:53 +00:00
self . rqdata . taskData . fail_fnid ( fnid )
self . rqdata . reset ( )
2010-01-20 18:46:02 +00:00
if self . state is runQueueComplete :
# All done
return False
# Loop
2010-08-18 16:37:15 +00:00
return retval
2007-04-01 15:04:49 +00:00
2012-07-25 18:43:53 +00:00
def execute_runqueue ( self ) :
2012-08-02 20:40:36 +00:00
# Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
2012-07-25 18:43:53 +00:00
try :
return self . _execute_runqueue ( )
2012-08-02 20:40:36 +00:00
except bb . runqueue . TaskFailure :
raise
2012-09-23 13:14:24 +00:00
except SystemExit :
raise
2012-07-25 18:43:53 +00:00
except :
logger . error ( " An uncaught exception occured in runqueue, please see the failure below: " )
2013-06-07 17:11:09 +00:00
try :
2013-06-07 17:13:04 +00:00
self . teardown_workers ( )
2013-06-07 17:11:09 +00:00
except :
pass
2012-07-25 18:43:53 +00:00
self . state = runQueueComplete
raise
2010-08-18 16:13:06 +00:00
def finish_runqueue ( self , now = False ) :
2012-02-29 14:15:28 +00:00
if not self . rqexe :
2014-07-21 08:35:53 +00:00
self . state = runQueueComplete
2012-02-29 14:15:28 +00:00
return
2010-08-18 16:13:06 +00:00
if now :
self . rqexe . finish_now ( )
else :
self . rqexe . finish ( )
2007-04-01 15:04:49 +00:00
2014-03-26 13:47:29 +00:00
def dump_signatures ( self , options ) :
2010-08-31 13:49:43 +00:00
done = set ( )
bb . note ( " Reparsing files to collect dependency data " )
for task in range ( len ( self . rqdata . runq_fnid ) ) :
if self . rqdata . runq_fnid [ task ] not in done :
2011-01-10 12:48:49 +00:00
fn = self . rqdata . taskData . fn_index [ self . rqdata . runq_fnid [ task ] ]
2013-05-30 12:26:58 +00:00
the_data = bb . cache . Cache . loadDataFull ( fn , self . cooker . collection . get_file_appends ( fn ) , self . cooker . data )
2010-08-31 13:49:43 +00:00
done . add ( self . rqdata . runq_fnid [ task ] )
2014-03-26 13:47:29 +00:00
bb . parse . siggen . dump_sigs ( self . rqdata . dataCache , options )
2010-08-31 13:49:43 +00:00
return
2013-12-18 16:21:27 +00:00
def print_diffscenetasks ( self ) :
valid = [ ]
sq_hash = [ ]
sq_hashfn = [ ]
sq_fn = [ ]
sq_taskname = [ ]
sq_task = [ ]
noexec = [ ]
stamppresent = [ ]
valid_new = set ( )
for task in xrange ( len ( self . rqdata . runq_fnid ) ) :
fn = self . rqdata . taskData . fn_index [ self . rqdata . runq_fnid [ task ] ]
taskname = self . rqdata . runq_task [ task ]
taskdep = self . rqdata . dataCache . task_deps [ fn ]
if ' noexec ' in taskdep and taskname in taskdep [ ' noexec ' ] :
noexec . append ( task )
continue
sq_fn . append ( fn )
sq_hashfn . append ( self . rqdata . dataCache . hashfn [ fn ] )
sq_hash . append ( self . rqdata . runq_hash [ task ] )
sq_taskname . append ( taskname )
sq_task . append ( task )
call = self . hashvalidate + " (sq_fn, sq_task, sq_hash, sq_hashfn, d) "
locs = { " sq_fn " : sq_fn , " sq_task " : sq_taskname , " sq_hash " : sq_hash , " sq_hashfn " : sq_hashfn , " d " : self . cooker . data }
valid = bb . utils . better_eval ( call , locs )
for v in valid :
valid_new . add ( sq_task [ v ] )
# Tasks which are both setscene and noexec never care about dependencies
# We therefore find tasks which are setscene and noexec and mark their
# unique dependencies as valid.
for task in noexec :
if task not in self . rqdata . runq_setscene :
continue
for dep in self . rqdata . runq_depends [ task ] :
hasnoexecparents = True
for dep2 in self . rqdata . runq_revdeps [ dep ] :
if dep2 in self . rqdata . runq_setscene and dep2 in noexec :
continue
hasnoexecparents = False
break
if hasnoexecparents :
valid_new . add ( dep )
invalidtasks = set ( )
for task in xrange ( len ( self . rqdata . runq_fnid ) ) :
if task not in valid_new and task not in noexec :
invalidtasks . add ( task )
found = set ( )
processed = set ( )
for task in invalidtasks :
toprocess = set ( [ task ] )
while toprocess :
next = set ( )
for t in toprocess :
for dep in self . rqdata . runq_depends [ t ] :
if dep in invalidtasks :
found . add ( task )
if dep not in processed :
processed . add ( dep )
next . add ( dep )
toprocess = next
if task in found :
toprocess = set ( )
tasklist = [ ]
for task in invalidtasks . difference ( found ) :
tasklist . append ( self . rqdata . get_user_idstring ( task ) )
if tasklist :
bb . plain ( " The differences between the current build and any cached tasks start at the following tasks: \n " + " \n " . join ( tasklist ) )
2010-08-18 16:13:06 +00:00
2013-12-19 09:40:52 +00:00
return invalidtasks . difference ( found )
def write_diffscenetasks ( self , invalidtasks ) :
# Define recursion callback
def recursecb ( key , hash1 , hash2 ) :
hashes = [ hash1 , hash2 ]
hashfiles = bb . siggen . find_siginfo ( key , None , hashes , self . cfgData )
recout = [ ]
if len ( hashfiles ) == 2 :
out2 = bb . siggen . compare_sigfiles ( hashfiles [ hash1 ] , hashfiles [ hash2 ] , recursecb )
recout . extend ( list ( ' ' + l for l in out2 ) )
else :
recout . append ( " Unable to find matching sigdata for %s with hashes %s or %s " % ( key , hash1 , hash2 ) )
return recout
for task in invalidtasks :
fn = self . rqdata . taskData . fn_index [ self . rqdata . runq_fnid [ task ] ]
pn = self . rqdata . dataCache . pkg_fn [ fn ]
taskname = self . rqdata . runq_task [ task ]
h = self . rqdata . runq_hash [ task ]
matches = bb . siggen . find_siginfo ( pn , taskname , [ ] , self . cfgData )
match = None
for m in matches :
if h in m :
match = m
if match is None :
bb . fatal ( " Can ' t find a task we ' re supposed to have written out? (hash: %s )? " % h )
matches = { k : v for k , v in matches . iteritems ( ) if h not in k }
2014-01-27 21:47:27 +00:00
if matches :
2014-01-20 21:36:36 +00:00
latestmatch = sorted ( matches . keys ( ) , key = lambda f : matches [ f ] ) [ - 1 ]
prevh = __find_md5__ . search ( latestmatch ) . group ( 0 )
output = bb . siggen . compare_sigfiles ( latestmatch , match , recursecb )
bb . plain ( " \n Task %s : %s couldn ' t be used from the cache because: \n We need hash %s , closest matching task was %s \n " % ( pn , taskname , h , prevh ) + ' \n ' . join ( output ) )
2013-12-19 09:40:52 +00:00
2010-08-18 16:13:06 +00:00
class RunQueueExecute:
    """
    Base class for the runqueue execution phases; holds per-run
    bookkeeping and connects the worker pipes back to this executor.
    """
    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"

        # Per-task state lists, indexed by task number.
        self.runq_buildable = []
        self.runq_running = []
        self.runq_complete = []

        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_fnids = []

        self.stampcache = {}

        # Route worker events to this executor.
        rq.workerpipe.setrunqueueexec(self)
        if rq.fakeworkerpipe:
            rq.fakeworkerpipe.setrunqueueexec(self)
def runqueue_process_waitpid ( self , task , status ) :
2012-05-09 23:32:20 +00:00
# self.build_stamps[pid] may not exist when use shared work directory.
2013-06-07 17:11:09 +00:00
if task in self . build_stamps :
2013-11-25 23:12:27 +00:00
self . build_stamps2 . remove ( self . build_stamps [ task ] )
2013-06-07 17:11:09 +00:00
del self . build_stamps [ task ]
2012-05-09 23:32:20 +00:00
if status != 0 :
self . task_fail ( task , status )
2010-12-08 00:08:04 +00:00
else :
self . task_complete ( task )
2011-02-15 23:20:20 +00:00
return True
2010-08-18 16:13:06 +00:00
def finish_now ( self ) :
2013-06-07 17:11:09 +00:00
2014-03-09 17:01:19 +00:00
for worker in [ self . rq . worker , self . rq . fakeworker ] :
if not worker :
continue
try :
worker . stdin . write ( " <finishnow></finishnow> " )
worker . stdin . flush ( )
except IOError :
# worker must have died?
pass
2010-08-18 16:13:06 +00:00
2012-02-25 16:02:29 +00:00
if len ( self . failed_fnids ) != 0 :
self . rq . state = runQueueFailed
return
self . rq . state = runQueueComplete
return
2010-08-18 16:13:06 +00:00
def finish ( self ) :
self . rq . state = runQueueCleanUp
2010-08-19 10:36:29 +00:00
if self . stats . active > 0 :
bb . event . fire ( runQueueExitWait ( self . stats . active ) , self . cfgData )
2013-06-07 17:13:04 +00:00
self . rq . read_workers ( )
2010-08-19 10:36:29 +00:00
return
2010-08-18 16:13:06 +00:00
if len ( self . failed_fnids ) != 0 :
self . rq . state = runQueueFailed
return
self . rq . state = runQueueComplete
return
2012-11-16 15:30:52 +00:00
def check_dependencies ( self , task , taskdeps , setscene = False ) :
if not self . rq . depvalidate :
return False
taskdata = { }
taskdeps . add ( task )
for dep in taskdeps :
if setscene :
depid = self . rqdata . runq_setscene [ dep ]
else :
depid = dep
fn = self . rqdata . taskData . fn_index [ self . rqdata . runq_fnid [ depid ] ]
pn = self . rqdata . dataCache . pkg_fn [ fn ]
taskname = self . rqdata . runq_task [ depid ]
taskdata [ dep ] = [ pn , taskname , fn ]
call = self . rq . depvalidate + " (task, taskdata, notneeded, d) "
2013-05-30 12:26:58 +00:00
locs = { " task " : task , " taskdata " : taskdata , " notneeded " : self . scenequeue_notneeded , " d " : self . cooker . data }
2012-11-16 15:30:52 +00:00
valid = bb . utils . better_eval ( call , locs )
return valid
2010-08-24 23:58:23 +00:00
class RunQueueExecuteDummy(RunQueueExecute):
    """Minimal executor used when there is nothing at all to run."""

    def __init__(self, rq):
        # Deliberately does not call RunQueueExecute.__init__: no workers or
        # per-task state are needed for an empty queue.
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        """An empty queue is complete by definition."""
        self.rq.state = runQueueComplete
        return
class RunQueueExecuteTasks(RunQueueExecute):
    """
    Executor for the real (non-setscene) tasks.  Propagates the set of tasks
    covered by successful setscene runs, selects tasks via the configured
    scheduler, and dispatches them to the worker processes.
    """
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runq_fnid))

        self.stampcache = {}

        # Snapshot of what setscene covered before we expand/shrink the set
        # below; used to stop the setsceneverify removal cascade.
        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            if len(self.rqdata.runq_depends[task]) == 0:
                self.runq_buildable.append(1)
            else:
                self.runq_buildable.append(0)
            # A task whose reverse dependencies were all satisfied from
            # setscene is itself covered (nothing will ever need its output).
            if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
                self.rq.scenequeue_covered.add(task)

        # Iterate to a fixed point: adding a task to the covered set can make
        # further tasks coverable.
        found = True
        while found:
            found = False
            for task in xrange(self.stats.total):
                if task in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s (%s): %s' % (task, self.rqdata.get_user_idstring(task), str(self.rqdata.runq_revdeps[task])))

                if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
                    found = True
                    self.rq.scenequeue_covered.add(task)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            # Gather the tasks with neither a current setscene stamp nor a
            # current normal stamp; the verify hook may use this list.
            invalidtasks = []
            for task in xrange(len(self.rqdata.runq_task)):
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                taskname = self.rqdata.runq_task[task]
                taskdep = self.rqdata.dataCache.task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(task, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                if self.rq.check_stamp_task(task, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                invalidtasks.append(task)

            call = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d, invalidtasks=invalidtasks)"
            call2 = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : self.rqdata.runq_task, "fnids" : self.rqdata.runq_fnid, "fns" : self.rqdata.taskData.fn_index, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
            # Backwards compatibility with older versions without invalidtasks
            try:
                covered_remove = bb.utils.better_eval(call, locs)
            except TypeError:
                covered_remove = bb.utils.better_eval(call2, locs)

        def removecoveredtask(task):
            # Un-cover a task: delete its setscene stamp and drop it from the
            # covered set so the real task will run.
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task] + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCache, fn)
            self.rq.scenequeue_covered.remove(task)

        # Cascade the removals: anything the removed tasks depend on (that we
        # ourselves promoted into the covered set) must be un-covered too.
        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runq_depends[task]:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)

        event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)

        # Instantiate the scheduler named by BB_SCHEDULER; for-else fires
        # bb.fatal only when no scheduler matched.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

    def get_schedulers(self):
        """
        Return the set of available scheduler classes: every RunQueueScheduler
        subclass in this module, plus any classes named (module.Class) in
        BB_SCHEDULERS.
        """
        schedulers = set(obj for obj in globals().values()
                             if type(obj) is type and
                                issubclass(obj, RunQueueScheduler))

        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
        if user_schedulers:
            for sched in user_schedulers.split():
                if not "." in sched:
                    bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                    continue

                modname, name = sched.rsplit(".", 1)
                try:
                    module = __import__(modname, fromlist=(name,))
                except ImportError as exc:
                    logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                    raise SystemExit(1)
                else:
                    schedulers.add(getattr(module, name))

        return schedulers

    def setbuildable(self, task):
        # Flag the task buildable and notify the scheduler.
        # NOTE(review): 'newbuilable' (sic) — presumably matches the
        # scheduler API spelling defined elsewhere in this file; confirm
        # before renaming.
        self.runq_buildable[task] = 1
        self.sched.newbuilable(task)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete[task] = 1
        for revdep in self.rqdata.runq_revdeps[task]:
            if self.runq_running[revdep] == 1:
                continue
            if self.runq_buildable[revdep] == 1:
                continue
            alldeps = 1
            for dep in self.rqdata.runq_depends[revdep]:
                if self.runq_complete[dep] != 1:
                    alldeps = 0
            if alldeps == 1:
                self.setbuildable(revdep)
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
                taskname = self.rqdata.runq_task[revdep]
                logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname)

    def task_complete(self, task):
        # Successful completion: update stats, fire the event, then release
        # dependents.
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        fnid = self.rqdata.runq_fnid[task]
        self.failed_fnids.append(fnid)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # Unless continuing past errors was requested, move to cleanup.
        if self.rqdata.taskData.abort:
            self.rq.state = runQueueCleanUp

    def task_skip(self, task, reason):
        # Skip without executing: mark it running+complete so dependents are
        # released, and count it as skipped in the stats.
        self.runq_running[task] = 1
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        """

        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            # NOTE(review): no return here — presumably sched.next() yields
            # None for an empty queue, so we fall through to the sanity
            # checks; confirm against the scheduler implementation.
            self.rq.state = runQueueCleanUp

        task = self.sched.next()
        if task is not None:
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]

            # Covered by a successful setscene run — no need to execute.
            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s (%s)", task,
                                self.rqdata.get_user_idstring(task))
                self.task_skip(task, "covered")
                return True

            # Already has a current stamp — nothing to do.
            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s (%s)", task,
                                self.rqdata.get_user_idstring(task))
                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks never run; just stamp them (unless dry-run)
                # and mark them done.
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running[task] = 1
                self.stats.taskActive()
                if not self.cooker.configuration.dry_run:
                    bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            # Dispatch to the fakeroot worker when the task requires it,
            # otherwise to the normal worker.
            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not self.rq.fakeworker:
                    self.rq.start_fakeworker(self)
                self.rq.fakeworker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + "</runtask>")
                self.rq.fakeworker.stdin.flush()
            else:
                self.rq.worker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + "</runtask>")
                self.rq.worker.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running[task] = 1
            self.stats.taskActive()
            # Room for more parallel tasks — ask to be called again promptly.
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in xrange(self.stats.total):
            if self.runq_buildable[task] == 0:
                logger.error("Task %s never buildable!", task)
            if self.runq_running[task] == 0:
                logger.error("Task %s never ran!", task)
            if self.runq_complete[task] == 0:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True

    def build_taskdepdata(self, task):
        """
        Build {taskid: [pn, taskname, fn, deps]} for 'task' and its full
        (transitive) dependency closure, breadth-first.
        """
        taskdepdata = {}
        # NOTE(review): runq_depends[task] is mutated (task added) — the set
        # object appears to be shared with rqdata; confirm this is intended.
        next = self.rqdata.runq_depends[task]
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
                pn = self.rqdata.dataCache.pkg_fn[fn]
                taskname = self.rqdata.runq_task[revdep]
                deps = self.rqdata.runq_depends[revdep]
                taskdepdata[revdep] = [pn, taskname, fn, deps]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
class RunQueueExecuteScenequeue ( RunQueueExecute ) :
    def __init__(self, rq):
        """
        Build the squashed setscene dependency graph and pre-validate which
        setscene tasks can possibly succeed (stamps present, hashes valid).
        """
        RunQueueExecute.__init__(self, rq)

        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()
        self.scenequeue_notneeded = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene))

        sq_revdeps = []
        sq_revdeps_new = []
        sq_revdeps_squash = []
        self.sq_harddeps = {}

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            self.runq_buildable.append(0)

        # First process the chains up to the first setscene task.
        endpoints = {}
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints[task] = set()

        # Secondly process the chains between setscene tasks.
        for task in self.rqdata.runq_setscene:
            for dep in self.rqdata.runq_depends[task]:
                if dep not in endpoints:
                    endpoints[dep] = set()
                endpoints[dep].add(task)

        def process_endpoints(endpoints):
            # Walk back from each endpoint, accumulating which setscene tasks
            # (transitively) depend on each node, and recording that set on
            # the setscene tasks themselves in sq_revdeps_new.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new[point] = tasks
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)

        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = []
        sq_revdeps_new2 = []
        def process_endpoints2(endpoints):
            # Same walk as process_endpoints, but the endpoint itself is
            # seeded into 'tasks' so direct endpoints propagate to their
            # nearest setscene task.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps2.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new2.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints2[task] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        for task in self.rqdata.runq_setscene:
            if sq_revdeps_new2[task]:
                self.unskippable.append(self.rqdata.runq_setscene.index(task))

        # Squash: re-index the collapsed reverse deps by position in
        # runq_setscene (the scenequeue's own task numbering).
        for task in xrange(len(self.rqdata.runq_fnid)):
            if task in self.rqdata.runq_setscene:
                deps = set()
                for dep in sq_revdeps_new[task]:
                    deps.add(self.rqdata.runq_setscene.index(dep))
                sq_revdeps_squash.append(deps)
            elif len(sq_revdeps_new[task]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")

        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for task in self.rqdata.runq_setscene:
            realid = self.rqdata.taskData.gettask_id(self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]], self.rqdata.runq_task[task] + "_setscene", False)
            idepends = self.rqdata.taskData.tasks_idepends[realid]
            for (depid, idependtask) in idepends:
                if depid not in self.rqdata.taskData.build_targets:
                    continue
                depdata = self.rqdata.taskData.build_targets[depid][0]
                if depdata is None:
                    continue
                dep = self.rqdata.taskData.fn_index[depdata]
                taskid = self.rqdata.get_task_id(self.rqdata.taskData.getfn_id(dep), idependtask.replace("_setscene", ""))
                if taskid is None:
                    bb.msg.fatal("RunQueue", "Task %s_setscene depends upon non-existent task %s:%s" % (self.rqdata.get_user_idstring(task), dep, idependtask))

                # Record as a hard dependency: if the dependency fails, this
                # task must be skipped too (see scenequeue_updatecounters).
                if not self.rqdata.runq_setscene.index(taskid) in self.sq_harddeps:
                    self.sq_harddeps[self.rqdata.runq_setscene.index(taskid)] = set()
                self.sq_harddeps[self.rqdata.runq_setscene.index(taskid)].add(self.rqdata.runq_setscene.index(task))

                sq_revdeps_squash[self.rqdata.runq_setscene.index(task)].add(self.rqdata.runq_setscene.index(taskid))
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[self.rqdata.runq_setscene.index(taskid)] = set()

        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)

        #for task in xrange(len(sq_revdeps_squash)):
        #    realtask = self.rqdata.runq_setscene[task]
        #    bb.warn("Task %s: %s_setscene is %s " % (task, self.rqdata.get_user_idstring(realtask) , sq_revdeps_squash[task]))

        # sq_deps is the forward map; sq_revdeps2 is a working copy of the
        # reverse map that gets consumed as tasks complete.
        self.sq_deps = []
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for task in xrange(len(self.sq_revdeps)):
            self.sq_deps.append(set())
        for task in xrange(len(self.sq_revdeps)):
            for dep in self.sq_revdeps[task]:
                self.sq_deps[dep].add(task)

        # Tasks with no remaining reverse deps can start immediately.
        for task in xrange(len(self.sq_revdeps)):
            if len(self.sq_revdeps[task]) == 0:
                self.runq_buildable[task] = 1

        self.outrightfail = []
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for task in xrange(len(self.sq_revdeps)):
                realtask = self.rqdata.runq_setscene[task]
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                taskname = self.rqdata.runq_task[realtask]
                taskdep = self.rqdata.dataCache.task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    # noexec: just stamp and skip, nothing to validate.
                    noexec.append(task)
                    self.task_skip(task)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
                    continue

                if self.rq.check_stamp_task(realtask, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue

                if self.rq.check_stamp_task(realtask, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
                sq_hash.append(self.rqdata.runq_hash[realtask])
                sq_taskname.append(taskname)
                sq_task.append(task)
            # Ask the metadata hash-validation hook which of the remaining
            # tasks have usable cached artefacts.
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
            valid = bb.utils.better_eval(call, locs)

            # 'valid' holds indices into sq_task; translate back to scenequeue
            # task ids and merge with the stamp-present set.
            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for task in xrange(len(self.sq_revdeps)):
                if task not in valid_new and task not in noexec:
                    realtask = self.rqdata.runq_setscene[task]
                    logger.debug(2, 'No package found, so skipping setscene task %s',
                                 self.rqdata.get_user_idstring(realtask))
                    self.outrightfail.append(task)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun
    def scenequeue_updatecounters(self, task, fail = False):
        """
        Propagate completion (or failure) of setscene 'task' to its
        dependents.  A failed hard dependency cascades the failure
        recursively; otherwise a dependent becomes buildable once all of its
        reverse dependencies have been accounted for.
        """
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                realtask = self.rqdata.runq_setscene[task]
                realdep = self.rqdata.runq_setscene[dep]
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (self.rqdata.get_user_idstring(realtask), self.rqdata.get_user_idstring(realdep)))
                # Cascade the failure to the hard dependent's own dependents.
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable[dep] = 1
def task_completeoutright ( self , task ) :
2010-08-19 10:36:29 +00:00
"""
Mark a task as completed
Look at the reverse dependencies and mark any task with
completed dependencies as buildable
"""
index = self . rqdata . runq_setscene [ task ]
2011-01-01 23:55:54 +00:00
logger . debug ( 1 , ' Found task %s which could be accelerated ' ,
self . rqdata . get_user_idstring ( index ) )
2010-08-19 10:36:29 +00:00
self . scenequeue_covered . add ( task )
self . scenequeue_updatecounters ( task )
2010-08-19 21:35:33 +00:00
def task_complete ( self , task ) :
self . stats . taskCompleted ( )
2013-09-09 16:40:56 +00:00
bb . event . fire ( sceneQueueTaskCompleted ( task , self . stats , self . rq ) , self . cfgData )
2010-08-19 21:35:33 +00:00
self . task_completeoutright ( task )
2010-08-19 10:36:29 +00:00
def task_fail ( self , task , result ) :
self . stats . taskFailed ( )
2012-03-01 14:57:35 +00:00
bb . event . fire ( sceneQueueTaskFailed ( task , self . stats , result , self ) , self . cfgData )
2010-08-19 10:36:29 +00:00
self . scenequeue_notcovered . add ( task )
2013-06-14 16:03:45 +00:00
self . scenequeue_updatecounters ( task , True )
2010-08-19 10:36:29 +00:00
def task_failoutright ( self , task ) :
self . runq_running [ task ] = 1
self . runq_buildable [ task ] = 1
self . stats . taskCompleted ( )
self . stats . taskSkipped ( )
index = self . rqdata . runq_setscene [ task ]
self . scenequeue_notcovered . add ( task )
2013-06-14 16:03:45 +00:00
self . scenequeue_updatecounters ( task , True )
2010-08-19 10:36:29 +00:00
def task_skip ( self , task ) :
self . runq_running [ task ] = 1
self . runq_buildable [ task ] = 1
2010-08-19 21:35:33 +00:00
self . task_completeoutright ( task )
2010-08-19 10:36:29 +00:00
self . stats . taskCompleted ( )
self . stats . taskSkipped ( )
def execute(self):
    """
    Run the tasks in a queue prepared by prepare_runqueue

    Main scheduling step for the setscene queue. Each call either starts
    (at most) one setscene task, skips/fails one outright, or — once no
    work remains — converts the covered/notcovered sets to full taskgraph
    ids and advances the runqueue state to runQueueRunInit.
    Returns True while there is (potential) work, or the active fds to
    poll on while waiting for running tasks.
    """

    # Process any pending messages from the worker processes first
    self.rq.read_workers()

    task = None
    if self.stats.active < self.number_tasks:
        # Find the next setscene to run
        for nexttask in xrange(self.stats.total):
            if self.runq_buildable[nexttask] == 1 and self.runq_running[nexttask] != 1:
                if nexttask in self.unskippable:
                    logger.debug(2, "Setscene task %s is unskippable" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                # A skippable task whose reverse dependencies are all already
                # covered may itself be unnecessary — unless it is an explicit
                # build target, skip it rather than run it.
                if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                    realtask = self.rqdata.runq_setscene[nexttask]
                    fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                    foundtarget = False
                    for target in self.rqdata.target_pairs:
                        if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                            foundtarget = True
                            break
                    if not foundtarget:
                        logger.debug(2, "Skipping setscene for task %s" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                        self.task_skip(nexttask)
                        self.scenequeue_notneeded.add(nexttask)
                        return True
                # Tasks known in advance to be unusable fail immediately
                if nexttask in self.outrightfail:
                    self.task_failoutright(nexttask)
                    return True
                task = nexttask
                break
    if task is not None:
        realtask = self.rqdata.runq_setscene[task]
        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]

        taskname = self.rqdata.runq_task[realtask] + "_setscene"
        # If the real (non-setscene) task's stamp is already valid, the
        # setscene variant is pointless — fail it outright so the real
        # task path handles it.
        if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask], recurse = True, cache=self.stampcache):
            logger.debug(2, 'Stamp for underlying task %s(%s) is current, so skipping setscene variant',
                         task, self.rqdata.get_user_idstring(realtask))
            self.task_failoutright(task)
            return True

        # With --force, explicit build targets must really run, so their
        # setscene variants are failed outright.
        if self.cooker.configuration.force:
            for target in self.rqdata.target_pairs:
                if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                    self.task_failoutright(task)
                    return True

        # A current setscene stamp means the task (and its dependencies)
        # can be skipped entirely.
        if self.rq.check_stamp_task(realtask, taskname, cache=self.stampcache):
            logger.debug(2, 'Setscene stamp current task %s(%s), so skip it and its dependencies',
                         task, self.rqdata.get_user_idstring(realtask))
            self.task_skip(task)
            return True

        startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
        bb.event.fire(startevent, self.cfgData)

        # Dispatch to the fakeroot worker when the task requires it,
        # otherwise to the normal worker, via the tag-framed pickle pipe.
        taskdep = self.rqdata.dataCache.task_deps[fn]
        if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
            if not self.rq.fakeworker:
                self.rq.start_fakeworker(self)
            self.rq.fakeworker.stdin.write("<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + "</runtask>")
            self.rq.fakeworker.stdin.flush()
        else:
            self.rq.worker.stdin.write("<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + "</runtask>")
            self.rq.worker.stdin.flush()

        self.runq_running[task] = 1
        self.stats.taskActive()
        if self.stats.active < self.number_tasks:
            return True

    # No new task could be started; while tasks are still running, hand
    # the worker fds back to the caller's event loop to wait on.
    if self.stats.active > 0:
        self.rq.read_workers()
        return self.rq.active_fds()

    #for task in xrange(self.stats.total):
    #    if self.runq_running[task] != 1:
    #        buildable = self.runq_buildable[task]
    #        revdeps = self.sq_revdeps[task]
    #        bb.warn("Found we didn't run %s %s %s %s" % (task, buildable, str(revdeps), self.rqdata.get_user_idstring(self.rqdata.runq_setscene[task])))

    # Convert scenequeue_covered task numbers into full taskgraph ids
    oldcovered = self.scenequeue_covered
    self.rq.scenequeue_covered = set()
    for task in oldcovered:
        self.rq.scenequeue_covered.add(self.rqdata.runq_setscene[task])
    self.rq.scenequeue_notcovered = set()
    for task in self.scenequeue_notcovered:
        self.rq.scenequeue_notcovered.add(self.rqdata.runq_setscene[task])

    logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))

    # Scenequeue phase complete — hand over to the main runqueue
    self.rq.state = runQueueRunInit

    completeevent = sceneQueueComplete(self.stats, self.rq)
    bb.event.fire(completeevent, self.cfgData)

    return True
def runqueue_process_waitpid ( self , task , status ) :
task = self . rq . rqdata . runq_setscene . index ( task )
RunQueueExecute . runqueue_process_waitpid ( self , task , status )
2011-01-10 13:13:08 +00:00
2010-01-20 18:46:02 +00:00
class TaskFailure ( Exception ) :
"""
Exception raised when a task in a runqueue fails
"""
2010-03-24 23:56:12 +00:00
def __init__ ( self , x ) :
2010-01-20 18:46:02 +00:00
self . args = x
class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """
    def __init__(self, remain):
        # Human-readable summary plus the raw remaining-task count
        self.message = "Waiting for %s active tasks to finish" % remain
        self.remain = remain
        bb.event.Event.__init__(self)
class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        # Snapshot the task's identifying details and a copy of the stats
        # so the event remains valid after the queue moves on.
        rqdata = rq.rqdata
        self.taskid = task
        self.taskstring = rqdata.get_user_idstring(task)
        self.taskname = rqdata.get_task_name(task)
        self.taskfile = rqdata.get_task_file(task)
        self.taskhash = rqdata.get_task_hash(task)
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # Re-derive the descriptive fields from the underlying real task,
        # presenting them with the "_setscene" suffix.
        rqdata = rq.rqdata
        realtask = rqdata.runq_setscene[task]
        self.taskstring = rqdata.get_user_idstring(realtask, "_setscene")
        self.taskname = rqdata.get_task_name(realtask) + "_setscene"
        self.taskfile = rqdata.get_task_file(realtask)
        self.taskhash = rqdata.get_task_hash(realtask)
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        # noexec marks a start where the task body will not actually run
        self.noexec = noexec
        runQueueEvent.__init__(self, task, stats, rq)
class sceneQueueTaskStarted ( sceneQueueEvent ) :
2012-02-24 01:31:30 +00:00
"""
2014-08-13 09:18:50 +00:00
Event notifying a setscene task was started
2012-02-24 01:31:30 +00:00
"""
def __init__ ( self , task , stats , rq , noexec = False ) :
2012-02-27 18:54:11 +00:00
sceneQueueEvent . __init__ ( self , task , stats , rq )
2012-02-24 01:31:30 +00:00
self . noexec = noexec
2010-01-20 18:46:02 +00:00
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        # Preserve the worker's exit status for listeners
        self.exitcode = exitcode
        runQueueEvent.__init__(self, task, stats, rq)
class sceneQueueTaskFailed ( sceneQueueEvent ) :
2011-02-28 14:28:25 +00:00
"""
2014-08-13 09:18:50 +00:00
Event notifying a setscene task failed
2011-02-28 14:28:25 +00:00
"""
def __init__ ( self , task , stats , exitcode , rq ) :
2012-02-27 18:54:11 +00:00
sceneQueueEvent . __init__ ( self , task , stats , rq )
2012-02-24 01:31:30 +00:00
self . exitcode = exitcode
2011-02-28 14:28:25 +00:00
2014-08-02 08:51:00 +00:00
class sceneQueueComplete ( sceneQueueEvent ) :
"""
Event when all the sceneQueue tasks are complete
"""
def __init__ ( self , stats , rq ) :
self . stats = stats . copy ( )
bb . event . Event . __init__ ( self )
2010-01-20 18:46:02 +00:00
class runQueueTaskCompleted ( runQueueEvent ) :
"""
2014-08-13 09:18:50 +00:00
Event notifying a task completed
2010-01-20 18:46:02 +00:00
"""
2008-05-13 07:53:18 +00:00
2013-09-09 16:40:56 +00:00
class sceneQueueTaskCompleted ( sceneQueueEvent ) :
"""
2014-08-13 09:18:50 +00:00
Event notifying a setscene task completed
2013-09-09 16:40:56 +00:00
"""
2013-09-16 12:46:01 +00:00
class runQueueTaskSkipped ( runQueueEvent ) :
"""
2014-08-13 09:18:50 +00:00
Event notifying a task was skipped
2013-09-16 12:46:01 +00:00
"""
def __init__ ( self , task , stats , rq , reason ) :
runQueueEvent . __init__ ( self , task , stats , rq )
self . reason = reason
2010-01-20 18:46:02 +00:00
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server

    The worker writes tag-framed pickled messages ("<event>...</event>"
    for bb events, "<exitcode>...</exitcode>" for task completions);
    read() drains and dispatches them incrementally.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        self.input = pipein
        # The write end belongs to the worker process; close our copy.
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        # Accumulated unparsed data; partial messages survive across reads.
        self.queue = ""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Swap the executor that receives exitcode notifications
        # (e.g. when moving from the scenequeue to the runqueue phase).
        self.rqexec = rqexec

    def read(self):
        # First check worker liveness: a worker that exited while we are
        # not tearing down is a fatal condition — report and shut down.
        for w in [self.rq.worker, self.rq.fakeworker]:
            if not w:
                continue
            w.poll()
            if w.returncode is not None and not self.rq.teardown:
                name = None
                if self.rq.worker and w.pid == self.rq.worker.pid:
                    name = "Worker"
                elif self.rq.fakeworker and w.pid == self.rq.fakeworker.pid:
                    name = "Fakeroot"
                bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, w.pid, str(w.returncode)))
                self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + self.input.read(102400)
        except (OSError, IOError) as e:
            # Non-blocking fd: EAGAIN simply means no data available yet.
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        # Repeatedly peel complete messages off the front of the buffer.
        found = True
        while found and len(self.queue):
            found = False
            index = self.queue.find("</event>")
            while index != -1 and self.queue.startswith("<event>"):
                try:
                    # 7 == len("<event>"): pickled payload lies between tags
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                # 8 == len("</event>"): drop the consumed message
                self.queue = self.queue[index+8:]
                index = self.queue.find("</event>")
            index = self.queue.find("</exitcode>")
            while index != -1 and self.queue.startswith("<exitcode>"):
                try:
                    # 10 == len("<exitcode>"): payload is a (task, status) pair
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                # 11 == len("</exitcode>")
                self.queue = self.queue[index+11:]
                index = self.queue.find("</exitcode>")
        # True when this call consumed any new data from the pipe
        return (end > start)

    def close(self):
        # Drain remaining messages, warn about any trailing partial one,
        # then close the read end of the pipe.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()