2006-11-29 22:52:37 +00:00
#!/usr/bin/env python
2006-11-16 15:02:15 +00:00
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake ' RunQueue ' implementation
Handles preparation and execution of a queue of tasks
"""
2007-09-02 14:10:08 +00:00
# Copyright (C) 2006-2007 Richard Purdie
2007-01-08 23:53:01 +00:00
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
2011-01-10 12:48:49 +00:00
import copy
2010-06-10 17:35:31 +00:00
import os
import sys
2007-04-01 15:04:49 +00:00
import signal
2008-03-03 22:01:45 +00:00
import stat
2010-01-21 23:46:20 +00:00
import fcntl
2013-06-07 17:11:09 +00:00
import errno
2010-06-10 15:05:52 +00:00
import logging
2013-12-19 09:40:52 +00:00
import re
2010-06-10 17:35:31 +00:00
import bb
2010-06-10 15:05:52 +00:00
from bb import msg , data , event
V5 Disk space monitoring
Monitor disk availability and take action when the free disk space or
amount of free inode is running low, it is enabled when BB_DISKMON_DIRS
is set.
* Variable meanings(from meta-yocto/conf/local.conf.sample):
# Set the directories to monitor for disk usage, if more than one
# directories are mounted in the same device, then only one directory
# would be monitored since the monitor is based on the device.
# The format is:
# "action,directory,minimum_space,minimum_free_inode"
#
# The "action" must be set and should be one of:
# ABORT: Immediately abort
# STOPTASKS: The new tasks can't be executed any more, will stop the build
# when the running tasks have been done.
# WARN: show warnings (see BB_DISKMON_WARNINTERVAL for more information)
#
# The "directory" must be set, any directory is OK.
#
# Either "minimum_space" or "minimum_free_inode" (or both of them)
# should be set, otherwise the monitor would not be enabled,
# the unit can be G, M, K or none, but do NOT use GB, MB or KB
# (B is not needed).
#BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
#
# Set disk space and inode interval (only works when the action is "WARN",
# the unit can be G, M, or K, but do NOT use the GB, MB or KB
# (B is not needed), the format is:
# "disk_space_interval, disk_inode_interval", the default value is
# "50M,5K" which means that it would warn when the free space is
# lower than the minimum space(or inode), and would repeat the action
# when the disk space reduces 50M (or the amount of inode reduces 5k)
# again.
#BB_DISKMON_WARNINTERVAL = "50M,5K"
[YOCTO #1589]
(Bitbake rev: 4d173d441d2beb8e6492b6b1842682f8cf32e6cc)
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
2012-02-26 08:48:15 +00:00
from bb import monitordisk
2013-06-07 17:11:09 +00:00
import subprocess
2016-05-12 07:30:35 +00:00
import pickle
2010-06-10 15:05:52 +00:00
bblogger = logging . getLogger ( " BitBake " )
2010-06-10 17:35:31 +00:00
logger = logging . getLogger ( " BitBake.RunQueue " )
2006-11-16 15:02:15 +00:00
2013-12-19 09:40:52 +00:00
__find_md5__ = re . compile ( r ' (?i)(?<![a-z0-9])[a-f0-9] {32} (?![a-z0-9]) ' )
2016-06-12 22:55:48 +00:00
def fn_from_tid(tid):
    """Return the recipe filename portion of a "fn:taskname" task id."""
    parts = tid.rsplit(":", 1)
    return parts[0]
def taskname_from_tid(tid):
    """Return the task name portion of a "fn:taskname" task id."""
    parts = tid.rsplit(":", 1)
    return parts[1]
2016-08-16 16:47:06 +00:00
def split_tid(tid):
    """
    Split a task id into its (mc, fn, taskname) components.

    Plain ids look like "fn:taskname" and yield an empty multiconfig name;
    multiconfig ids look like "multiconfig:<mc>:fn:taskname".
    """
    if not tid.startswith('multiconfig:'):
        parts = tid.rsplit(":", 1)
        return ("", parts[0], parts[1])

    pieces = tid.split(':')
    # pieces[0] is the "multiconfig" marker; fn may itself contain colons,
    # so everything between the mc name and the final taskname is rejoined.
    return (pieces[1], ":".join(pieces[2:-1]), pieces[-1])
def build_tid(mc, fn, taskname):
    """Compose a task id from its components; inverse of split_tid()."""
    if mc:
        return ":".join(("multiconfig", mc, fn, taskname))
    return fn + ":" + taskname
def taskfn_fromtid(tid):
    """Return the (possibly multiconfig-prefixed) filename for a task id."""
    (mc, fn, _taskname) = split_tid(tid)
    if not mc:
        return fn
    return "multiconfig:" + mc + ":" + fn
2007-04-01 15:04:49 +00:00
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a shallow duplicate of this stats object."""
        duplicate = self.__class__(self.total)
        duplicate.__dict__.update(self.__dict__)
        return duplicate

    def taskFailed(self):
        """One active task has failed."""
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number=1):
        """`number` active tasks have finished successfully."""
        self.active -= number
        self.completed += number

    def taskSkipped(self, number=1):
        """`number` tasks were skipped; they count as active until completed."""
        self.active += number
        self.skipped += number

    def taskActive(self):
        """One more task has started executing."""
        self.active += 1
2010-03-24 23:56:12 +00:00
# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2        # build the task graph from taskData
runQueueSceneInit = 3      # set up setscene (cached-state) processing
runQueueSceneRun = 4       # execute setscene tasks
runQueueRunInit = 5        # prepare to run the real tasks
runQueueRunning = 6        # main task execution in progress
runQueueFailed = 7         # a task failed; winding down
runQueueCleanUp = 8        # waiting for running tasks to finish after failure
runQueueComplete = 9       # all tasks finished
2010-01-20 18:46:02 +00:00
2010-07-22 17:54:58 +00:00
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Priority list of tids: lower index == scheduled earlier.
        # FIX: this must be a real list (not a list containing a dict_keys
        # view) -- next_buildable_task() calls .index() on it and the
        # subclass schedulers reorder/rebuild it.
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = []
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname) = split_tid(tid)
            taskfn = taskfn_fromtid(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        # Lazily-built inverse of prio_map (tid -> priority index); built on
        # first use so subclasses can reorder prio_map in their __init__.
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Drop anything that has started running since we last looked
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None

        # Fast path: a single candidate needs no priority comparison
        if len(self.buildable) == 1:
            tid = self.buildable[0]
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            # Build the inverse map in one O(n) pass rather than calling
            # prio_map.index() per task (which would be O(n^2)).
            self.rev_prio_map = {}
            for idx, tid in enumerate(self.prio_map):
                self.rev_prio_map[tid] = idx

        best = None
        bestprio = None
        for tid in self.buildable:
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                # Skip tasks whose stamp file is already in use by a
                # running task to avoid racing on the same stamp
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        (None implicitly when we are already at the parallelism limit)
        """
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()

    def newbuilable(self, task):
        # NOTE(review): method name is a historical typo ("newbuilable"),
        # kept unchanged because external callers use it by this name.
        self.buildable.append(task)
2008-01-06 16:51:51 +00:00
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket task ids by their computed weight
        buckets = {}
        for tid in self.rqdata.runtaskentries:
            tid_weight = self.rqdata.runtaskentries[tid].weight
            buckets.setdefault(tid_weight, []).append(tid)

        # Flatten in ascending weight order, then reverse the whole list so
        # the heaviest bucket ends up at the front of the priority map.
        self.prio_map = [tid for w in sorted(buckets) for tid in buckets[w]]
        self.prio_map.reverse()
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible. This works
    well where disk space is at a premium and classes like OE's rm_work are in
    force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)

        #FIXME - whilst this groups all fns together it does not reorder the
        #fn groups optimally.

        # Group the weight-sorted priority map by recipe file, keeping the
        # first-appearance order of each file and the relative order of the
        # tasks within a file. This is a single O(n) pass, replacing the old
        # pop(0)/index()/del reshuffle which was O(n^2) and needlessly
        # deep-copied the whole list first.
        group_order = []
        groups = {}
        for entry in self.prio_map:
            entry_fn = fn_from_tid(entry)
            if entry_fn not in groups:
                groups[entry_fn] = []
                group_order.append(entry_fn)
            groups[entry_fn].append(entry)

        self.prio_map = []
        for entry_fn in group_order:
            self.prio_map.extend(groups[entry_fn])
2016-06-12 22:55:48 +00:00
class RunTaskEntry(object):
    """
    Per-task node in the runqueue task graph.

    depends -- set of tids this task depends on
    revdeps -- set of tids that depend on this task
    hash    -- task signature hash (populated later)
    task    -- originating taskData task reference (populated later)
    weight  -- scheduling weight, see calculate_task_weights()
    """
    def __init__(self):
        self.depends = set()
        self.revdeps = set()
        self.hash = None
        self.task = None
        self.weight = 1
2010-08-18 10:30:53 +00:00
class RunQueueData :
2006-11-16 15:02:15 +00:00
"""
BitBake Run Queue implementation
"""
2016-08-16 16:47:06 +00:00
def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
    """
    Gather everything needed to turn taskData into an executable runqueue.
    """
    # Owning objects and build inputs
    self.rq = rq
    self.cooker = cooker
    self.dataCaches = dataCaches
    self.taskData = taskData
    self.targets = targets
    self.warn_multi_bb = False

    # Configuration-driven policy settings
    self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST", True) or ""
    self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
    self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
    self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

    self.reset()
2006-11-16 15:02:15 +00:00
2010-08-18 10:30:53 +00:00
def reset(self):
    """Discard any previously computed task-graph state."""
    self.runtaskentries = {}
2010-01-20 18:46:02 +00:00
bitbake: runqueue.py: improve printing dependent tasks
Print names instead of Task-IDs (and not mentioning they're task ids).
Previously we printed e.g.:
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (depends: Set([88, 282, 92, 87]))
Now we say
Dependency loop #1 found:
Task 89 (/there/src/oe/openembedded/recipes/busybox/busybox-native_1.15.2.bb, do_configure) (dependent Task-IDs ['busybox-native, do_patch', 'update-rc.d, do_populate_staging', 'busybox-native, do_populate_staging', 'shasum-native.bb, do_populate_staging', 'busybox-native, do_unpack'])
(Bitbake rev: 00eaf76fdc32eb515995b47dfa69eb90ca904b37)
Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
2010-02-08 17:50:34 +00:00
def runq_depends_names(self, ids):
    """
    Turn a list of task ids into short human-readable names for
    dependency-loop messages, e.g. "busybox-native_1.15.2.bb, do_patch"
    becomes "busybox-native, do_patch".
    """
    # Fixes: the redundant function-local `import re` is gone (the module
    # already imports re), the loop variable no longer shadows the builtin
    # `id`, and extend([x]) is now a plain append.
    ret = []
    for depid in ids:
        nam = os.path.basename(depid)
        # Strip the "_<version>" suffix up to the comma separator
        nam = re.sub("_[^,]*,", ",", nam)
        ret.append(nam)
    return ret
2016-06-12 22:55:48 +00:00
def get_task_hash(self, tid):
    """Return the signature hash recorded for the given task id."""
    entry = self.runtaskentries[tid]
    return entry.hash
2013-09-18 12:15:49 +00:00
2016-06-12 22:55:48 +00:00
def get_user_idstring(self, tid, task_name_suffix=""):
    """Return the user-visible identifier string for a task id."""
    return "%s%s" % (tid, task_name_suffix)
2006-11-16 15:02:15 +00:00
2016-04-10 10:09:21 +00:00
def get_short_user_idstring(self, task, task_name_suffix=""):
    """Return "<pn>:<taskname>" for a task id, for compact display."""
    (mc, fn, taskname) = split_tid(task)
    recipe_name = self.dataCaches[mc].pkg_fn[fn]
    display_task = taskname_from_tid(task) + task_name_suffix
    return "%s:%s" % (recipe_name, display_task)
2008-01-06 16:51:51 +00:00
def circular_depchains_handler(self, tasks):
    """
    Some tasks aren't buildable, likely due to circular dependency issues.
    Identify the circular dependencies and print them in a user readable format.

    tasks: iterable of unbuildable tids to investigate.
    Returns a list of message strings describing each loop found
    (at most 10 loops are reported).
    """
    # NOTE(review): `deepcopy` imported here is unused -- the code below
    # uses copy.deepcopy from the module-level import instead.
    from copy import deepcopy

    valid_chains = []     # canonicalised loops already reported
    explored_deps = {}    # tid -> flattened reverse-dependency closure (memoisation)
    msgs = []

    def chain_reorder(chain):
        """
        Reorder a dependency chain so the lowest task id is first
        """
        # Rotation (not sort) preserves the loop order while giving every
        # equivalent loop the same canonical starting point.
        lowest = 0
        new_chain = []
        for entry in range(len(chain)):
            if chain[entry] < chain[lowest]:
                lowest = entry
        new_chain.extend(chain[lowest:])
        new_chain.extend(chain[:lowest])
        return new_chain

    def chain_compare_equal(chain1, chain2):
        """
        Compare two dependency chains and see if they're the same
        """
        if len(chain1) != len(chain2):
            return False
        for index in range(len(chain1)):
            if chain1[index] != chain2[index]:
                return False
        return True

    def chain_array_contains(chain, chain_array):
        """
        Return True if chain_array contains chain
        """
        for ch in chain_array:
            if chain_compare_equal(ch, chain):
                return True
        return False

    def find_chains(tid, prev_chain):
        # Depth-first walk over reverse dependencies; prev_chain is the
        # path from the starting task to here. Revisiting a tid already on
        # the path means we have closed a loop.
        prev_chain.append(tid)
        total_deps = []
        total_deps.extend(self.runtaskentries[tid].revdeps)
        for revdep in self.runtaskentries[tid].revdeps:
            if revdep in prev_chain:
                idx = prev_chain.index(revdep)
                # To prevent duplicates, reorder the chain to start with the lowest taskid
                # and search through an array of those we've already printed
                chain = prev_chain[idx:]
                new_chain = chain_reorder(chain)
                if not chain_array_contains(new_chain, valid_chains):
                    valid_chains.append(new_chain)
                    msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                    for dep in new_chain:
                        msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                    msgs.append("\n")
                if len(valid_chains) > 10:
                    msgs.append("Aborted dependency loops search after 10 matches.\n")
                    # NOTE: this only returns from this recursive call, so it
                    # prunes the current branch rather than stopping the
                    # whole search immediately.
                    return msgs
                continue
            scan = False
            if revdep not in explored_deps:
                # Not seen before: must explore it
                scan = True
            elif revdep in explored_deps[revdep]:
                # Known self-referencing task: re-explore in this context
                scan = True
            else:
                # Re-explore if any task on the current path shows up in
                # revdep's previously computed closure (a loop may exist)
                for dep in prev_chain:
                    if dep in explored_deps[revdep]:
                        scan = True
            if scan:
                find_chains(revdep, copy.deepcopy(prev_chain))
            for dep in explored_deps[revdep]:
                if dep not in total_deps:
                    total_deps.append(dep)

        explored_deps[tid] = total_deps

    for task in tasks:
        find_chains(task, [])

    return msgs
def calculate_task_weights(self, endpoints):
    """
    Calculate a number representing the "weight" of each task. Heavier weighted tasks
    have more dependencies and hence should be executed sooner for maximum speed.

    This function also sanity checks the task list finding tasks that are not
    possible to execute due to circular dependencies.

    endpoints: tids with no reverse dependencies (nothing depends on them).
    Returns the tid -> weight dict; each weight is also stored on the
    corresponding runtaskentries entry. Aborts the build via bb.msg.fatal
    if unbuildable (circularly dependent) tasks are found.
    """
    # (Removed an unused local, numTasks, from the original.)
    weight = {}
    deps_left = {}
    task_done = {}

    for tid in self.runtaskentries:
        task_done[tid] = False
        weight[tid] = 1
        deps_left[tid] = len(self.runtaskentries[tid].revdeps)

    # Endpoints seed the propagation with a boosted starting weight
    for tid in endpoints:
        weight[tid] = 10
        task_done[tid] = True

    # Breadth-first propagation from endpoints towards the roots: each
    # task accumulates the weight of everything that (transitively)
    # depends on it. A task joins the next wave only once all of its
    # reverse dependencies have been processed (deps_left hits 0).
    while True:
        next_points = []
        for tid in endpoints:
            for revdep in self.runtaskentries[tid].depends:
                weight[revdep] = weight[revdep] + weight[tid]
                deps_left[revdep] = deps_left[revdep] - 1
                if deps_left[revdep] == 0:
                    next_points.append(revdep)
                    task_done[revdep] = True
        endpoints = next_points
        if len(next_points) == 0:
            break

    # Circular dependency sanity check: anything never marked done (or with
    # reverse dependencies still outstanding) cannot be scheduled.
    problem_tasks = []
    for tid in self.runtaskentries:
        if task_done[tid] is False or deps_left[tid] != 0:
            problem_tasks.append(tid)
            logger.debug(2, "Task %s is not buildable", tid)
            logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
        self.runtaskentries[tid].weight = weight[tid]

    if problem_tasks:
        message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
        message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
        message = message + "Identifying dependency loops (this may take a short while)...\n"
        logger.error(message)

        msgs = self.circular_depchains_handler(problem_tasks)

        message = "\n"
        for msg in msgs:
            message = message + msg
        bb.msg.fatal("RunQueue", message)

    return weight
2010-08-18 10:30:53 +00:00
def prepare ( self ) :
2006-11-16 15:02:15 +00:00
"""
2010-03-24 23:56:12 +00:00
Turn a set of taskData into a RunQueue and compute data needed
2006-11-16 15:02:15 +00:00
to optimise the execution order .
"""
2016-06-12 22:55:48 +00:00
runq_build = { }
2012-06-26 18:00:58 +00:00
recursivetasks = { }
2013-06-19 13:03:39 +00:00
recursiveitasks = { }
2012-07-04 16:39:11 +00:00
recursivetasksselfref = set ( )
2006-11-16 15:02:15 +00:00
2007-04-01 15:04:49 +00:00
taskData = self . taskData
2016-08-16 16:47:06 +00:00
found = False
for mc in self . taskData :
if len ( taskData [ mc ] . taskentries ) > 0 :
found = True
break
if not found :
2007-05-22 11:50:37 +00:00
# Nothing to do
2010-08-24 23:58:23 +00:00
return 0
2007-05-22 11:50:37 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . start ( )
self . init_progress_reporter . next_stage ( )
2006-11-16 15:02:15 +00:00
2008-01-06 16:51:51 +00:00
# Step A - Work out a list of tasks to run
#
2009-07-21 18:44:23 +00:00
# Taskdata gives us a list of possible providers for every build and run
2010-03-24 23:56:12 +00:00
# target ordered by priority. It also gives information on each of those
2009-07-21 18:44:23 +00:00
# providers.
2008-01-06 16:51:51 +00:00
#
2010-03-24 23:56:12 +00:00
# To create the actual list of tasks to execute we fix the list of
# providers and then resolve the dependencies into task IDs. This
# process is repeated for each type of dependency (tdepends, deptask,
2008-01-06 16:51:51 +00:00
# rdeptast, recrdeptask, idepends).
2016-08-16 16:47:06 +00:00
def add_build_dependencies ( depids , tasknames , depends , mc ) :
2016-06-12 22:55:48 +00:00
for depname in depids :
2009-07-21 21:38:53 +00:00
# Won't be in build_targets if ASSUME_PROVIDED
2016-08-16 16:47:06 +00:00
if depname not in taskData [ mc ] . build_targets or not taskData [ mc ] . build_targets [ depname ] :
2009-07-21 21:38:53 +00:00
continue
2016-08-16 16:47:06 +00:00
depdata = taskData [ mc ] . build_targets [ depname ] [ 0 ]
2009-07-21 21:38:53 +00:00
if depdata is None :
continue
for taskname in tasknames :
2016-06-12 22:55:48 +00:00
t = depdata + " : " + taskname
2016-08-16 16:47:06 +00:00
if t in taskData [ mc ] . taskentries :
2016-06-12 22:55:48 +00:00
depends . add ( t )
2009-07-21 21:38:53 +00:00
2016-08-16 16:47:06 +00:00
def add_runtime_dependencies ( depids , tasknames , depends , mc ) :
2016-06-12 22:55:48 +00:00
for depname in depids :
2016-08-16 16:47:06 +00:00
if depname not in taskData [ mc ] . run_targets or not taskData [ mc ] . run_targets [ depname ] :
2009-07-21 21:38:53 +00:00
continue
2016-08-16 16:47:06 +00:00
depdata = taskData [ mc ] . run_targets [ depname ] [ 0 ]
2009-07-21 21:38:53 +00:00
if depdata is None :
continue
for taskname in tasknames :
2016-06-12 22:55:48 +00:00
t = depdata + " : " + taskname
2016-08-16 16:47:06 +00:00
if t in taskData [ mc ] . taskentries :
2016-06-12 22:55:48 +00:00
depends . add ( t )
2009-07-21 21:38:53 +00:00
2016-08-16 16:47:06 +00:00
def add_resolved_dependencies ( mc , fn , tasknames , depends ) :
2016-06-12 22:55:48 +00:00
for taskname in tasknames :
2016-08-16 16:47:06 +00:00
tid = build_tid ( mc , fn , taskname )
2016-06-12 22:55:48 +00:00
if tid in self . runtaskentries :
depends . add ( tid )
2016-08-16 16:47:06 +00:00
for mc in taskData :
for tid in taskData [ mc ] . taskentries :
2016-06-12 22:55:48 +00:00
2016-08-16 16:47:06 +00:00
( mc , fn , taskname ) = split_tid ( tid )
#runtid = build_tid(mc, fn, taskname)
taskfn = taskfn_fromtid ( tid )
2012-06-26 18:00:58 +00:00
2016-08-16 16:47:06 +00:00
#logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
2006-11-16 15:02:15 +00:00
2016-08-16 16:47:06 +00:00
depends = set ( )
task_deps = self . dataCaches [ mc ] . task_deps [ taskfn ]
2009-07-21 18:44:23 +00:00
2016-08-16 16:47:06 +00:00
self . runtaskentries [ tid ] = RunTaskEntry ( )
2016-06-12 22:55:48 +00:00
2016-08-16 16:47:06 +00:00
if fn in taskData [ mc ] . failed_fns :
continue
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve task internal dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. addtask before X after Y
2016-08-16 16:47:06 +00:00
for t in taskData [ mc ] . taskentries [ tid ] . tdepends :
( _ , depfn , deptaskname ) = split_tid ( t )
depends . add ( build_tid ( mc , depfn , deptaskname ) )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve 'deptask' dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[deptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS)
2016-06-12 22:55:48 +00:00
if ' deptask ' in task_deps and taskname in task_deps [ ' deptask ' ] :
tasknames = task_deps [ ' deptask ' ] [ taskname ] . split ( )
2016-08-16 16:47:06 +00:00
add_build_dependencies ( taskData [ mc ] . depids [ taskfn ] , tasknames , depends , mc )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve 'rdeptask' dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[rdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all RDEPENDS)
2016-06-12 22:55:48 +00:00
if ' rdeptask ' in task_deps and taskname in task_deps [ ' rdeptask ' ] :
tasknames = task_deps [ ' rdeptask ' ] [ taskname ] . split ( )
2016-08-16 16:47:06 +00:00
add_runtime_dependencies ( taskData [ mc ] . rdepids [ taskfn ] , tasknames , depends , mc )
2006-11-16 15:02:15 +00:00
2010-03-24 23:56:12 +00:00
# Resolve inter-task dependencies
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[depends] = "targetname:do_someothertask"
# (makes sure sometask runs after targetname's someothertask)
2016-08-16 16:47:06 +00:00
idepends = taskData [ mc ] . taskentries [ tid ] . idepends
2016-06-12 22:55:48 +00:00
for ( depname , idependtask ) in idepends :
2016-08-16 16:47:06 +00:00
if depname in taskData [ mc ] . build_targets and taskData [ mc ] . build_targets [ depname ] and not depname in taskData [ mc ] . failed_deps :
2007-08-05 22:43:24 +00:00
# Won't be in build_targets if ASSUME_PROVIDED
2016-08-16 16:47:06 +00:00
depdata = taskData [ mc ] . build_targets [ depname ] [ 0 ]
2007-08-16 09:55:21 +00:00
if depdata is not None :
2016-06-12 22:55:48 +00:00
t = depdata + " : " + idependtask
depends . add ( t )
2016-08-16 16:47:06 +00:00
if t not in taskData [ mc ] . taskentries :
2016-06-12 22:55:48 +00:00
bb . msg . fatal ( " RunQueue " , " Task %s in %s depends upon non-existent task %s in %s " % ( taskname , fn , idependtask , depdata ) )
2016-08-16 16:47:06 +00:00
irdepends = taskData [ mc ] . taskentries [ tid ] . irdepends
2016-06-12 22:55:48 +00:00
for ( depname , idependtask ) in irdepends :
2016-08-16 16:47:06 +00:00
if depname in taskData [ mc ] . run_targets :
2012-06-22 11:51:29 +00:00
# Won't be in run_targets if ASSUME_PROVIDED
2016-08-16 16:47:06 +00:00
depdata = taskData [ mc ] . run_targets [ depname ] [ 0 ]
2012-06-22 11:51:29 +00:00
if depdata is not None :
2016-06-12 22:55:48 +00:00
t = depdata + " : " + idependtask
depends . add ( t )
2016-08-16 16:47:06 +00:00
if t not in taskData [ mc ] . taskentries :
2016-06-12 22:55:48 +00:00
bb . msg . fatal ( " RunQueue " , " Task %s in %s rdepends upon non-existent task %s in %s " % ( taskname , fn , idependtask , depdata ) )
2009-07-21 18:44:23 +00:00
2012-06-26 18:00:58 +00:00
# Resolve recursive 'recrdeptask' dependencies (Part A)
2008-01-06 16:51:51 +00:00
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
2009-07-21 18:44:23 +00:00
# We cover the recursive part of the dependencies below
2016-06-12 22:55:48 +00:00
if ' recrdeptask ' in task_deps and taskname in task_deps [ ' recrdeptask ' ] :
tasknames = task_deps [ ' recrdeptask ' ] [ taskname ] . split ( )
recursivetasks [ tid ] = tasknames
2016-08-16 16:47:06 +00:00
add_build_dependencies ( taskData [ mc ] . depids [ taskfn ] , tasknames , depends , mc )
add_runtime_dependencies ( taskData [ mc ] . rdepids [ taskfn ] , tasknames , depends , mc )
2016-06-12 22:55:48 +00:00
if taskname in tasknames :
recursivetasksselfref . add ( tid )
if ' recideptask ' in task_deps and taskname in task_deps [ ' recideptask ' ] :
recursiveitasks [ tid ] = [ ]
for t in task_deps [ ' recideptask ' ] [ taskname ] . split ( ) :
2016-08-16 16:47:06 +00:00
newdep = build_tid ( mc , fn , t )
2016-06-12 22:55:48 +00:00
recursiveitasks [ tid ] . append ( newdep )
2016-08-16 16:47:06 +00:00
self . runtaskentries [ tid ] . depends = depends
#self.dump_data()
2009-07-21 18:44:23 +00:00
2012-06-26 18:00:58 +00:00
# Resolve recursive 'recrdeptask' dependencies (Part B)
2009-07-21 18:44:23 +00:00
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
2016-06-12 22:55:48 +00:00
# We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( len ( recursivetasks ) )
2012-06-26 18:00:58 +00:00
extradeps = { }
2016-06-23 10:59:12 +00:00
for taskcounter , tid in enumerate ( recursivetasks ) :
2016-06-12 22:55:48 +00:00
extradeps [ tid ] = set ( self . runtaskentries [ tid ] . depends )
tasknames = recursivetasks [ tid ]
2012-06-26 18:00:58 +00:00
seendeps = set ( )
def generate_recdeps ( t ) :
newdeps = set ( )
2016-08-16 16:47:06 +00:00
( mc , fn , taskname ) = split_tid ( t )
add_resolved_dependencies ( mc , fn , tasknames , newdeps )
2016-06-12 22:55:48 +00:00
extradeps [ tid ] . update ( newdeps )
2012-06-26 18:00:58 +00:00
seendeps . add ( t )
newdeps . add ( t )
for i in newdeps :
2016-06-12 22:55:48 +00:00
task = self . runtaskentries [ i ] . task
for n in self . runtaskentries [ i ] . depends :
2012-06-26 18:00:58 +00:00
if n not in seendeps :
2016-06-12 22:55:48 +00:00
generate_recdeps ( n )
generate_recdeps ( tid )
2012-06-26 18:00:58 +00:00
2016-06-12 22:55:48 +00:00
if tid in recursiveitasks :
for dep in recursiveitasks [ tid ] :
2013-06-19 13:03:39 +00:00
generate_recdeps ( dep )
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . update ( taskcounter )
2013-06-19 13:03:39 +00:00
2012-07-02 12:29:53 +00:00
# Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
2016-06-12 22:55:48 +00:00
for tid in recursivetasks :
extradeps [ tid ] . difference_update ( recursivetasksselfref )
2012-07-02 12:29:53 +00:00
2016-06-12 22:55:48 +00:00
for tid in self . runtaskentries :
task = self . runtaskentries [ tid ] . task
2012-06-26 18:00:58 +00:00
# Add in extra dependencies
2016-06-12 22:55:48 +00:00
if tid in extradeps :
self . runtaskentries [ tid ] . depends = extradeps [ tid ]
2012-06-26 18:00:58 +00:00
# Remove all self references
2016-06-12 22:55:48 +00:00
if tid in self . runtaskentries [ tid ] . depends :
logger . debug ( 2 , " Task %s contains self reference! " , tid )
self . runtaskentries [ tid ] . depends . remove ( tid )
2008-01-06 16:51:51 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2016-08-16 16:47:06 +00:00
#self.dump_data()
2008-01-06 16:51:51 +00:00
# Step B - Mark all active tasks
#
# Start with the tasks we were asked to run and mark all dependencies
# as active too. If the task is to be 'forced', clear its stamp. Once
# all active tasks are marked, prune the ones we don't need.
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Marking Active Tasks " )
2006-11-16 15:02:15 +00:00
2016-06-12 22:55:48 +00:00
def mark_active ( tid , depth ) :
2006-11-16 15:02:15 +00:00
"""
Mark an item as active along with its depends
( calls itself recursively )
"""
2016-06-12 22:55:48 +00:00
if tid in runq_build :
2006-11-16 15:02:15 +00:00
return
2016-06-12 22:55:48 +00:00
runq_build [ tid ] = 1
2006-11-16 15:02:15 +00:00
2016-06-12 22:55:48 +00:00
depends = self . runtaskentries [ tid ] . depends
2006-11-16 15:02:15 +00:00
for depend in depends :
mark_active ( depend , depth + 1 )
2016-08-16 16:47:06 +00:00
self . target_tids = [ ]
for ( mc , target , task , fn ) in self . targets :
if target not in taskData [ mc ] . build_targets or not taskData [ mc ] . build_targets [ target ] :
2006-11-16 15:02:15 +00:00
continue
2016-08-16 16:47:06 +00:00
if target in taskData [ mc ] . failed_deps :
2007-02-21 20:15:13 +00:00
continue
2015-12-15 16:42:18 +00:00
parents = False
if task . endswith ( ' - ' ) :
parents = True
task = task [ : - 1 ]
2016-08-16 16:47:06 +00:00
if fn in taskData [ mc ] . failed_fns :
2006-11-16 15:02:15 +00:00
continue
2016-08-16 16:47:06 +00:00
# fn already has mc prefix
2016-06-12 22:55:48 +00:00
tid = fn + " : " + task
2016-08-16 16:47:06 +00:00
self . target_tids . append ( tid )
if tid not in taskData [ mc ] . taskentries :
2013-08-09 13:51:22 +00:00
import difflib
2016-06-12 22:55:48 +00:00
tasks = [ ]
2016-08-16 16:47:06 +00:00
for x in taskData [ mc ] . taskentries :
2016-06-12 22:55:48 +00:00
if x . startswith ( fn + " : " ) :
tasks . append ( taskname_from_tid ( x ) )
close_matches = difflib . get_close_matches ( task , tasks , cutoff = 0.7 )
2013-08-09 13:51:22 +00:00
if close_matches :
extra = " . Close matches: \n %s " % " \n " . join ( close_matches )
else :
extra = " "
2016-08-16 16:47:06 +00:00
bb . msg . fatal ( " RunQueue " , " Task %s does not exist for target %s ( %s ) %s " % ( task , target , tid , extra ) )
2016-06-12 22:55:48 +00:00
2015-12-15 16:42:18 +00:00
# For tasks called "XXXX-", only run their dependencies
if parents :
2016-06-12 22:55:48 +00:00
for i in self . runtaskentries [ tid ] . depends :
2015-12-15 16:42:18 +00:00
mark_active ( i , 1 )
else :
2016-06-12 22:55:48 +00:00
mark_active ( tid , 1 )
2006-11-16 15:02:15 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2008-01-06 16:51:51 +00:00
# Step C - Prune all inactive tasks
#
# Once all active tasks are marked, prune the ones we don't need.
2006-11-16 15:02:15 +00:00
delcount = 0
2016-06-12 22:55:48 +00:00
for tid in list ( self . runtaskentries . keys ( ) ) :
if tid not in runq_build :
del self . runtaskentries [ tid ]
delcount + = 1
2006-11-16 15:02:15 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2008-01-06 16:51:51 +00:00
#
# Step D - Sanity checks and computation
#
# Check to make sure we still have tasks to run
2016-06-12 22:55:48 +00:00
if len ( self . runtaskentries ) == 0 :
2016-08-16 16:47:06 +00:00
if not taskData [ ' ' ] . abort :
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above. " )
2009-07-21 18:44:23 +00:00
else :
2011-08-15 16:29:09 +00:00
bb . msg . fatal ( " RunQueue " , " No active tasks and not in --continue mode?! Please report this bug. " )
2006-11-16 15:02:15 +00:00
2016-06-12 22:55:48 +00:00
logger . verbose ( " Pruned %s inactive tasks, %s left " , delcount , len ( self . runtaskentries ) )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Assign Weightings " )
2006-11-16 15:02:15 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2008-01-06 16:51:51 +00:00
# Generate a list of reverse dependencies to ease future calculations
2016-06-12 22:55:48 +00:00
for tid in self . runtaskentries :
for dep in self . runtaskentries [ tid ] . depends :
self . runtaskentries [ dep ] . revdeps . add ( tid )
2006-11-16 15:02:15 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2008-01-06 16:51:51 +00:00
# Identify tasks at the end of dependency chains
# Error on circular dependency loops (length two)
2006-11-16 15:02:15 +00:00
endpoints = [ ]
2016-06-12 22:55:48 +00:00
for tid in self . runtaskentries :
revdeps = self . runtaskentries [ tid ] . revdeps
2006-11-16 15:02:15 +00:00
if len ( revdeps ) == 0 :
2016-06-12 22:55:48 +00:00
endpoints . append ( tid )
2006-11-16 15:02:15 +00:00
for dep in revdeps :
2016-06-12 22:55:48 +00:00
if dep in self . runtaskentries [ tid ] . depends :
bb . msg . fatal ( " RunQueue " , " Task %s has circular dependency on %s " % ( tid , dep ) )
2006-11-16 15:02:15 +00:00
2010-06-10 17:35:31 +00:00
logger . verbose ( " Compute totals (have %s endpoint(s)) " , len ( endpoints ) )
2006-11-16 15:02:15 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2010-03-24 23:56:12 +00:00
# Calculate task weights
2008-01-06 16:51:51 +00:00
# Check for higher-length circular dependencies
self . runq_weight = self . calculate_task_weights ( endpoints )
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2008-01-06 16:51:51 +00:00
# Sanity Check - Check for multiple tasks building the same provider
2016-08-16 16:47:06 +00:00
for mc in self . dataCaches :
prov_list = { }
seen_fn = [ ]
for tid in self . runtaskentries :
( tidmc , fn , taskname ) = split_tid ( tid )
taskfn = taskfn_fromtid ( tid )
if taskfn in seen_fn :
continue
if mc != tidmc :
continue
seen_fn . append ( taskfn )
for prov in self . dataCaches [ mc ] . fn_provides [ taskfn ] :
if prov not in prov_list :
prov_list [ prov ] = [ taskfn ]
elif taskfn not in prov_list [ prov ] :
prov_list [ prov ] . append ( taskfn )
for prov in prov_list :
if len ( prov_list [ prov ] ) < 2 :
continue
if prov in self . multi_provider_whitelist :
continue
2013-09-02 13:47:21 +00:00
seen_pn = [ ]
# If two versions of the same PN are being built its fatal, we don't support it.
for fn in prov_list [ prov ] :
2016-08-16 16:47:06 +00:00
pn = self . dataCaches [ mc ] . pkg_fn [ fn ]
2013-09-02 13:47:21 +00:00
if pn not in seen_pn :
seen_pn . append ( pn )
else :
bb . fatal ( " Multiple versions of %s are due to be built ( %s ). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_ %s to select the correct version or don ' t depend on multiple versions. " % ( pn , " " . join ( prov_list [ prov ] ) , pn ) )
2016-04-10 10:09:21 +00:00
msg = " Multiple .bb files are due to be built which each provide %s : \n %s " % ( prov , " \n " . join ( prov_list [ prov ] ) )
#
# Construct a list of things which uniquely depend on each provider
# since this may help the user figure out which dependency is triggering this warning
#
msg + = " \n A list of tasks depending on these providers is shown and may help explain where the dependency comes from. "
deplist = { }
commondeps = None
for provfn in prov_list [ prov ] :
deps = set ( )
2016-06-12 22:55:48 +00:00
for tid in self . runtaskentries :
fn = fn_from_tid ( tid )
2016-04-10 10:09:21 +00:00
if fn != provfn :
continue
2016-06-12 22:55:48 +00:00
for dep in self . runtaskentries [ tid ] . revdeps :
fn = fn_from_tid ( dep )
2016-04-10 10:09:21 +00:00
if fn == provfn :
continue
2016-06-12 22:55:48 +00:00
deps . add ( dep )
2016-04-10 10:09:21 +00:00
if not commondeps :
commondeps = set ( deps )
else :
commondeps & = deps
deplist [ provfn ] = deps
for provfn in deplist :
msg + = " \n %s has unique dependees: \n %s " % ( provfn , " \n " . join ( deplist [ provfn ] - commondeps ) )
#
# Construct a list of provides and runtime providers for each recipe
# (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
#
msg + = " \n It could be that one recipe provides something the other doesn ' t and should. The following provider and runtime provider differences may be helpful. "
provide_results = { }
rprovide_results = { }
commonprovs = None
commonrprovs = None
for provfn in prov_list [ prov ] :
2016-08-16 16:47:06 +00:00
provides = set ( self . dataCaches [ mc ] . fn_provides [ provfn ] )
2016-04-10 10:09:21 +00:00
rprovides = set ( )
2016-08-16 16:47:06 +00:00
for rprovide in self . dataCaches [ mc ] . rproviders :
if provfn in self . dataCaches [ mc ] . rproviders [ rprovide ] :
2016-04-10 10:09:21 +00:00
rprovides . add ( rprovide )
2016-08-16 16:47:06 +00:00
for package in self . dataCaches [ mc ] . packages :
if provfn in self . dataCaches [ mc ] . packages [ package ] :
2016-04-10 10:09:21 +00:00
rprovides . add ( package )
2016-08-16 16:47:06 +00:00
for package in self . dataCaches [ mc ] . packages_dynamic :
if provfn in self . dataCaches [ mc ] . packages_dynamic [ package ] :
2016-04-10 10:09:21 +00:00
rprovides . add ( package )
if not commonprovs :
commonprovs = set ( provides )
else :
commonprovs & = provides
provide_results [ provfn ] = provides
if not commonrprovs :
commonrprovs = set ( rprovides )
else :
commonrprovs & = rprovides
rprovide_results [ provfn ] = rprovides
#msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
#msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
for provfn in prov_list [ prov ] :
msg + = " \n %s has unique provides: \n %s " % ( provfn , " \n " . join ( provide_results [ provfn ] - commonprovs ) )
msg + = " \n %s has unique rprovides: \n %s " % ( provfn , " \n " . join ( rprovide_results [ provfn ] - commonrprovs ) )
2012-02-13 11:41:31 +00:00
if self . warn_multi_bb :
2016-05-09 13:01:12 +00:00
logger . warning ( msg )
2012-02-13 11:41:31 +00:00
else :
logger . error ( msg )
2007-09-02 14:10:08 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2008-05-04 23:22:24 +00:00
# Create a whitelist usable by the stamp checks
2016-08-16 16:47:06 +00:00
self . stampfnwhitelist = { }
for mc in self . taskData :
self . stampfnwhitelist [ mc ] = [ ]
for entry in self . stampwhitelist . split ( ) :
if entry not in self . taskData [ mc ] . build_targets :
continue
fn = self . taskData . build_targets [ entry ] [ 0 ]
self . stampfnwhitelist [ mc ] . append ( fn )
2008-05-04 23:22:24 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2012-09-17 22:43:17 +00:00
# Iterate over the task list looking for tasks with a 'setscene' function
2016-06-12 22:55:48 +00:00
self . runq_setscene_tids = [ ]
2012-09-17 22:43:17 +00:00
if not self . cooker . configuration . nosetscene :
2016-06-12 22:55:48 +00:00
for tid in self . runtaskentries :
2016-08-16 16:47:06 +00:00
( mc , fn , taskname ) = split_tid ( tid )
setscenetid = fn + " : " + taskname + " _setscene "
if setscenetid not in taskData [ mc ] . taskentries :
2012-09-17 22:43:17 +00:00
continue
2016-06-12 22:55:48 +00:00
self . runq_setscene_tids . append ( tid )
2010-08-19 10:36:29 +00:00
2016-08-16 16:47:06 +00:00
def invalidate_task ( tid , error_nostamp ) :
( mc , fn , taskname ) = split_tid ( tid )
taskdep = self . dataCaches [ mc ] . task_deps [ fn ]
if fn + " : " + taskname not in taskData [ mc ] . taskentries :
2016-05-09 13:01:12 +00:00
logger . warning ( " Task %s does not exist, invalidating this task will have no effect " % taskname )
2012-06-18 15:45:36 +00:00
if ' nostamp ' in taskdep and taskname in taskdep [ ' nostamp ' ] :
if error_nostamp :
bb . fatal ( " Task %s is marked nostamp, cannot invalidate this task " % taskname )
else :
bb . debug ( 1 , " Task %s is marked nostamp, cannot invalidate this task " % taskname )
else :
logger . verbose ( " Invalidate task %s , %s " , taskname , fn )
2016-08-16 16:47:06 +00:00
bb . parse . siggen . invalidate_task ( taskname , self . dataCaches [ mc ] , fn )
2012-06-18 15:45:36 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2012-06-18 15:45:35 +00:00
# Invalidate task if force mode active
if self . cooker . configuration . force :
2016-08-16 16:47:06 +00:00
for tid in self . target_tids :
invalidate_task ( tid , False )
2012-06-18 15:45:36 +00:00
# Invalidate task if invalidate mode active
if self . cooker . configuration . invalidate_stamp :
2016-08-16 16:47:06 +00:00
for tid in self . target_tids :
fn = fn_from_tid ( tid )
2012-06-18 15:45:36 +00:00
for st in self . cooker . configuration . invalidate_stamp . split ( ' , ' ) :
2015-09-02 20:11:30 +00:00
if not st . startswith ( " do_ " ) :
st = " do_ %s " % st
2016-08-16 16:47:06 +00:00
invalidate_task ( fn + " : " + st , True )
2012-06-18 15:45:35 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2015-09-30 13:29:01 +00:00
# Create and print to the logs a virtual/xxxx -> PN (fn) table
2016-08-16 16:47:06 +00:00
for mc in taskData :
virtmap = taskData [ mc ] . get_providermap ( prefix = " virtual/ " )
virtpnmap = { }
for v in virtmap :
virtpnmap [ v ] = self . dataCaches [ mc ] . pkg_fn [ virtmap [ v ] ]
bb . debug ( 2 , " %s resolved to: %s ( %s ) " % ( v , virtpnmap [ v ] , virtmap [ v ] ) )
if hasattr ( bb . parse . siggen , " tasks_resolved " ) :
bb . parse . siggen . tasks_resolved ( virtmap , virtpnmap , self . dataCaches [ mc ] )
2015-09-30 13:29:01 +00:00
2016-06-23 10:59:12 +00:00
self . init_progress_reporter . next_stage ( )
2014-08-12 08:53:16 +00:00
# Iterate over the task list and call into the siggen code
2010-08-31 13:49:43 +00:00
dealtwith = set ( )
2016-06-12 22:55:48 +00:00
todeal = set ( self . runtaskentries )
2010-08-31 13:49:43 +00:00
while len ( todeal ) > 0 :
2016-06-12 22:55:48 +00:00
for tid in todeal . copy ( ) :
if len ( self . runtaskentries [ tid ] . depends - dealtwith ) == 0 :
dealtwith . add ( tid )
todeal . remove ( tid )
2010-08-31 13:49:43 +00:00
procdep = [ ]
2016-06-12 22:55:48 +00:00
for dep in self . runtaskentries [ tid ] . depends :
procdep . append ( fn_from_tid ( dep ) + " . " + taskname_from_tid ( dep ) )
2016-08-16 16:47:06 +00:00
( mc , fn , taskname ) = split_tid ( tid )
taskfn = taskfn_fromtid ( tid )
self . runtaskentries [ tid ] . hash = bb . parse . siggen . get_taskhash ( taskfn , taskname , procdep , self . dataCaches [ mc ] )
2016-06-12 22:55:48 +00:00
task = self . runtaskentries [ tid ] . task
2010-08-31 13:49:43 +00:00
2016-01-26 13:34:29 +00:00
bb . parse . siggen . writeout_file_checksum_cache ( )
2016-08-16 16:47:06 +00:00
#self.dump_data()
2016-06-12 22:55:48 +00:00
return len ( self . runtaskentries )
2010-08-24 23:58:23 +00:00
2016-08-16 16:47:06 +00:00
def dump_data(self):
    """
    Dump some debug information on the internal data structures
    (task weights and forward/reverse dependencies).
    """
    logger.debug(3, "run_tasks:")
    for tid, entry in self.runtaskentries.items():
        logger.debug(3, " %s: %s   Deps %s RevDeps %s", tid,
                     entry.weight, entry.depends, entry.revdeps)
2010-08-18 10:30:53 +00:00
2016-08-15 16:58:39 +00:00
class RunQueueWorker():
    """
    Container pairing a bitbake-worker subprocess with the pipe used
    to read events back from it.
    """
    def __init__(self, process, pipe):
        # Handle to the bitbake-worker subprocess
        self.process = process
        # runQueuePipe attached to the worker's stdout
        self.pipe = pipe
2010-08-18 10:30:53 +00:00
class RunQueue :
2016-08-16 16:47:06 +00:00
def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
    """
    Build the runqueue: construct the RunQueueData for the given targets
    and cache the configuration hooks used during task execution.
    """
    self.cooker = cooker
    self.cfgData = cfgData
    self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)

    getvar = cfgData.getVar
    # Stamp/hash policy hooks; all optional except the stamp policy,
    # which defaults to per-file stamp comparison.
    self.stamppolicy = getvar("BB_STAMP_POLICY", True) or "perfile"
    self.hashvalidate = getvar("BB_HASHCHECK_FUNCTION", True) or None
    self.setsceneverify = getvar("BB_SETSCENE_VERIFY_FUNCTION2", True) or None
    self.depvalidate = getvar("BB_SETSCENE_DEPVALID", True) or None

    self.state = runQueuePrepare

    # For disk space monitor (BB_DISKMON_DIRS)
    self.dm = monitordisk.diskMonitor(cfgData)

    self.rqexe = None
    # Worker subprocesses, keyed by multiconfig name.
    self.worker = {}
    self.fakeworker = {}
2013-06-07 17:11:49 +00:00
2016-08-16 16:47:06 +00:00
def _start_worker(self, mc, fakeroot=False, rqexec=None):
    """
    Spawn a bitbake-worker subprocess for multiconfig 'mc' and send it
    its initial configuration.

    fakeroot: when True, launch the worker under FAKEROOTCMD with the
              FAKEROOTBASEENV environment applied.
    rqexec:   executor object handed to the worker's event pipe.

    Returns a RunQueueWorker wrapping the process and its read pipe.
    """
    logger.debug(1, "Starting bitbake-worker")
    # The magic token tells bitbake-worker how it was started: the extra
    # "bad" enables profiling, the "beef" suffix marks a fakeroot worker.
    magic = "decafbadbad" if self.cooker.configuration.profile else "decafbad"
    if fakeroot:
        magic += "beef"
        fakerootcmd = self.cfgData.getVar("FAKEROOTCMD", True)
        fakerootenv = (self.cfgData.getVar("FAKEROOTBASEENV", True) or "").split()
        env = os.environ.copy()
        env.update(var.split('=') for var in fakerootenv)
        worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
    else:
        worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    bb.utils.nonblockingfd(worker.stdout)
    workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)

    # Per-task hashes the worker needs for signature handling.
    runqhash = {tid: self.rqdata.runtaskentries[tid].hash
                for tid in self.rqdata.runtaskentries}

    workerdata = {
        "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
        "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
        "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
        "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
        "sigdata" : bb.parse.siggen.get_taskdata(),
        "runq_hash" : runqhash,
        "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
        "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
        "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
        "logdefaultdomain" : bb.msg.loggerDefaultDomains,
        "prhost" : self.cooker.prhost,
        "buildname" : self.cfgData.getVar("BUILDNAME", True),
        "date" : self.cfgData.getVar("DATE", True),
        "time" : self.cfgData.getVar("TIME", True),
    }

    # Hand over the cooker configuration and worker data as pickled blobs
    # framed in XML-ish tags the worker's reader understands.
    worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
    worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
    worker.stdin.flush()

    return RunQueueWorker(worker, workerpipe)
2013-06-07 17:12:30 +00:00
2016-08-15 16:58:39 +00:00
def _teardown_worker(self, worker):
    """
    Shut down one RunQueueWorker: ask the process to quit, drain its
    event pipe until the process has exited, then close the pipe.
    """
    if not worker:
        return
    logger.debug(1, "Teardown for bitbake-worker")
    try:
        # Politely ask the worker to exit; it may already be gone, in
        # which case the write fails and we simply carry on.
        stdin = worker.process.stdin
        stdin.write(b"<quit></quit>")
        stdin.flush()
        stdin.close()
    except IOError:
        pass
    # Keep servicing the pipe until the process has really exited so no
    # events are lost, then drain whatever is left over.
    while worker.process.returncode is None:
        worker.pipe.read()
        worker.process.poll()
    while worker.pipe.read():
        continue
    worker.pipe.close()
2013-06-07 17:12:30 +00:00
def start_worker(self):
    """Start one (non-fakeroot) bitbake-worker per multiconfig."""
    if self.worker:
        # Never leave stale workers behind a restart.
        self.teardown_workers()
    self.teardown = False
    for multiconfig in self.rqdata.dataCaches:
        self.worker[multiconfig] = self._start_worker(multiconfig)
2013-06-07 17:12:30 +00:00
2013-06-07 17:13:04 +00:00
def start_fakeworker(self, rqexec):
    """Lazily start the fakeroot workers, one per multiconfig."""
    if self.fakeworker:
        return
    for multiconfig in self.rqdata.dataCaches:
        self.fakeworker[multiconfig] = self._start_worker(multiconfig, True, rqexec)
2013-06-07 17:13:04 +00:00
def teardown_workers(self):
    """Stop every worker (normal and fakeroot) and forget about them."""
    self.teardown = True
    for w in self.worker.values():
        self._teardown_worker(w)
    self.worker = {}
    for w in self.fakeworker.values():
        self._teardown_worker(w)
    self.fakeworker = {}
2013-06-07 17:13:04 +00:00
def read_workers(self):
    """Service the event pipes of all running workers."""
    for w in self.worker.values():
        w.pipe.read()
    for w in self.fakeworker.values():
        w.pipe.read()
2012-02-29 14:15:28 +00:00
2013-08-31 22:40:55 +00:00
def active_fds(self):
    """Return the input file objects of every live worker pipe."""
    fds = [w.pipe.input for w in self.worker.values()]
    fds.extend(w.pipe.input for w in self.fakeworker.values())
    return fds
2016-06-12 22:55:48 +00:00
# Check whether the stamp for task 'tid' is current, i.e. whether the task
# can be skipped. Compares the task's stamp mtime against the stamps of all
# its dependencies, honouring the BB_STAMP_POLICY setting ("perfile",
# "whitelist", or full-dependency-tree otherwise).
#
#   taskname: override the task name parsed from 'tid' (used for the
#             _setscene variants); defaults to the tid's own task name.
#   recurse:  also validate dependency stamps recursively.
#   cache:    memoisation dict for the recursive case, keyed by tid.
#
# Returns True when the stamp is current, False when the task must re-run.
def check_stamp_task ( self , tid , taskname = None , recurse = False , cache = None ) :
2010-08-19 10:36:29 +00:00
# mtime of 'f', or None if it does not exist or cannot be stat'd.
def get_timestamp ( f ) :
try :
if not os . access ( f , os . F_OK ) :
return None
return os . stat ( f ) [ stat . ST_MTIME ]
except :
return None
2008-03-14 11:44:34 +00:00

( mc , fn , tn ) = split_tid ( tid )
taskfn = taskfn_fromtid ( tid )
if taskname is None :
taskname = tn
2008-03-14 11:44:34 +00:00
# "perfile" only compares stamps within the same recipe; any other policy
# considers the full dependency tree.
if self . stamppolicy == " perfile " :
fulldeptree = False
else :
fulldeptree = True
2008-05-04 23:22:24 +00:00
stampwhitelist = [ ]
if self . stamppolicy == " whitelist " :
2016-08-16 16:47:06 +00:00
stampwhitelist = self . rqdata . stampfnwhitelist [ mc ]
2011-01-10 12:48:49 +00:00

stampfile = bb . build . stampfile ( taskname , self . rqdata . dataCaches [ mc ] , taskfn )
2010-11-06 12:20:33 +00:00

2014-08-12 08:53:16 +00:00
# If the stamp is missing, it's not current
2008-03-14 11:44:34 +00:00
if not os . access ( stampfile , os . F_OK ) :
2010-12-17 21:46:41 +00:00
logger . debug ( 2 , " Stampfile %s not available " , stampfile )
2008-03-14 11:44:34 +00:00
return False
2014-08-12 08:53:16 +00:00
# If it's a 'nostamp' task, it's not current
2016-08-16 16:47:06 +00:00
taskdep = self . rqdata . dataCaches [ mc ] . task_deps [ taskfn ]
2008-10-01 13:55:17 +00:00
if ' nostamp ' in taskdep and taskname in taskdep [ ' nostamp ' ] :
2011-01-10 12:48:49 +00:00
logger . debug ( 2 , " %s . %s is nostamp \n " , fn , taskname )
2008-03-14 11:44:34 +00:00
return False
2011-01-06 19:48:47 +00:00
# Setscene tasks (other than do_setscene itself) are treated as current
# here; their validity is handled by the scenequeue logic instead.
if taskname != " do_setscene " and taskname . endswith ( " _setscene " ) :
2010-08-19 10:36:29 +00:00
return True
2012-05-10 08:21:41 +00:00
# Fresh memoisation cache per top-level query unless the caller shares one.
if cache is None :
cache = { }
2008-03-14 11:44:34 +00:00
iscurrent = True
2010-08-19 10:36:29 +00:00
t1 = get_timestamp ( stampfile )
2016-06-12 22:55:48 +00:00
for dep in self . rqdata . runtaskentries [ tid ] . depends :
2008-03-14 11:44:34 +00:00
if iscurrent :
2016-08-16 16:47:06 +00:00
( mc2 , fn2 , taskname2 ) = split_tid ( dep )
taskfn2 = taskfn_fromtid ( dep )
# stampfile2 is the dependency's normal stamp, stampfile3 its setscene stamp.
stampfile2 = bb . build . stampfile ( taskname2 , self . rqdata . dataCaches [ mc2 ] , taskfn2 )
stampfile3 = bb . build . stampfile ( taskname2 + " _setscene " , self . rqdata . dataCaches [ mc2 ] , taskfn2 )
2010-08-19 10:36:29 +00:00
t2 = get_timestamp ( stampfile2 )
2010-11-06 12:20:33 +00:00
t3 = get_timestamp ( stampfile3 )
2016-05-11 21:55:14 +00:00
# A lone or newer setscene stamp satisfies this dependency.
if t3 and not t2 :
continue
2010-08-19 10:36:29 +00:00
if t3 and t3 > t2 :
2016-05-11 21:55:14 +00:00
continue
2008-05-04 23:22:24 +00:00
# Same-recipe stamps are always compared; other recipes only under a
# full-deptree policy and when the file is not whitelisted.
if fn == fn2 or ( fulldeptree and fn2 not in stampwhitelist ) :
2010-12-16 15:14:13 +00:00
if not t2 :
2011-01-10 12:48:49 +00:00
logger . debug ( 2 , ' Stampfile %s does not exist ' , stampfile2 )
2008-03-14 11:44:34 +00:00
iscurrent = False
2016-05-11 21:55:14 +00:00
break
2010-12-16 15:14:13 +00:00
# A dependency stamp newer than ours means we are out of date.
if t1 < t2 :
2011-01-10 12:48:49 +00:00
logger . debug ( 2 , ' Stampfile %s < %s ' , stampfile , stampfile2 )
2010-12-16 15:14:13 +00:00
iscurrent = False
2016-05-11 21:55:14 +00:00
break
2012-03-16 10:46:05 +00:00
if recurse and iscurrent :
2012-07-25 18:40:38 +00:00
# Recursively validate the dependency itself, memoised in 'cache'.
if dep in cache :
iscurrent = cache [ dep ]
if not iscurrent :
logger . debug ( 2 , ' Stampfile for dependency %s : %s invalid (cached) ' % ( fn2 , taskname2 ) )
else :
iscurrent = self . check_stamp_task ( dep , recurse = True , cache = cache )
cache [ dep ] = iscurrent
if recurse :
2016-06-12 22:55:48 +00:00
cache [ tid ] = iscurrent
2008-03-14 11:44:34 +00:00
return iscurrent
2008-03-03 22:01:45 +00:00
2012-07-25 18:43:53 +00:00
def _execute_runqueue ( self ) :
2006-11-16 15:02:15 +00:00
"""
2010-08-18 10:30:53 +00:00
Run the tasks in a queue prepared by rqdata . prepare ( )
2006-11-16 15:02:15 +00:00
Upon failure , optionally try to recover the build using any alternate providers
( if the abort on failure configuration option isn ' t set)
"""
2013-08-31 22:40:55 +00:00
retval = True
2010-08-18 16:37:15 +00:00
2010-01-20 18:46:02 +00:00
if self . state is runQueuePrepare :
2010-08-24 23:58:23 +00:00
self . rqexe = RunQueueExecuteDummy ( self )
2016-06-23 10:59:12 +00:00
# NOTE: if you add, remove or significantly refactor the stages of this
# process then you should recalculate the weightings here. This is quite
# easy to do - just change the next line temporarily to pass debug=True as
# the last parameter and you'll get a printout of the weightings as well
# as a map to the lines where next_stage() was called. Of course this isn't
# critical, but it helps to keep the progress reporting accurate.
self . rqdata . init_progress_reporter = bb . progress . MultiStageProcessProgressReporter ( self . cooker . data ,
" Initialising tasks " ,
[ 43 , 967 , 4 , 3 , 1 , 5 , 3 , 7 , 13 , 1 , 2 , 1 , 1 , 246 , 35 , 1 , 38 , 1 , 35 , 2 , 338 , 204 , 142 , 3 , 3 , 37 , 244 ] )
2011-03-08 19:07:24 +00:00
if self . rqdata . prepare ( ) == 0 :
2010-08-24 23:58:23 +00:00
self . state = runQueueComplete
else :
self . state = runQueueSceneInit
2016-06-23 10:59:12 +00:00
self . rqdata . init_progress_reporter . next_stage ( )
2010-08-19 10:36:29 +00:00
2016-05-12 15:00:10 +00:00
# we are ready to run, emit dependency info to any UI or class which
# needs it
depgraph = self . cooker . buildDependTree ( self , self . rqdata . taskData )
2016-06-23 10:59:12 +00:00
self . rqdata . init_progress_reporter . next_stage ( )
2016-05-12 15:00:10 +00:00
bb . event . fire ( bb . event . DepTreeGenerated ( depgraph ) , self . cooker . data )
2013-09-18 12:15:48 +00:00
2010-08-19 10:36:29 +00:00
if self . state is runQueueSceneInit :
2014-03-26 13:46:54 +00:00
dump = self . cooker . configuration . dump_signatures
if dump :
2016-08-11 03:36:57 +00:00
self . rqdata . init_progress_reporter . finish ( )
2014-03-26 13:46:54 +00:00
if ' printdiff ' in dump :
invalidtasks = self . print_diffscenetasks ( )
2014-03-26 13:47:29 +00:00
self . dump_signatures ( dump )
2014-03-26 13:46:54 +00:00
if ' printdiff ' in dump :
self . write_diffscenetasks ( invalidtasks )
2013-12-19 09:40:52 +00:00
self . state = runQueueComplete
2010-08-31 13:49:43 +00:00
else :
2016-06-23 10:59:12 +00:00
self . rqdata . init_progress_reporter . next_stage ( )
2013-06-07 17:11:49 +00:00
self . start_worker ( )
2016-06-23 10:59:12 +00:00
self . rqdata . init_progress_reporter . next_stage ( )
2010-08-31 13:49:43 +00:00
self . rqexe = RunQueueExecuteScenequeue ( self )
2010-08-19 10:36:29 +00:00
V5 Disk space monitoring
Monitor disk availability and take action when the free disk space or
amount of free inode is running low, it is enabled when BB_DISKMON_DIRS
is set.
* Variable meanings(from meta-yocto/conf/local.conf.sample):
# Set the directories to monitor for disk usage, if more than one
# directories are mounted in the same device, then only one directory
# would be monitored since the monitor is based on the device.
# The format is:
# "action,directory,minimum_space,minimum_free_inode"
#
# The "action" must be set and should be one of:
# ABORT: Immediately abort
# STOPTASKS: The new tasks can't be executed any more, will stop the build
# when the running tasks have been done.
# WARN: show warnings (see BB_DISKMON_WARNINTERVAL for more information)
#
# The "directory" must be set, any directory is OK.
#
# Either "minimum_space" or "minimum_free_inode" (or both of them)
# should be set, otherwise the monitor would not be enabled,
# the unit can be G, M, K or none, but do NOT use GB, MB or KB
# (B is not needed).
#BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
#
# Set disk space and inode interval (only works when the action is "WARN",
# the unit can be G, M, or K, but do NOT use the GB, MB or KB
# (B is not needed), the format is:
# "disk_space_interval, disk_inode_interval", the default value is
# "50M,5K" which means that it would warn when the free space is
# lower than the minimum space(or inode), and would repeat the action
# when the disk space reduces 50M (or the amount of inode reduces 5k)
# again.
#BB_DISKMON_WARNINTERVAL = "50M,5K"
[YOCTO #1589]
(Bitbake rev: 4d173d441d2beb8e6492b6b1842682f8cf32e6cc)
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
2012-02-26 08:48:15 +00:00
if self . state in [ runQueueSceneRun , runQueueRunning , runQueueCleanUp ] :
self . dm . check ( self )
2010-08-19 10:36:29 +00:00
if self . state is runQueueSceneRun :
2010-09-13 15:57:13 +00:00
retval = self . rqexe . execute ( )
2010-01-20 18:46:02 +00:00
if self . state is runQueueRunInit :
2016-01-08 18:25:55 +00:00
if self . cooker . configuration . setsceneonly :
self . state = runQueueComplete
else :
2016-06-23 10:59:12 +00:00
# Just in case we didn't setscene
self . rqdata . init_progress_reporter . finish ( )
2016-01-08 18:25:55 +00:00
logger . info ( " Executing RunQueue Tasks " )
self . rqexe = RunQueueExecuteTasks ( self )
self . state = runQueueRunning
2010-01-20 18:46:02 +00:00
if self . state is runQueueRunning :
2010-09-13 15:57:13 +00:00
retval = self . rqexe . execute ( )
2010-01-20 18:46:02 +00:00
if self . state is runQueueCleanUp :
2014-12-08 16:38:14 +00:00
retval = self . rqexe . finish ( )
2010-01-20 18:46:02 +00:00
2014-07-21 08:35:53 +00:00
if ( self . state is runQueueComplete or self . state is runQueueFailed ) and self . rqexe :
2013-06-07 17:13:04 +00:00
self . teardown_workers ( )
2012-01-19 14:36:03 +00:00
if self . rqexe . stats . failed :
logger . info ( " Tasks Summary: Attempted %d tasks of which %d didn ' t need to be rerun and %d failed. " , self . rqexe . stats . completed + self . rqexe . stats . failed , self . rqexe . stats . skipped , self . rqexe . stats . failed )
else :
# Let's avoid the word "failed" if nothing actually did
logger . info ( " Tasks Summary: Attempted %d tasks of which %d didn ' t need to be rerun and all succeeded. " , self . rqexe . stats . completed , self . rqexe . stats . skipped )
2010-01-20 18:46:02 +00:00
if self . state is runQueueFailed :
2016-08-16 16:47:06 +00:00
if not self . rqdata . taskData [ ' ' ] . tryaltconfigs :
raise bb . runqueue . TaskFailure ( self . rqexe . failed_tids )
for tid in self . rqexe . failed_tids :
( mc , fn , tn ) = split_tid ( tid )
self . rqdata . taskData [ mc ] . fail_fn ( fn )
2010-08-18 10:30:53 +00:00
self . rqdata . reset ( )
2010-01-20 18:46:02 +00:00
if self . state is runQueueComplete :
# All done
return False
# Loop
2010-08-18 16:37:15 +00:00
return retval
2007-04-01 15:04:49 +00:00
2012-07-25 18:43:53 +00:00
def execute_runqueue(self):
    """Run one iteration of the runqueue state machine.

    Wraps _execute_runqueue so that an unexpected error shuts the queue
    down cleanly instead of looping forever.
    """
    def _shutdown():
        # Best-effort worker teardown; never let cleanup mask the real error.
        try:
            self.teardown_workers()
        except:
            pass
        self.state = runQueueComplete

    try:
        return self._execute_runqueue()
    except (bb.runqueue.TaskFailure, SystemExit):
        # Expected control-flow exceptions: propagate untouched.
        raise
    except bb.BBHandledException:
        # Already reported elsewhere; just clean up and re-raise.
        _shutdown()
        raise
    except Exception:
        logger.exception("An uncaught exception occurred in runqueue")
        _shutdown()
        raise
2010-08-18 16:13:06 +00:00
def finish_runqueue(self, now=False):
    """Ask the executor to wind down.

    With now=True the executor stops immediately; otherwise currently
    running tasks are allowed to drain. If no executor was ever created
    there is nothing to stop and the queue is simply marked complete.
    """
    if not self.rqexe:
        # Nothing ever ran, so there is nothing to shut down.
        self.state = runQueueComplete
        return

    shutdown = self.rqexe.finish_now if now else self.rqexe.finish
    shutdown()
2007-04-01 15:04:49 +00:00
2014-03-26 13:47:29 +00:00
def dump_signatures(self, options):
    """Reparse each recipe once so the signature generator has complete
    dependency data, then dump the signatures via bb.parse.siggen."""
    bb.note("Reparsing files to collect dependency data")

    nocache = bb.cache.NoCache(self.cooker.databuilder)
    seen = set()
    for tid in self.rqdata.runtaskentries:
        fn = taskfn_fromtid(tid)
        if fn in seen:
            continue
        # Parsed for its side effects only; the returned datastore is unused.
        nocache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
        seen.add(fn)

    bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
2013-12-18 16:21:27 +00:00
def print_diffscenetasks(self):
    # Report which tasks prevent reuse of cached (setscene) output by
    # locating the "root" invalid tasks: tasks whose signatures match no
    # cached result and which are not invalid merely because one of their
    # dependencies is. Returns that set of root invalid tids.
    valid = []
    sq_hash = []
    sq_hashfn = []
    sq_fn = []
    sq_taskname = []
    sq_task = []
    noexec = []
    stamppresent = []
    valid_new = set()

    # Build the parallel lists the hash validation hook expects, one entry
    # per executable (non-noexec) task.
    for tid in self.rqdata.runtaskentries:
        (mc, fn, taskname) = split_tid(tid)
        taskfn = taskfn_fromtid(tid)
        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

        if 'noexec' in taskdep and taskname in taskdep['noexec']:
            noexec.append(tid)
            continue

        sq_fn.append(fn)
        sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
        sq_hash.append(self.rqdata.runtaskentries[tid].hash)
        sq_taskname.append(taskname)
        sq_task.append(tid)
    locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
    try:
        # Prefer the richer signature (siginfo=True) of the validation hook.
        call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
        valid = bb.utils.better_eval(call, locs)
    # Handle version with no siginfo parameter
    except TypeError:
        call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
        valid = bb.utils.better_eval(call, locs)

    # The hook returns indices into the parallel lists; map back to tids.
    for v in valid:
        valid_new.add(sq_task[v])

    # Tasks which are both setscene and noexec never care about dependencies
    # We therefore find tasks which are setscene and noexec and mark their
    # unique dependencies as valid.
    for tid in noexec:
        if tid not in self.rqdata.runq_setscene_tids:
            continue
        for dep in self.rqdata.runtaskentries[tid].depends:
            hasnoexecparents = True
            for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                    continue
                hasnoexecparents = False
                break
            if hasnoexecparents:
                valid_new.add(dep)

    # Everything neither validated nor noexec is invalid.
    invalidtasks = set()
    for tid in self.rqdata.runtaskentries:
        if tid not in valid_new and tid not in noexec:
            invalidtasks.add(tid)

    # Walk each invalid task's dependency chain; if any dependency is itself
    # invalid, this task is a consequence rather than a root cause.
    found = set()
    processed = set()
    for tid in invalidtasks:
        toprocess = set([tid])
        while toprocess:
            next = set()
            for t in toprocess:
                for dep in self.rqdata.runtaskentries[t].depends:
                    if dep in invalidtasks:
                        found.add(tid)
                    if dep not in processed:
                        processed.add(dep)
                        next.add(dep)
            toprocess = next
            # Once tid is known to be a consequence, stop walking its deps.
            if tid in found:
                toprocess = set()

    tasklist = []
    for tid in invalidtasks.difference(found):
        tasklist.append(tid)

    if tasklist:
        bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

    return invalidtasks.difference(found)
def write_diffscenetasks(self, invalidtasks):
    # For each invalid task, locate the closest previously written siginfo
    # file and print a human-readable diff explaining why the cached
    # result could not be reused.

    # Define recursion callback
    def recursecb(key, hash1, hash2):
        # Compare two sigdata files for a dependency, indenting nested output.
        hashes = [hash1, hash2]
        hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

        recout = []
        if len(hashfiles) == 2:
            out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
            recout.extend(list('    ' + l for l in out2))
        else:
            recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

        return recout

    for tid in invalidtasks:
        (mc, fn, taskname) = split_tid(tid)
        pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
        h = self.rqdata.runtaskentries[tid].hash
        # All siginfo files on disk for this recipe/task.
        matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
        match = None
        for m in matches:
            if h in m:
                match = m
        if match is None:
            bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
        # Drop our own signature file, then diff against the most recent
        # remaining one (sorted by the mapping value, e.g. mtime).
        matches = {k : v for k, v in iter(matches.items()) if h not in k}
        if matches:
            latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
            # Extract the other task's hash from its filename.
            prevh = __find_md5__.search(latestmatch).group(0)
            output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
            bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n'.join(output))
2013-12-19 09:40:52 +00:00
2010-08-18 16:13:06 +00:00
class RunQueueExecute:
    # Base class for runqueue execution phases. Tracks the buildable /
    # running / complete task sets, per-task stamp bookkeeping and failed
    # task ids, and connects the worker pipes back to this executor.
    # Subclasses are expected to provide self.stats (a RunQueueStats) and
    # the task_complete/task_fail callbacks used below.

    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Parallelism and scheduling policy from the configuration.
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"

        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # task -> stamp file currently being built, plus a flat list of the
        # same stamps (used elsewhere for cleanup on interruption).
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []

        self.stampcache = {}

        # Route worker pipe events (per multiconfig) back to this executor.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
             bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

    def runqueue_process_waitpid(self, task, status):
        # Called when a worker reports a task process has exited; status is
        # the exit code (0 == success).
        # self.build_stamps[pid] may not exist when use shared work directory.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps[task])
            del self.build_stamps[task]

        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):
        # Tell every worker (and fakeroot worker) to terminate immediately,
        # then record overall success/failure in the runqueue state.
        for mc in self.rq.worker:
            try:
                self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.worker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass
        for mc in self.rq.fakeworker:
            try:
                self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.fakeworker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        # Graceful shutdown: let active tasks drain before settling on the
        # final runqueue state. Returns active fds while draining.
        self.rq.state = runQueueCleanUp

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        self.rq.state = runQueueComplete
        return True

    def check_dependencies(self, task, taskdeps, setscene=False):
        # Ask the configured depvalidate hook whether task's dependencies
        # are really needed. NOTE: mutates the caller's taskdeps set by
        # adding task itself. Uses self.scenequeue_notneeded, which is
        # presumably set by the scenequeue subclass — confirm before reuse.
        if not self.rq.depvalidate:
            return False

        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            (mc, fn, taskname) = split_tid(dep)
            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data }
        valid = bb.utils.better_eval(call, locs)
        return valid
2010-08-24 23:58:23 +00:00
class RunQueueExecuteDummy(RunQueueExecute):
    """No-op executor used when there is nothing to run.

    Deliberately skips RunQueueExecute.__init__ so no worker pipes or
    configuration are touched.
    """

    def __init__(self, rq):
        # Minimal state: empty stats plus a reference back to the runqueue.
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        """Immediately mark the runqueue as complete."""
        self.rq.state = runQueueComplete
2010-08-24 23:58:23 +00:00
2010-08-18 16:13:06 +00:00
class RunQueueExecuteTasks(RunQueueExecute):
    # Executor for the main (non-setscene) phase. Works out which tasks are
    # already covered by setscene results, then feeds the remainder to the
    # workers in the order chosen by the configured scheduler.

    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))

        self.stampcache = {}

        # Snapshot of coverage before any propagation, used later to avoid
        # un-covering tasks the scenequeue itself declared covered.
        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for tid in self.rqdata.runtaskentries:
            if len(self.rqdata.runtaskentries[tid].depends) == 0:
                self.runq_buildable.add(tid)
            if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(tid)

        # Propagate coverage to a fixed point: a task whose reverse
        # dependencies are all covered is itself covered.
        found = True
        while found:
            found = False
            for tid in self.rqdata.runtaskentries:
                if tid in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))

                if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                    found = True
                    self.rq.scenequeue_covered.add(tid)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            tasknames = {}
            fns = {}
            # Collect tasks with neither a current setscene stamp nor a
            # current normal stamp; the verify hook decides what to re-run.
            for tid in self.rqdata.runtaskentries:
                (mc, fn, taskname) = split_tid(tid)
                taskfn = taskfn_fromtid(tid)
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                fns[tid] = taskfn
                tasknames[tid] = taskname
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    continue
                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    continue
                invalidtasks.append(tid)

            call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.expanded_data, "invalidtasks" : invalidtasks }
            covered_remove = bb.utils.better_eval(call, locs)

        def removecoveredtask(tid):
            # Delete the setscene stamp and drop tid from the covered set so
            # the real task executes.
            (mc, fn, taskname) = split_tid(tid)
            taskname = taskname + '_setscene'
            taskfn = taskfn_fromtid(tid)
            bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
            self.rq.scenequeue_covered.remove(tid)

        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        # Removing a task from the covered set can invalidate coverage of
        # the tasks it depends on; cascade the removal until stable.
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runtaskentries[task].depends:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)

        # Tell listeners which target stamps are up to date, per multiconfig.
        for mc in self.rqdata.dataCaches:
            target_pairs = []
            for tid in self.rqdata.target_tids:
                (tidmc, fn, taskname) = split_tid(tid)
                if tidmc == mc:
                    target_pairs.append((fn, taskname))
            event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)

        # Pick the scheduler named by BB_SCHEDULER from the available set.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'.  Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

    def get_schedulers(self):
        # Collect every RunQueueScheduler subclass defined in this module,
        # plus any user-supplied ones named in BB_SCHEDULERS ("module.Class").
        schedulers = set(obj for obj in globals().values()
                             if type(obj) is type and
                                issubclass(obj, RunQueueScheduler))

        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
        if user_schedulers:
            for sched in user_schedulers.split():
                if not "." in sched:
                    bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                    continue

                modname, name = sched.rsplit(".", 1)
                try:
                    module = __import__(modname, fromlist=(name,))
                except ImportError as exc:
                    logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                    raise SystemExit(1)
                else:
                    schedulers.add(getattr(module, name))

        return schedulers

    def setbuildable(self, task):
        # Mark the task ready to run and notify the scheduler.
        # NOTE: 'newbuilable' (sic) matches the scheduler API spelling.
        self.runq_buildable.add(task)
        self.sched.newbuilable(task)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete.add(task)
        for revdep in self.rqdata.runtaskentries[task].revdeps:
            if revdep in self.runq_running:
                continue
            if revdep in self.runq_buildable:
                continue
            alldeps = 1
            for dep in self.rqdata.runtaskentries[revdep].depends:
                if dep not in self.runq_complete:
                    alldeps = 0
            if alldeps == 1:
                self.setbuildable(revdep)
                fn = fn_from_tid(revdep)
                taskname = taskname_from_tid(revdep)
                logger.debug(1, "Marking task %s as buildable", revdep)

    def task_complete(self, task):
        # Successful completion: update stats, fire the event, then unlock
        # any reverse dependencies.
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # Unless --continue was requested, stop scheduling new tasks.
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp

    def task_skip(self, task, reason):
        # Task does not need to execute (covered/stamped); account for it
        # as if it had run and completed.
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        """

        if self.rqdata.setscenewhitelist:
            # Check tasks that are going to run against the whitelist
            def check_norun_task(tid, showerror=False):
                # True if tid would execute despite not being whitelisted.
                (mc, fn, taskname) = split_tid(tid)
                # Ignore covered tasks
                if tid in self.rq.scenequeue_covered:
                    return False
                # Ignore stamped tasks
                if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
                    return False
                # Ignore noexec tasks
                taskdep = self.rqdata.dataCaches[mc].task_deps[fn]
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    return False

                pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
                if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                    if showerror:
                        if tid in self.rqdata.runq_setscene_tids:
                            logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
                        else:
                            logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
                    return True
                return False
            # Look to see if any tasks that we think shouldn't run are going to
            unexpected = False
            for tid in self.rqdata.runtaskentries:
                if check_norun_task(tid):
                    unexpected = True
                    break
            if unexpected:
                # Run through the tasks in the rough order they'd have executed and print errors
                # (since the order can be useful - usually missing sstate for the last few tasks
                # is the cause of the problem)
                task = self.sched.next()
                while task is not None:
                    check_norun_task(task, showerror=True)
                    self.task_skip(task, 'Setscene enforcement check')
                    task = self.sched.next()

                self.rq.state = runQueueCleanUp
                return True

        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        task = self.sched.next()
        if task is not None:
            (mc, fn, taskname) = split_tid(task)
            taskfn = taskfn_fromtid(task)

            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s", task)
                self.task_skip(task, "covered")
                return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s", task)

                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # No work to do: just stamp (unless dry-run) and complete.
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running.add(task)
                self.stats.taskActive()
                if not self.cooker.configuration.dry_run:
                    bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not self.rq.fakeworker:
                    try:
                        self.rq.start_fakeworker(self)
                    except OSError as exc:
                        logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                        self.rq.state = runQueueFailed
                        return True
                # NOTE(review): this branch passes fn to get_file_appends while
                # the non-fakeroot branch below passes taskfn — confirm this
                # difference is intentional.
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            # Record the stamp being produced so it can be tidied on exit.
            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in self.rqdata.runtaskentries:
            if task not in self.runq_buildable:
                logger.error("Task %s never buildable!", task)
            if task not in self.runq_running:
                logger.error("Task %s never ran!", task)
            if task not in self.runq_complete:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True

    def build_taskdepdata(self, task):
        # Build the flat dependency-data mapping handed to the worker:
        # tid -> [pn, taskname, fn, deps, provides], covering task and its
        # transitive dependencies (breadth-first).
        taskdepdata = {}
        next = self.rqdata.runtaskentries[task].depends
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname) = split_tid(revdep)
                taskfn = taskfn_fromtid(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = self.rqdata.runtaskentries[revdep].depends
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
2010-08-19 10:36:29 +00:00
class RunQueueExecuteScenequeue(RunQueueExecute):
    """
    Executes the setscene ("scenequeue") phase of the runqueue.

    Builds a collapsed dependency graph containing only the setscene tasks,
    optionally validates which setscene tasks can succeed via the configured
    hash validation function, then runs the setscene tasks through the worker
    processes.  Tasks "covered" by successful setscene runs are recorded so
    the main runqueue can skip them.
    """
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        # Setscene tasks which succeeded (their real tasks can be skipped).
        self.scenequeue_covered = set()
        # Setscene tasks which failed or were unavailable.
        self.scenequeue_notcovered = set()
        # Setscene tasks skipped because no build target needed them.
        self.scenequeue_notneeded = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene_tids) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Working maps used while collapsing the full task graph down to
        # setscene-only reverse dependencies.
        sq_revdeps = {}
        sq_revdeps_new = {}
        sq_revdeps_squash = {}
        # deptid -> set of setscene tids that must not run before it
        # (from explicit *_setscene [depends] entries).
        self.sq_harddeps = {}

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        self.rqdata.init_progress_reporter.next_stage()

        # First process the chains up to the first setscene task.
        # Endpoints are leaf tasks (no reverse dependencies) which are not
        # themselves setscene tasks.
        endpoints = {}
        for tid in self.rqdata.runtaskentries:
            sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new[tid] = set()
            if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                #bb.warn("Added endpoint %s" % (tid))
                endpoints[tid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Secondly process the chains between setscene tasks.
        # Each dependency of a setscene task becomes an endpoint carrying that
        # setscene task's tid.
        for tid in self.rqdata.runq_setscene_tids:
            #bb.warn("Added endpoint 2 %s" % (tid))
            for dep in self.rqdata.runtaskentries[tid].depends:
                if dep not in endpoints:
                    endpoints[dep] = set()
                #bb.warn(" Added endpoint 3 %s" % (dep))
                endpoints[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        def process_endpoints(endpoints):
            # Walk backwards from the endpoints, propagating the set of
            # dependent setscene tasks down the chain until a setscene task
            # absorbs them.  Recurses on the newly exposed endpoints.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    # Setscene tasks absorb the accumulated reverse deps and
                    # stop further propagation of them.
                    sq_revdeps_new[point] = tasks
                    tasks = set()
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)

        self.rqdata.init_progress_reporter.next_stage()

        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = {}
        sq_revdeps_new2 = {}
        def process_endpoints2(endpoints):
            # Same collapse as process_endpoints() but it also seeds each
            # endpoint with its own tid, so any setscene task reachable from
            # a build endpoint ends up with a non-empty sq_revdeps_new2 entry.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)

        for tid in self.rqdata.runtaskentries:
            sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new2[tid] = set()
            if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                endpoints2[tid] = set()

        process_endpoints2(endpoints2)

        self.unskippable = []
        for tid in self.rqdata.runq_setscene_tids:
            if sq_revdeps_new2[tid]:
                self.unskippable.append(tid)

        self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))
        for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
            if tid in self.rqdata.runq_setscene_tids:
                deps = set()
                for dep in sq_revdeps_new[tid]:
                    deps.add(dep)
                sq_revdeps_squash[tid] = deps
            elif len(sq_revdeps_new[tid]) != 0:
                # A non-setscene task should have had its accumulated deps
                # absorbed by now; anything left indicates a graph bug.
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
            self.rqdata.init_progress_reporter.update(taskcounter)
        self.rqdata.init_progress_reporter.next_stage()

        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for tid in self.rqdata.runq_setscene_tids:
            (mc, fn, taskname) = split_tid(tid)
            realtid = fn + ":" + taskname + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            for (depname, idependtask) in idepends:
                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                if deptid not in self.rqdata.runtaskentries:
                    bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

                if not deptid in self.sq_harddeps:
                    self.sq_harddeps[deptid] = set()
                self.sq_harddeps[deptid].add(tid)

                sq_revdeps_squash[tid].add(deptid)
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[deptid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Re-add the hard dependencies as reverse deps so the dependent task
        # becomes buildable only once its hard dependency has completed.
        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)

        self.rqdata.init_progress_reporter.next_stage()

        #for tid in sq_revdeps_squash:
        #    for dep in sq_revdeps_squash[tid]:
        #        data = data + "\n   %s" % dep
        #    bb.warn("Task %s_setscene: is %s " % (tid, data

        self.sq_deps = {}
        self.sq_revdeps = sq_revdeps_squash
        # sq_revdeps2 is a mutable working copy consumed by
        # scenequeue_updatecounters(); sq_revdeps stays intact.
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        # Invert the reverse-dependency map to get forward dependencies.
        for tid in self.sq_revdeps:
            self.sq_deps[tid] = set()
        for tid in self.sq_revdeps:
            for dep in self.sq_revdeps[tid]:
                self.sq_deps[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        # Tasks with no remaining reverse deps can run immediately.
        for tid in self.sq_revdeps:
            if len(self.sq_revdeps[tid]) == 0:
                self.runq_buildable.add(tid)

        self.rqdata.init_progress_reporter.finish()

        self.outrightfail = []
        if self.rq.hashvalidate:
            # Ask the configured hash validation function which setscene
            # tasks have artefacts available; everything else fails outright.
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for tid in self.sq_revdeps:
                (mc, fn, taskname) = split_tid(tid)
                taskfn = taskfn_fromtid(tid)

                taskdep = self.rqdata.dataCaches[mc].task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    # noexec tasks have nothing to fetch; just stamp them.
                    noexec.append(tid)
                    self.task_skip(tid)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
                    continue

                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
                sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                sq_taskname.append(taskname)
                sq_task.append(tid)
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
            # valid is a list of indices into sq_task deemed available.
            valid = bb.utils.better_eval(call, locs)

            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for tid in self.sq_revdeps:
                if tid not in valid_new and tid not in noexec:
                    logger.debug(2, 'No package found, so skipping setscene task %s', tid)
                    self.outrightfail.append(tid)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

    def scenequeue_updatecounters(self, task, fail=False):
        # Remove 'task' from the outstanding reverse dependencies of its
        # dependents, marking any now-unblocked dependent as buildable.
        # On failure, hard-dependent tasks are skipped recursively.
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable.add(dep)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def check_taskfail(self, task):
        # When BB_SETSCENE_ENFORCE is active, a failed setscene task whose
        # real task is not whitelisted aborts the whole run.
        if self.rqdata.setscenewhitelist:
            realtask = task.split('_setscene')[0]
            (mc, fn, taskname) = split_tid(realtask)
            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
            if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                self.rq.state = runQueueCleanUp

    def task_complete(self, task):
        # Successful completion reported by a worker.
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        # Failure reported by a worker; 'result' is the exit status.
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
        self.check_taskfail(task)

    def task_failoutright(self, task):
        # Mark a task as failed without ever running it (no artefact found,
        # forced rebuild, or underlying stamp already current).
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
        # Mark a task as skipped but counted as covered (e.g. stamp present).
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        self.rq.read_workers()

        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in self.rqdata.runq_setscene_tids:
                if nexttask in self.runq_buildable and nexttask not in self.runq_running:
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    # Skip setscene tasks nothing actually targets, provided
                    # all their reverse deps are already covered.
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        fn = fn_from_tid(nexttask)
                        foundtarget = False

                        if nexttask in self.rqdata.target_tids:
                            foundtarget = True
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            (mc, fn, taskname) = split_tid(task)
            taskfn = taskfn_fromtid(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                # Forced targets must rebuild; don't let setscene cover them.
                if task in self.rqdata.target_tids:
                    self.task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                # fakeroot tasks must go to the fakeroot worker process.
                if not self.rq.fakeworker:
                    self.rq.start_fakeworker(self)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.runq_running.add(task)
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True
        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for tid in self.sq_revdeps:
        #    if tid not in self.runq_running:
        #        buildable = tid in self.runq_buildable
        #        revdeps = self.sq_revdeps[tid]
        #        bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))

        # Convert scenequeue_covered task numbers into full taskgraph ids
        oldcovered = self.scenequeue_covered
        self.rq.scenequeue_covered = set()
        for task in oldcovered:
            self.rq.scenequeue_covered.add(task)

        logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True

    def runqueue_process_waitpid(self, task, status):
        # Delegate worker exit handling to the shared base implementation.
        RunQueueExecute.runqueue_process_waitpid(self, task, status)
2011-01-10 13:13:08 +00:00
2010-01-20 18:46:02 +00:00
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        # Historical interface: the payload is stored directly as the
        # exception args rather than passed through Exception.__init__.
        self.args = x
class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """
    def __init__(self, remain):
        # Number of still-active tasks being waited on.
        self.remain = remain
        self.message = "Waiting for {} active tasks to finish".format(remain)
        super().__init__()
class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        # Identify the task by its tid plus parsed components.
        self.taskid = task
        self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Snapshot the stats so later queue progress doesn't mutate the event.
        self.stats = stats.copy()
        super().__init__()
2012-02-27 18:54:11 +00:00
class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # Rewrite the task identity fields to refer to the setscene variant.
        suffix = "_setscene"
        self.taskstring = task + suffix
        self.taskname = taskname_from_tid(task) + suffix
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
2010-01-20 18:46:02 +00:00
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # True when the task body is not actually executed (noexec task).
        self.noexec = noexec
2010-01-20 18:46:02 +00:00
2012-02-27 18:54:11 +00:00
class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying a setscene task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # True when the task body is not actually executed (noexec task).
        self.noexec = noexec
2010-01-20 18:46:02 +00:00
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        # Exit status reported by the worker for the failed task.
        self.exitcode = exitcode
2010-01-20 18:46:02 +00:00
2012-02-27 18:54:11 +00:00
class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying a setscene task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        # Exit status reported by the worker for the failed setscene task.
        self.exitcode = exitcode
2011-02-28 14:28:25 +00:00
2014-08-02 08:51:00 +00:00
class sceneQueueComplete(sceneQueueEvent):
    """
    Event when all the sceneQueue tasks are complete
    """
    def __init__(self, stats, rq):
        # Deliberately bypasses sceneQueueEvent/runQueueEvent.__init__:
        # there is no single task to describe, only the final stats.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2010-01-20 18:46:02 +00:00
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed
    """
2008-05-13 07:53:18 +00:00
2013-09-09 16:40:56 +00:00
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed
    """
2013-09-16 12:46:01 +00:00
class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        super().__init__(task, stats, rq)
        # Human-readable explanation of why the task was skipped.
        self.reason = reason
2010-01-20 18:46:02 +00:00
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server

    Reads <event>...</event> and <exitcode>...</exitcode> pickled messages
    from a worker's output pipe and dispatches them to the event system and
    the active runqueue executor respectively.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        self.input = pipein
        # The write end belongs to the worker; close our copy of it.
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Switch the executor receiving exitcode notifications (the scene
        # queue and the real runqueue use the same pipes).
        self.rqexec = rqexec

    def read(self):
        """
        Poll the workers for unexpected death, drain the pipe and dispatch
        any complete messages.  Returns True if new data was read.
        """
        # Fix: the previous code tested the worker *dict* for membership in
        # itself and read .pid/.returncode off the dict, which could never
        # produce the intended error report. Pair each dict with its name
        # and inspect the per-mc worker's process instead.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for mc in workers:
                workers[mc].process.poll()
                if workers[mc].process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, workers[mc].process.pid, str(workers[mc].process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # Non-blocking fd: EAGAIN simply means no data available yet.
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)

        # Keep extracting messages until no complete one remains; events and
        # exitcodes may be interleaved, so loop until a full pass finds none.
        found = True
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                # 8 == len(b"</event>")
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                # 11 == len(b"</exitcode>")
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Drain anything still pending before closing the read end.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
2016-06-23 10:59:11 +00:00
def get_setscene_enforce_whitelist(d):
    """
    Return the expanded setscene-enforce whitelist, or None when
    BB_SETSCENE_ENFORCE is not set to '1'.

    Entries of the form '%:taskname' are expanded to one entry per
    non-option command line target, using each target's recipe name.
    """
    if d.getVar('BB_SETSCENE_ENFORCE', True) != '1':
        return None
    entries = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST", True) or "").split()
    expanded = []
    for entry in entries:
        if not entry.startswith('%:'):
            expanded.append(entry)
            continue
        # Substitute '%' with every recipe named on the command line.
        taskname = entry.split(':')[1]
        for target in sys.argv[1:]:
            if not target.startswith('-'):
                expanded.append(target.split(':')[0] + ':' + taskname)
    return expanded
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if pn:taskname is allowed to run as a real task under
    BB_SETSCENE_ENFORCE, i.e. it matches one of the fnmatch patterns in
    the whitelist, or no whitelist is in force at all.
    """
    import fnmatch
    if not whitelist:
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)