diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 48c6a79ffb..d42eb81664 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -36,6 +36,7 @@ from bb import msg, data, event
 from bb import monitordisk
 import subprocess
 import pickle
+from multiprocessing import Process
 
 bblogger = logging.getLogger("BitBake")
 logger = logging.getLogger("BitBake.RunQueue")
@@ -1303,15 +1304,36 @@ class RunQueue:
         else:
             self.rqexe.finish()
 
-    def dump_signatures(self, options):
-        done = set()
-        bb.note("Reparsing files to collect dependency data")
+    def rq_dump_sigfn(self, fn, options):
         bb_cache = bb.cache.NoCache(self.cooker.databuilder)
+        the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
+        siggen = bb.parse.siggen
+        dataCaches = self.rqdata.dataCaches
+        siggen.dump_sigfn(fn, dataCaches, options)
+
+    def dump_signatures(self, options):
+        fns = set()
+        bb.note("Reparsing files to collect dependency data")
+
         for tid in self.rqdata.runtaskentries:
             fn = fn_from_tid(tid)
-            if fn not in done:
-                the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
-                done.add(fn)
+            fns.add(fn)
+
+        max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
+        # We cannot use the real multiprocessing.Pool easily due to some local data
+        # that can't be pickled. This is a cheap multi-process solution.
+        launched = []
+        while fns:
+            if len(launched) < max_process:
+                p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
+                p.start()
+                launched.append(p)
+            for q in launched:
+                # The finished processes are joined when calling is_alive()
+                if not q.is_alive():
+                    launched.remove(q)
+        for p in launched:
+            p.join()
 
         bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
 
diff --git a/bitbake/lib/bb/siggen.py b/bitbake/lib/bb/siggen.py
index 4226c80c85..3ceeef1ccc 100644
--- a/bitbake/lib/bb/siggen.py
+++ b/bitbake/lib/bb/siggen.py
@@ -307,8 +307,8 @@ class SignatureGeneratorBasic(SignatureGenerator):
                 pass
             raise err
 
-    def dump_sigs(self, dataCaches, options):
-        for fn in self.taskdeps:
+    def dump_sigfn(self, fn, dataCaches, options):
+        if fn in self.taskdeps:
             for task in self.taskdeps[fn]:
                 tid = fn + ":" + task
                 (mc, _, _) = bb.runqueue.split_tid(tid)
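
Note on the pattern used in dump_signatures(): the patch launches bare multiprocessing.Process workers and throttles them by hand, because (per the patch's own comment) the per-file data cannot be pickled, which rules out an easy multiprocessing.Pool. The following is a minimal standalone sketch of that throttling idea, not BitBake code: work() and run_all() are hypothetical names, the launched list is rebuilt instead of being mutated while iterated, and a short sleep is added to avoid a busy wait.

import os
import time
from multiprocessing import Process

def work(item):
    # Placeholder for per-item work, e.g. dumping signature data for one file.
    print("processing", item)

def run_all(items, max_process=None):
    max_process = max_process or os.cpu_count() or 1
    pending = set(items)
    launched = []
    while pending:
        # Start a new worker only while below the concurrency limit.
        if len(launched) < max_process:
            p = Process(target=work, args=(pending.pop(),))
            p.start()
            launched.append(p)
        # Reap finished workers; is_alive() joins processes that have exited.
        launched = [p for p in launched if p.is_alive()]
        time.sleep(0.1)
    # Wait for the last batch of workers to finish.
    for p in launched:
        p.join()

if __name__ == "__main__":
    run_all(range(8), max_process=4)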