bitbake: use multiple processes to dump signatures.
This change significantly shortens the time spent in the reparsing stage of the '-S' option. Each file is reparsed and then dumped within a dedicated process. The maximum number of running processes is not greater than the value of BB_NUMBER_PARSE_THREADS if it is set. The dump_sigs() in class SignatureGeneratorBasic is _replaced_ by a new dump_sigfn() interface, so calls from the outside and from subclasses are dispatched to the implementation in the base class SignatureGeneratorBasic. Fixes [YOCTO #10352] (Bitbake rev: 99d3703edd77a21770b366c6ad65a3c0f5183493) Signed-off-by: Jianxun Zhang <jianxun.zhang@linux.intel.com> Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
This commit is contained in:
parent
62591d9722
commit
7eb02e837e
|
@ -36,6 +36,7 @@ from bb import msg, data, event
|
|||
from bb import monitordisk
|
||||
import subprocess
|
||||
import pickle
|
||||
from multiprocessing import Process
|
||||
|
||||
bblogger = logging.getLogger("BitBake")
|
||||
logger = logging.getLogger("BitBake.RunQueue")
|
||||
|
@ -1303,15 +1304,36 @@ class RunQueue:
|
|||
else:
|
||||
self.rqexe.finish()
|
||||
|
||||
def dump_signatures(self, options):
|
||||
done = set()
|
||||
bb.note("Reparsing files to collect dependency data")
|
||||
def rq_dump_sigfn(self, fn, options):
|
||||
bb_cache = bb.cache.NoCache(self.cooker.databuilder)
|
||||
the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
|
||||
siggen = bb.parse.siggen
|
||||
dataCaches = self.rqdata.dataCaches
|
||||
siggen.dump_sigfn(fn, dataCaches, options)
|
||||
|
||||
def dump_signatures(self, options):
|
||||
fns = set()
|
||||
bb.note("Reparsing files to collect dependency data")
|
||||
|
||||
for tid in self.rqdata.runtaskentries:
|
||||
fn = fn_from_tid(tid)
|
||||
if fn not in done:
|
||||
the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
|
||||
done.add(fn)
|
||||
fns.add(fn)
|
||||
|
||||
max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
|
||||
# We cannot use the real multiprocessing.Pool easily due to some local data
|
||||
# that can't be pickled. This is a cheap multi-process solution.
|
||||
launched = []
|
||||
while fns:
|
||||
if len(launched) < max_process:
|
||||
p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
|
||||
p.start()
|
||||
launched.append(p)
|
||||
for q in launched:
|
||||
# The finished processes are joined when calling is_alive()
|
||||
if not q.is_alive():
|
||||
launched.remove(q)
|
||||
for p in launched:
|
||||
p.join()
|
||||
|
||||
bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
|
||||
|
||||
|
|
|
@ -307,8 +307,8 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
|||
pass
|
||||
raise err
|
||||
|
||||
def dump_sigs(self, dataCaches, options):
|
||||
for fn in self.taskdeps:
|
||||
def dump_sigfn(self, fn, dataCaches, options):
|
||||
if fn in self.taskdeps:
|
||||
for task in self.taskdeps[fn]:
|
||||
tid = fn + ":" + task
|
||||
(mc, _, _) = bb.runqueue.split_tid(tid)
|
||||
|
|
Loading…
Reference in New Issue