# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Cache implementation
#
# Caching of bitbake variables before task execution

# Copyright (C) 2006        Richard Purdie
# Copyright (C) 2012        Intel Corporation

# but small sections based on code from bin/bitbake:
# Copyright (C) 2003, 2004  Chris Larson
# Copyright (C) 2003, 2004  Phil Blundell
# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
# Copyright (C) 2005        Holger Hans Peter Freyther
# Copyright (C) 2005        ROAD GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import logging
from collections import defaultdict
import bb.utils

logger = logging.getLogger("BitBake.Cache")

try:
    import cPickle as pickle
except ImportError:
    import pickle
    logger.info("Importing cPickle failed. "
                "Falling back to a very slow implementation.")

__cache_version__ = "148"

def getCacheFile(path, filename, data_hash):
    return os.path.join(path, filename + "." + data_hash)
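
# Example (illustrative values, not part of the original file):
#   getCacheFile("/build/cache", "bb_cache.dat", "abc123")
#   -> "/build/cache/bb_cache.dat.abc123"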

# RecipeInfoCommon defines common data-retrieval methods from recipe
# metadata for the caches. CoreRecipeInfo, as well as any extra
# RecipeInfo classes, must inherit from it.
class RecipeInfoCommon(object):

    @classmethod
    def listvar(cls, var, metadata):
        return cls.getvar(var, metadata).split()

    @classmethod
    def intvar(cls, var, metadata):
        return int(cls.getvar(var, metadata) or 0)

    @classmethod
    def depvar(cls, var, metadata):
        return bb.utils.explode_deps(cls.getvar(var, metadata))

    @classmethod
    def pkgvar(cls, var, packages, metadata):
        return dict((pkg, cls.depvar("%s_%s" % (var, pkg), metadata))
                    for pkg in packages)

    @classmethod
    def taskvar(cls, var, tasks, metadata):
        return dict((task, cls.getvar("%s_task-%s" % (var, task), metadata))
                    for task in tasks)
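
    # Example (illustrative task names): taskvar('BB_BASEHASH',
    # ['do_configure', 'do_compile'], d) maps each task to the value of
    # BB_BASEHASH_task-<task>, following the "%s_task-%s" pattern above.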

    @classmethod
    def flaglist(cls, flag, varlist, metadata, squash=False):
        out_dict = dict((var, metadata.getVarFlag(var, flag, True))
                    for var in varlist)
        if squash:
            return dict((k, v) for (k, v) in out_dict.iteritems() if v)
        else:
            return out_dict
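
    # Example (illustrative usage): flaglist('file-checksums', tasks, d, True)
    # returns only the tasks whose 'file-checksums' flag is actually set,
    # since squash=True filters out entries with empty values.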

    @classmethod
    def getvar(cls, var, metadata):
        return metadata.getVar(var, True) or ''


class CoreRecipeInfo(RecipeInfoCommon):
    __slots__ = ()

    cachefile = "bb_cache.dat"

    def __init__(self, filename, metadata):
        self.file_depends = metadata.getVar('__depends', False)
        self.timestamp = bb.parse.cached_mtime(filename)
        self.variants = self.listvar('__VARIANTS', metadata) + ['']
        self.appends = self.listvar('__BBAPPEND', metadata)
        self.nocache = self.getvar('__BB_DONT_CACHE', metadata)

        self.skipreason = self.getvar('__SKIPPED', metadata)
        if self.skipreason:
            self.pn = self.getvar('PN', metadata) or bb.parse.BBHandler.vars_from_file(filename, metadata)[0]
            self.skipped = True
            self.provides = self.depvar('PROVIDES', metadata)
            self.rprovides = self.depvar('RPROVIDES', metadata)
            return

        self.tasks = metadata.getVar('__BBTASKS', False)

        self.pn = self.getvar('PN', metadata)
        self.packages = self.listvar('PACKAGES', metadata)
        if not self.pn in self.packages:
            self.packages.append(self.pn)

        self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
        self.hashfilename = self.getvar('BB_HASHFILENAME', metadata)

        self.task_deps = metadata.getVar('_task_deps', False) or {'tasks': [], 'parents': {}}

        self.skipped = False
        self.pe = self.getvar('PE', metadata)
        self.pv = self.getvar('PV', metadata)
        self.pr = self.getvar('PR', metadata)
        self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata)
        self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata)
        self.stamp = self.getvar('STAMP', metadata)
        self.stampclean = self.getvar('STAMPCLEAN', metadata)
        self.stamp_base = self.flaglist('stamp-base', self.tasks, metadata)
        self.stamp_base_clean = self.flaglist('stamp-base-clean', self.tasks, metadata)
        self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
        self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
        self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
        self.depends = self.depvar('DEPENDS', metadata)
        self.provides = self.depvar('PROVIDES', metadata)
        self.rdepends = self.depvar('RDEPENDS', metadata)
        self.rprovides = self.depvar('RPROVIDES', metadata)
        self.rrecommends = self.depvar('RRECOMMENDS', metadata)
        self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
        self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata)
        self.rrecommends_pkg = self.pkgvar('RRECOMMENDS', self.packages, metadata)
        self.inherits = self.getvar('__inherit_cache', metadata)
        self.fakerootenv = self.getvar('FAKEROOTENV', metadata)
        self.fakerootdirs = self.getvar('FAKEROOTDIRS', metadata)
        self.fakerootnoenv = self.getvar('FAKEROOTNOENV', metadata)

    @classmethod
    def init_cacheData(cls, cachedata):
        # CacheData fields initialised by the core recipe info class
        cachedata.task_deps = {}
        cachedata.pkg_fn = {}
        cachedata.pkg_pn = defaultdict(list)
        cachedata.pkg_pepvpr = {}
        cachedata.pkg_dp = {}

        cachedata.stamp = {}
        cachedata.stampclean = {}
        cachedata.stamp_base = {}
        cachedata.stamp_base_clean = {}
        cachedata.stamp_extrainfo = {}
        cachedata.file_checksums = {}
        cachedata.fn_provides = {}
        cachedata.pn_provides = defaultdict(list)
        cachedata.all_depends = []

        cachedata.deps = defaultdict(list)
        cachedata.packages = defaultdict(list)
        cachedata.providers = defaultdict(list)
        cachedata.rproviders = defaultdict(list)
        cachedata.packages_dynamic = defaultdict(list)

        cachedata.rundeps = defaultdict(lambda: defaultdict(list))
        cachedata.runrecs = defaultdict(lambda: defaultdict(list))

        cachedata.possible_world = []
        cachedata.universe_target = []
        cachedata.hashfn = {}

        cachedata.basetaskhash = {}
        cachedata.inherits = {}
        cachedata.fakerootenv = {}
        cachedata.fakerootnoenv = {}
        cachedata.fakerootdirs = {}

    def add_cacheData(self, cachedata, fn):
        cachedata.task_deps[fn] = self.task_deps
        cachedata.pkg_fn[fn] = self.pn
        cachedata.pkg_pn[self.pn].append(fn)
        cachedata.pkg_pepvpr[fn] = (self.pe, self.pv, self.pr)
        cachedata.pkg_dp[fn] = self.defaultpref
        cachedata.stamp[fn] = self.stamp
        cachedata.stampclean[fn] = self.stampclean
        cachedata.stamp_base[fn] = self.stamp_base
        cachedata.stamp_base_clean[fn] = self.stamp_base_clean
        cachedata.stamp_extrainfo[fn] = self.stamp_extrainfo
        cachedata.file_checksums[fn] = self.file_checksums

        provides = [self.pn]
        for provide in self.provides:
            if provide not in provides:
                provides.append(provide)
        cachedata.fn_provides[fn] = provides

        for provide in provides:
            cachedata.providers[provide].append(fn)
            if provide not in cachedata.pn_provides[self.pn]:
                cachedata.pn_provides[self.pn].append(provide)

        for dep in self.depends:
            if dep not in cachedata.deps[fn]:
                cachedata.deps[fn].append(dep)
            if dep not in cachedata.all_depends:
                cachedata.all_depends.append(dep)

        rprovides = self.rprovides
        for package in self.packages:
            cachedata.packages[package].append(fn)
            rprovides += self.rprovides_pkg[package]

        for rprovide in rprovides:
            cachedata.rproviders[rprovide].append(fn)

        for package in self.packages_dynamic:
            cachedata.packages_dynamic[package].append(fn)

        # Build a hash of runtime depends and recommends
        for package in self.packages + [self.pn]:
            cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package]
            cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package]

        # Collect files we may need for possible world-dep
        # calculations
        if self.not_world:
            logger.debug(1, "EXCLUDE FROM WORLD: %s", fn)
        else:
            cachedata.possible_world.append(fn)

        # Create a collection of all targets for sanity-checking
        # tasks, such as upstream versions, license, and tools for
        # task and image creation.
        cachedata.universe_target.append(self.pn)

        cachedata.hashfn[fn] = self.hashfilename
        for task, taskhash in self.basetaskhashes.iteritems():
            identifier = '%s.%s' % (fn, task)
            cachedata.basetaskhash[identifier] = taskhash

        cachedata.inherits[fn] = self.inherits
        cachedata.fakerootenv[fn] = self.fakerootenv
        cachedata.fakerootnoenv[fn] = self.fakerootnoenv
        cachedata.fakerootdirs[fn] = self.fakerootdirs


class Cache(object):
    """
    BitBake Cache implementation
    """

    def __init__(self, data, data_hash, caches_array):
        # caches_array is stored so that we can decide later whether
        # extra cache files need to be dumped/loaded
        self.caches_array = caches_array
        self.cachedir = data.getVar("CACHE", True)
        self.clean = set()
        self.checked = set()
        self.depends_cache = {}
        self.data = None
        self.data_fn = None
        self.cacheclean = True
        self.data_hash = data_hash

        if self.cachedir in [None, '']:
            self.has_cache = False
            logger.info("Not using a cache. "
                        "Set CACHE = <directory> to enable.")
            return

        self.has_cache = True
        self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat", self.data_hash)

        logger.debug(1, "Using cache in '%s'", self.cachedir)
        bb.utils.mkdirhier(self.cachedir)

        cache_ok = True
        if self.caches_array:
            for cache_class in self.caches_array:
                if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
                    cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
                    cache_ok = cache_ok and os.path.exists(cachefile)
                    cache_class.init_cacheData(self)
        if cache_ok:
            self.load_cachefile()
        elif os.path.isfile(self.cachefile):
            logger.info("Out of date cache found, rebuilding...")

    def load_cachefile(self):
        # First, use the core cache file's header to check validity
        with open(self.cachefile, "rb") as cachefile:
            pickled = pickle.Unpickler(cachefile)
            try:
                cache_ver = pickled.load()
                bitbake_ver = pickled.load()
            except Exception:
                logger.info('Invalid cache, rebuilding...')
                return

            if cache_ver != __cache_version__:
                logger.info('Cache version mismatch, rebuilding...')
                return
            elif bitbake_ver != bb.__version__:
                logger.info('Bitbake version mismatch, rebuilding...')
                return

        cachesize = 0
        previous_progress = 0
        previous_percent = 0

        # Calculate the total size of the cache files
        for cache_class in self.caches_array:
            if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
                cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
                with open(cachefile, "rb") as cachefile:
                    cachesize += os.fstat(cachefile.fileno()).st_size

        bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)

        for cache_class in self.caches_array:
            if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
                cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
                with open(cachefile, "rb") as cachefile:
                    pickled = pickle.Unpickler(cachefile)
                    while cachefile:
                        try:
                            key = pickled.load()
                            value = pickled.load()
                        except Exception:
                            break
                        if self.depends_cache.has_key(key):
                            self.depends_cache[key].append(value)
                        else:
                            self.depends_cache[key] = [value]
                        # only fire events on even percentage boundaries
                        current_progress = cachefile.tell() + previous_progress
                        current_percent = 100 * current_progress / cachesize
                        if current_percent > previous_percent:
                            previous_percent = current_percent
                            bb.event.fire(bb.event.CacheLoadProgress(current_progress, cachesize),
                                          self.data)

                previous_progress += current_progress

        # Note: the depends_cache entry count corresponds to the number of
        # parsed files; a file with several caches still counts as one item
        bb.event.fire(bb.event.CacheLoadCompleted(cachesize,
                                                  len(self.depends_cache)),
                      self.data)

    @staticmethod
    def virtualfn2realfn(virtualfn):
        """
        Convert a virtual file name to a real one + the associated subclass keyword
        """

        fn = virtualfn
        cls = ""
        if virtualfn.startswith('virtual:'):
            elems = virtualfn.split(':')
            cls = ":".join(elems[1:-1])
            fn = elems[-1]
        return (fn, cls)
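
    # Example (illustrative path):
    #   virtualfn2realfn("virtual:native:/x/foo.bb") -> ("/x/foo.bb", "native")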

    @staticmethod
    def realfn2virtual(realfn, cls):
        """
        Convert a real filename + the associated subclass keyword to a virtual filename
        """
        if cls == "":
            return realfn
        return "virtual:" + cls + ":" + realfn

    @classmethod
    def loadDataFull(cls, virtualfn, appends, cfgData):
        """
        Return a complete set of data for fn.
        To do this, we need to parse the file.
        """

        (fn, virtual) = cls.virtualfn2realfn(virtualfn)

        logger.debug(1, "Parsing %s (full)", fn)

        cfgData.setVar("__ONLYFINALISE", virtual or "default")
        bb_data = cls.load_bbfile(fn, appends, cfgData)
        return bb_data[virtual]

    @classmethod
    def parse(cls, filename, appends, configdata, caches_array):
        """Parse the specified filename, returning the recipe information"""
        infos = []
        datastores = cls.load_bbfile(filename, appends, configdata)
        depends = []
        for variant, data in sorted(datastores.iteritems(),
                                    key=lambda i: i[0],
                                    reverse=True):
            virtualfn = cls.realfn2virtual(filename, variant)
            depends = depends + (data.getVar("__depends", False) or [])
            if depends and not variant:
                data.setVar("__depends", depends)

            info_array = []
            for cache_class in caches_array:
                if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
                    info = cache_class(filename, data)
                    info_array.append(info)
            infos.append((virtualfn, info_array))

        return infos

    def load(self, filename, appends, configdata):
        """Obtain the recipe information for the specified filename,
        using cached values if available, otherwise parsing.

        Note that if it does parse to obtain the info, it will not
        automatically add the information to the cache or to your
        CacheData. Use the add or add_info method to do so after
        running this, or use loadData instead."""
        cached = self.cacheValid(filename, appends)
        if cached:
            infos = []
            # each info_array is a list like [CoreRecipeInfo, XXXRecipeInfo]
            info_array = self.depends_cache[filename]
            for variant in info_array[0].variants:
                virtualfn = self.realfn2virtual(filename, variant)
                infos.append((virtualfn, self.depends_cache[virtualfn]))
        else:
            logger.debug(1, "Parsing %s", filename)
            return self.parse(filename, appends, configdata, self.caches_array)

        return cached, infos

    def loadData(self, fn, appends, cfgData, cacheData):
        """Load the recipe info for the specified filename,
        parsing and adding to the cache if necessary, and adding
        the recipe information to the supplied CacheData instance."""
        skipped, virtuals = 0, 0

        cached, infos = self.load(fn, appends, cfgData)
        for virtualfn, info_array in infos:
            if info_array[0].skipped:
                logger.debug(1, "Skipping %s: %s", virtualfn, info_array[0].skipreason)
                skipped += 1
            else:
                self.add_info(virtualfn, info_array, cacheData, not cached)
                virtuals += 1

        return cached, skipped, virtuals
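
    # Illustrative call (hypothetical recipe path):
    #   cached, skipped, virtuals = cache.loadData("/x/foo.bb", appends,
    #                                              cfgData, cacheData)
    # which only pays the parse cost when cacheValid() below finds the
    # cached entry stale.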

    def cacheValid(self, fn, appends):
        """
        Is the cache valid for fn?
        Fast version, no timestamps checked.
        """
        if fn not in self.checked:
            self.cacheValidUpdate(fn, appends)

        # Is the cache enabled?
        if not self.has_cache:
            return False
        if fn in self.clean:
            return True
        return False

    def cacheValidUpdate(self, fn, appends):
        """
        Is the cache valid for fn?
        Make thorough (slower) checks including timestamps.
        """
        # Is the cache enabled?
        if not self.has_cache:
            return False

        self.checked.add(fn)

        # File isn't in depends_cache
        if not fn in self.depends_cache:
            logger.debug(2, "Cache: %s is not cached", fn)
            return False

        mtime = bb.parse.cached_mtime_noerror(fn)

        # Check the file still exists
        if mtime == 0:
            logger.debug(2, "Cache: %s no longer exists", fn)
            self.remove(fn)
            return False

        info_array = self.depends_cache[fn]
        # Check the file's timestamp
        if mtime != info_array[0].timestamp:
            logger.debug(2, "Cache: %s changed", fn)
            self.remove(fn)
            return False

        # Check dependencies are still valid
        depends = info_array[0].file_depends
        if depends:
            for f, old_mtime in depends:
                fmtime = bb.parse.cached_mtime_noerror(f)
                # Check if the file still exists
                if old_mtime != 0 and fmtime == 0:
                    logger.debug(2, "Cache: %s's dependency %s was removed",
                                 fn, f)
                    self.remove(fn)
                    return False

                if (fmtime != old_mtime):
                    logger.debug(2, "Cache: %s's dependency %s changed",
                                 fn, f)
                    self.remove(fn)
                    return False

        if hasattr(info_array[0], 'file_checksums'):
            for _, fl in info_array[0].file_checksums.items():
                fl = fl.strip()
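                # fl is flattened text of <file>:<existed> entries, e.g.
                # (illustrative paths):
                #   "/path/a.patch:True /other/b.patch:False"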
                while fl:
                    # A .split() would be simpler, but spaces or colons in
                    # filenames would break it
                    a = fl.find(":True")
                    b = fl.find(":False")
                    if ((a < 0) and b) or ((b > 0) and (b < a)):
                        f = fl[:b+6]
                        fl = fl[b+7:]
                    elif ((b < 0) and a) or ((a > 0) and (a < b)):
                        f = fl[:a+5]
                        fl = fl[a+6:]
                    else:
                        break
                    fl = fl.strip()
if "*" in f:
|
|
|
|
continue
|
|
|
|
f, exist = f.split(":")
|
|
|
|
if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)):
|
|
|
|
logger.debug(2, "Cache: %s's file checksum list file %s changed",
|
2013-06-04 08:59:37 +00:00
|
|
|
fn, f)
|
|
|
|
self.remove(fn)
|
|
|
|
return False
|
|
|
|
|
2011-07-06 16:54:49 +00:00
|
|
|
if appends != info_array[0].appends:
|
|
|
|
logger.debug(2, "Cache: appends for %s changed", fn)
|
2013-05-29 04:01:18 +00:00
|
|
|
logger.debug(2, "%s to %s" % (str(appends), str(info_array[0].appends)))
|
2011-07-06 16:54:49 +00:00
|
|
|
self.remove(fn)
|
|
|
|
return False
|
|
|
|
|
2010-07-25 10:33:11 +00:00
|
|
|
invalid = False
|
2011-06-03 00:22:40 +00:00
|
|
|
for cls in info_array[0].variants:
|
2010-01-20 18:46:02 +00:00
|
|
|
virtualfn = self.realfn2virtual(fn, cls)
|
2010-11-17 00:43:33 +00:00
|
|
|
self.clean.add(virtualfn)
|
2010-11-16 19:58:52 +00:00
|
|
|
if virtualfn not in self.depends_cache:
|
2010-06-10 17:35:31 +00:00
|
|
|
logger.debug(2, "Cache: %s is not cached", virtualfn)
|
2010-07-25 10:33:11 +00:00
|
|
|
invalid = True
|
|
|
|
|
2010-11-18 03:27:25 +00:00
|
|
|
# If any one of the variants is not present, mark as invalid for all
|
2010-07-25 10:33:11 +00:00
|
|
|
if invalid:
|
2011-07-19 20:05:53 +00:00
|
|
|
for cls in info_array[0].variants:
|
2010-07-25 10:33:11 +00:00
|
|
|
virtualfn = self.realfn2virtual(fn, cls)
|
2010-11-13 23:36:37 +00:00
|
|
|
if virtualfn in self.clean:
|
|
|
|
logger.debug(2, "Cache: Removing %s from cache", virtualfn)
|
2010-11-17 00:43:33 +00:00
|
|
|
self.clean.remove(virtualfn)
|
2010-11-13 23:36:37 +00:00
|
|
|
if fn in self.clean:
|
|
|
|
logger.debug(2, "Cache: Marking %s as not clean", fn)
|
2010-11-17 00:43:33 +00:00
|
|
|
self.clean.remove(fn)
|
2010-07-25 10:33:11 +00:00
|
|
|
return False
|
2006-05-09 15:44:08 +00:00
|
|
|
|
2011-01-03 19:57:22 +00:00
|
|
|
self.clean.add(fn)
|
2010-01-20 18:46:02 +00:00
|
|
|
return True

    def remove(self, fn):
        """
        Remove a fn from the cache
        Called from the parser in error cases
        """
        if fn in self.depends_cache:
            logger.debug(1, "Removing %s from cache", fn)
            del self.depends_cache[fn]
        if fn in self.clean:
            logger.debug(1, "Marking %s as unclean", fn)
            self.clean.remove(fn)

    def sync(self):
        """
        Save the cache
        Called from the parser when complete (or exiting)
        """

        if not self.has_cache:
            return

        if self.cacheclean:
            logger.debug(2, "Cache is clean, not saving.")
            return

        file_dict = {}
        pickler_dict = {}
        for cache_class in self.caches_array:
            if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
                cache_class_name = cache_class.__name__
                cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
                file_dict[cache_class_name] = open(cachefile, "wb")
                pickler_dict[cache_class_name] = pickle.Pickler(file_dict[cache_class_name], pickle.HIGHEST_PROTOCOL)

        pickler_dict['CoreRecipeInfo'].dump(__cache_version__)
        pickler_dict['CoreRecipeInfo'].dump(bb.__version__)

        try:
            for key, info_array in self.depends_cache.iteritems():
                for info in info_array:
                    if isinstance(info, RecipeInfoCommon):
                        cache_class_name = info.__class__.__name__
                        pickler_dict[cache_class_name].dump(key)
                        pickler_dict[cache_class_name].dump(info)
        finally:
            for cache_class in self.caches_array:
                if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
                    cache_class_name = cache_class.__name__
                    file_dict[cache_class_name].close()

        del self.depends_cache

    @staticmethod
    def mtime(cachefile):
        return bb.parse.cached_mtime_noerror(cachefile)

    def add_info(self, filename, info_array, cacheData, parsed=None, watcher=None):
        if isinstance(info_array[0], CoreRecipeInfo) and (not info_array[0].skipped):
            cacheData.add_from_recipeinfo(filename, info_array)

            if watcher:
                watcher(info_array[0].file_depends)

        if not self.has_cache:
            return

        if (info_array[0].skipped or 'SRCREVINACTION' not in info_array[0].pv) and not info_array[0].nocache:
            if parsed:
                self.cacheclean = False
            self.depends_cache[filename] = info_array

    def add(self, file_name, data, cacheData, parsed=None):
        """
        Save data we need into the cache
        """

        realfn = self.virtualfn2realfn(file_name)[0]

        info_array = []
        for cache_class in self.caches_array:
            if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
                info_array.append(cache_class(realfn, data))
        self.add_info(file_name, info_array, cacheData, parsed)

    @staticmethod
    def load_bbfile(bbfile, appends, config):
        """
        Load and parse one .bb build file.
        Return the parsed data: a dict of datastores, keyed by variant.
        """
        chdir_back = False

        from bb import parse

        # expand tmpdir to include this topdir
        config.setVar('TMPDIR', config.getVar('TMPDIR', True) or "")
        bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
        oldpath = os.path.abspath(os.getcwd())
        parse.cached_mtime_noerror(bbfile_loc)
        bb_data = config.createCopy()
        # The ConfHandler first looks if there is a TOPDIR and if not
        # then it would call getcwd().
        # Previously, we chdir()ed to bbfile_loc, called the handler
        # and finally chdir()ed back, a couple of thousand times. We now
        # just fill in TOPDIR to point to bbfile_loc if there is no TOPDIR yet.
        if not bb_data.getVar('TOPDIR', False):
            chdir_back = True
            bb_data.setVar('TOPDIR', bbfile_loc)
        try:
            if appends:
                bb_data.setVar('__BBAPPEND', " ".join(appends))
            bb_data = parse.handle(bbfile, bb_data)
            if chdir_back:
                os.chdir(oldpath)
            return bb_data
        except:
            if chdir_back:
                os.chdir(oldpath)
            raise


def init(cooker):
    """
    The Objective: Cache the minimum amount of data possible yet get to the
    stage of building packages (i.e. tryBuild) without reparsing any .bb files.

    To do this, we intercept getVar calls and only cache the variables we see
    being accessed. We rely on the cache getVar calls being made for all
    variables bitbake might need to use to reach this stage. For each cached
    file we need to track:

    * Its mtime
    * The mtimes of all its dependencies
    * Whether it caused a parse.SkipRecipe exception

    Files causing parsing errors are evicted from the cache.
    """
    return Cache(cooker.configuration.data, cooker.configuration.data_hash)


class CacheData(object):
    """
    The data structures we compile from the cached data
    """

    def __init__(self, caches_array):
        self.caches_array = caches_array
        for cache_class in self.caches_array:
            if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
                cache_class.init_cacheData(self)

        # Direct cache variables
        self.task_queues = {}
        self.preferred = {}
        self.tasks = {}
        # Indirect cache variables (set elsewhere)
        self.ignored_dependencies = []
        self.world_target = set()
        self.bbfile_priority = {}

    def add_from_recipeinfo(self, fn, info_array):
        for info in info_array:
            info.add_cacheData(self, fn)


class MultiProcessCache(object):
    """
    BitBake multi-process cache implementation

    Used by the codeparser & file checksum caches
    """

    def __init__(self):
        self.cachefile = None
        self.cachedata = self.create_cachedata()
        self.cachedata_extras = self.create_cachedata()

    def init_cache(self, d):
        cachedir = (d.getVar("PERSISTENT_DIR", True) or
                    d.getVar("CACHE", True))
        if cachedir in [None, '']:
            return
        bb.utils.mkdirhier(cachedir)
        self.cachefile = os.path.join(cachedir, self.__class__.cache_file_name)
        logger.debug(1, "Using cache in '%s'", self.cachefile)

        glf = bb.utils.lockfile(self.cachefile + ".lock")

        try:
            with open(self.cachefile, "rb") as f:
                p = pickle.Unpickler(f)
                data, version = p.load()
        except:
            bb.utils.unlockfile(glf)
            return

        bb.utils.unlockfile(glf)

        if version != self.__class__.CACHE_VERSION:
            return

        self.cachedata = data

    def create_cachedata(self):
        data = [{}]
        return data

    def save_extras(self, d):
        if not self.cachefile:
            return

        glf = bb.utils.lockfile(self.cachefile + ".lock", shared=True)

        i = os.getpid()
        lf = None
        while not lf:
            lf = bb.utils.lockfile(self.cachefile + ".lock." + str(i), retry=False)
            if not lf or os.path.exists(self.cachefile + "-" + str(i)):
                if lf:
                    bb.utils.unlockfile(lf)
                lf = None
                i = i + 1
                continue

            with open(self.cachefile + "-" + str(i), "wb") as f:
                p = pickle.Pickler(f, -1)
                p.dump([self.cachedata_extras, self.__class__.CACHE_VERSION])

        bb.utils.unlockfile(lf)
        bb.utils.unlockfile(glf)

    def merge_data(self, source, dest):
        for j in range(0, len(dest)):
            for h in source[j]:
                if h not in dest[j]:
                    dest[j][h] = source[j][h]

    def save_merge(self, d):
        if not self.cachefile:
            return

        glf = bb.utils.lockfile(self.cachefile + ".lock")

        data = self.cachedata

        for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]:
            f = os.path.join(os.path.dirname(self.cachefile), f)
            try:
                with open(f, "rb") as fd:
                    p = pickle.Unpickler(fd)
                    extradata, version = p.load()
            except (IOError, EOFError):
                os.unlink(f)
                continue

            if version != self.__class__.CACHE_VERSION:
                os.unlink(f)
                continue

            self.merge_data(extradata, data)
            os.unlink(f)

        with open(self.cachefile, "wb") as f:
            p = pickle.Pickler(f, -1)
            p.dump([data, self.__class__.CACHE_VERSION])

        bb.utils.unlockfile(glf)
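
# A minimal usage sketch (illustrative; ExampleCache is hypothetical, the
# real subclasses being the codeparser and file checksum caches noted in
# the docstring above):
#
#   class ExampleCache(MultiProcessCache):
#       cache_file_name = "example_cache.dat"
#       CACHE_VERSION = 1
#
#   cache = ExampleCache()
#   cache.init_cache(d)                          # d: a BitBake datastore
#   cache.cachedata_extras[0]['key'] = 'value'   # record new data
#   cache.save_extras(d)                         # in each worker process
#   cache.save_merge(d)                          # in the parent afterwards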