Diffstat (limited to 'bitbake/lib/bb/cooker.py')
-rw-r--r--  bitbake/lib/bb/cooker.py  711
1 file changed, 387 insertions(+), 324 deletions(-)
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index af794b4c42..c5bfef55d6 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -13,7 +13,6 @@ import sys, os, glob, os.path, re, time
import itertools
import logging
import multiprocessing
-import sre_constants
import threading
from io import StringIO, UnsupportedOperation
from contextlib import closing
@@ -23,7 +22,6 @@ from bb import utils, data, parse, event, cache, providers, taskdata, runqueue,
import queue
import signal
import prserv.serv
-import pyinotify
import json
import pickle
import codecs
@@ -81,7 +79,7 @@ class SkippedPackage:
class CookerFeatures(object):
- _feature_list = [HOB_EXTRA_CACHES, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS] = list(range(3))
+ _feature_list = [HOB_EXTRA_CACHES, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS, RECIPE_SIGGEN_INFO] = list(range(4))
def __init__(self):
self._features=set()
@@ -104,12 +102,15 @@ class CookerFeatures(object):
class EventWriter:
def __init__(self, cooker, eventfile):
- self.file_inited = None
self.cooker = cooker
self.eventfile = eventfile
self.event_queue = []
- def write_event(self, event):
+ def write_variables(self):
+ with open(self.eventfile, "a") as f:
+ f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))
+
+ def send(self, event):
with open(self.eventfile, "a") as f:
try:
str_event = codecs.encode(pickle.dumps(event), 'base64').decode('utf-8')
@@ -119,28 +120,6 @@ class EventWriter:
import traceback
print(err, traceback.format_exc())
- def send(self, event):
- if self.file_inited:
- # we have the file, just write the event
- self.write_event(event)
- else:
- # init on bb.event.BuildStarted
- name = "%s.%s" % (event.__module__, event.__class__.__name__)
- if name in ("bb.event.BuildStarted", "bb.cooker.CookerExit"):
- with open(self.eventfile, "w") as f:
- f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))
-
- self.file_inited = True
-
- # write pending events
- for evt in self.event_queue:
- self.write_event(evt)
-
- # also write the current event
- self.write_event(event)
- else:
- # queue all events until the file is inited
- self.event_queue.append(event)
#============================================================================#
# BBCooker
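
The reworked EventWriter drops the lazy file_inited/queue dance: write_variables() dumps the variable data explicitly when a build starts, and send() now always appends directly. Each event is pickled, base64-encoded and written as one JSON line; a minimal round-trip sketch (the "vars" key and the JSON envelope are assumptions, since the body of send() is truncated in the hunk above):

    import codecs, json, pickle

    def encode_event(event):
        # What send() does per event: pickle, base64, one JSON object per line.
        payload = codecs.encode(pickle.dumps(event), 'base64').decode('utf-8')
        return json.dumps({"class": event.__class__.__name__, "vars": payload})

    def decode_event(line):
        # Assumed inverse, for reading the event log back.
        data = json.loads(line)
        return pickle.loads(codecs.decode(data["vars"].encode('utf-8'), 'base64'))
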
@@ -150,8 +129,10 @@ class BBCooker:
Manages one bitbake build run
"""
- def __init__(self, featureSet=None, idleCallBackRegister=None):
+ def __init__(self, featureSet=None, server=None):
self.recipecaches = None
+ self.baseconfig_valid = False
+ self.parsecache_valid = False
self.eventlog = None
self.skiplist = {}
self.featureset = CookerFeatures()
@@ -159,34 +140,22 @@ class BBCooker:
for f in featureSet:
self.featureset.setFeature(f)
+ self.orig_syspath = sys.path.copy()
+ self.orig_sysmodules = [*sys.modules]
+
self.configuration = bb.cookerdata.CookerConfiguration()
- self.idleCallBackRegister = idleCallBackRegister
+ self.process_server = server
+ self.idleCallBackRegister = None
+ self.waitIdle = None
+ if server:
+ self.idleCallBackRegister = server.register_idle_function
+ self.waitIdle = server.wait_for_idle
bb.debug(1, "BBCooker starting %s" % time.time())
- sys.stdout.flush()
-
- self.configwatcher = pyinotify.WatchManager()
- bb.debug(1, "BBCooker pyinotify1 %s" % time.time())
- sys.stdout.flush()
-
- self.configwatcher.bbseen = set()
- self.configwatcher.bbwatchedfiles = set()
- self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
- bb.debug(1, "BBCooker pyinotify2 %s" % time.time())
- sys.stdout.flush()
- self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
- pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \
- pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
- self.watcher = pyinotify.WatchManager()
- bb.debug(1, "BBCooker pyinotify3 %s" % time.time())
- sys.stdout.flush()
- self.watcher.bbseen = set()
- self.watcher.bbwatchedfiles = set()
- self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
-
- bb.debug(1, "BBCooker pyinotify complete %s" % time.time())
- sys.stdout.flush()
+
+ self.configwatched = {}
+ self.parsewatched = {}
# If being called by something like tinfoil, we need to clean cached data
# which may now be invalid
@@ -197,14 +166,6 @@ class BBCooker:
self.hashserv = None
self.hashservaddr = None
- self.inotify_modified_files = []
-
- def _process_inotify_updates(server, cooker, abort):
- cooker.process_inotify_updates()
- return 1.0
-
- self.idleCallBackRegister(_process_inotify_updates, self)
-
# TOSTOP must not be set or our children will hang when they output
try:
fd = sys.stdout.fileno()
@@ -218,7 +179,7 @@ class BBCooker:
except UnsupportedOperation:
pass
- self.command = bb.command.Command(self)
+ self.command = bb.command.Command(self, self.process_server)
self.state = state.initial
self.parser = None
@@ -228,84 +189,37 @@ class BBCooker:
signal.signal(signal.SIGHUP, self.sigterm_exception)
bb.debug(1, "BBCooker startup complete %s" % time.time())
- sys.stdout.flush()
def init_configdata(self):
if not hasattr(self, "data"):
self.initConfigurationData()
bb.debug(1, "BBCooker parsed base configuration %s" % time.time())
- sys.stdout.flush()
self.handlePRServ()
- def process_inotify_updates(self):
- for n in [self.confignotifier, self.notifier]:
- if n.check_events(timeout=0):
- # read notified events and enqueue them
- n.read_events()
- n.process_events()
-
- def config_notifications(self, event):
- if event.maskname == "IN_Q_OVERFLOW":
- bb.warn("inotify event queue overflowed, invalidating caches.")
- self.parsecache_valid = False
- self.baseconfig_valid = False
- bb.parse.clear_cache()
- return
- if not event.pathname in self.configwatcher.bbwatchedfiles:
- return
- if not event.pathname in self.inotify_modified_files:
- self.inotify_modified_files.append(event.pathname)
- self.baseconfig_valid = False
-
- def notifications(self, event):
- if event.maskname == "IN_Q_OVERFLOW":
- bb.warn("inotify event queue overflowed, invalidating caches.")
- self.parsecache_valid = False
- bb.parse.clear_cache()
- return
- if event.pathname.endswith("bitbake-cookerdaemon.log") \
- or event.pathname.endswith("bitbake.lock"):
- return
- if not event.pathname in self.inotify_modified_files:
- self.inotify_modified_files.append(event.pathname)
- self.parsecache_valid = False
+ def _baseconfig_set(self, value):
+ if value and not self.baseconfig_valid:
+ bb.server.process.serverlog("Base config valid")
+ elif not value and self.baseconfig_valid:
+ bb.server.process.serverlog("Base config invalidated")
+ self.baseconfig_valid = value
+
+ def _parsecache_set(self, value):
+ if value and not self.parsecache_valid:
+ bb.server.process.serverlog("Parse cache valid")
+ elif not value and self.parsecache_valid:
+ bb.server.process.serverlog("Parse cache invalidated")
+ self.parsecache_valid = value
+
+ def add_filewatch(self, deps, configwatcher=False):
+ if configwatcher:
+ watcher = self.configwatched
+ else:
+ watcher = self.parsewatched
- def add_filewatch(self, deps, watcher=None, dirs=False):
- if not watcher:
- watcher = self.watcher
for i in deps:
- watcher.bbwatchedfiles.add(i[0])
- if dirs:
- f = i[0]
- else:
- f = os.path.dirname(i[0])
- if f in watcher.bbseen:
- continue
- watcher.bbseen.add(f)
- watchtarget = None
- while True:
- # We try and add watches for files that don't exist but if they did, would influence
- # the parser. The parent directory of these files may not exist, in which case we need
- # to watch any parent that does exist for changes.
- try:
- watcher.add_watch(f, self.watchmask, quiet=False)
- if watchtarget:
- watcher.bbwatchedfiles.add(watchtarget)
- break
- except pyinotify.WatchManagerError as e:
- if 'ENOENT' in str(e):
- watchtarget = f
- f = os.path.dirname(f)
- if f in watcher.bbseen:
- break
- watcher.bbseen.add(f)
- continue
- if 'ENOSPC' in str(e):
- providerlog.error("No space left on device or exceeds fs.inotify.max_user_watches?")
- providerlog.error("To check max_user_watches: sysctl -n fs.inotify.max_user_watches.")
- providerlog.error("To modify max_user_watches: sysctl -n -w fs.inotify.max_user_watches=<value>.")
- providerlog.error("Root privilege is required to modify max_user_watches.")
- raise
+ f = i[0]
+ mtime = i[1]
+ watcher[f] = mtime
def sigterm_exception(self, signum, stackframe):
if signum == signal.SIGTERM:
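
The pyinotify machinery above is replaced by simple mtime bookkeeping: add_filewatch() now records the (file, mtime) pairs seen at parse time in plain dictionaries, and validity is re-checked on demand in revalidateCaches() below rather than via kernel notifications. A standalone sketch of the record/check pattern, with check_mtime() written to the behaviour the calls below assume:

    import os

    def record_mtime(watched, path):
        # Record the mtime observed when the file influenced a parse; use 0 for
        # a missing file so that its later appearance also invalidates the cache.
        try:
            watched[path] = os.stat(path).st_mtime
        except OSError:
            watched[path] = 0

    def check_mtime(path, old_mtime):
        # Assumed behaviour of bb.parse.check_mtime(): True if unchanged.
        try:
            current = os.stat(path).st_mtime
        except OSError:
            current = 0
        return current == old_mtime
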
@@ -313,6 +227,7 @@ class BBCooker:
elif signum == signal.SIGHUP:
bb.warn("Cooker received SIGHUP, shutting down...")
self.state = state.forceshutdown
+ bb.event._should_exit.set()
def setFeatures(self, features):
# we only accept a new feature set if we're in state initial, so we can reset without problems
@@ -330,6 +245,13 @@ class BBCooker:
self.state = state.initial
self.caches_array = []
+ sys.path = self.orig_syspath.copy()
+ for mod in [*sys.modules]:
+ if mod not in self.orig_sysmodules:
+ del sys.modules[mod]
+
+ self.configwatched = {}
+
# Need to preserve BB_CONSOLELOG over resets
consolelog = None
if hasattr(self, "data"):
@@ -338,12 +260,12 @@ class BBCooker:
if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset:
self.enableDataTracking()
- all_extra_cache_names = []
+ caches_name_array = ['bb.cache:CoreRecipeInfo']
# We hardcode all known cache types in a single place, here.
if CookerFeatures.HOB_EXTRA_CACHES in self.featureset:
- all_extra_cache_names.append("bb.cache_extra:HobRecipeInfo")
-
- caches_name_array = ['bb.cache:CoreRecipeInfo'] + all_extra_cache_names
+ caches_name_array.append("bb.cache_extra:HobRecipeInfo")
+ if CookerFeatures.RECIPE_SIGGEN_INFO in self.featureset:
+ caches_name_array.append("bb.cache:SiggenRecipeInfo")
# At least CoreRecipeInfo will be loaded, so caches_array will never be empty!
# This is the entry point, no further check needed!
@@ -362,6 +284,10 @@ class BBCooker:
self.data_hash = self.databuilder.data_hash
self.extraconfigdata = {}
+ eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG")
+ if not self.configuration.writeeventlog and eventlog:
+ self.setupEventLog(eventlog)
+
if consolelog:
self.data.setVar("BB_CONSOLELOG", consolelog)
@@ -371,11 +297,10 @@ class BBCooker:
self.disableDataTracking()
for mc in self.databuilder.mcdata.values():
- mc.renameVar("__depends", "__base_depends")
- self.add_filewatch(mc.getVar("__base_depends", False), self.configwatcher)
+ self.add_filewatch(mc.getVar("__base_depends", False), configwatcher=True)
- self.baseconfig_valid = True
- self.parsecache_valid = False
+ self._baseconfig_set(True)
+ self._parsecache_set(False)
def handlePRServ(self):
# Setup a PR Server based on the new configuration
@@ -388,18 +313,26 @@ class BBCooker:
# Create a new hash server bound to a unix domain socket
if not self.hashserv:
dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
+ upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
+ if upstream:
+ import socket
+ try:
+ sock = socket.create_connection(upstream.split(":"), 5)
+ sock.close()
+ except socket.error as e:
+ bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
+ % (upstream, repr(e)))
+
self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
self.hashserv = hashserv.create_server(
self.hashservaddr,
dbfile,
sync=False,
- upstream=self.data.getVar("BB_HASHSERVE_UPSTREAM") or None,
+ upstream=upstream,
)
- self.hashserv.serve_as_process()
- self.data.setVar("BB_HASHSERVE", self.hashservaddr)
- self.databuilder.origdata.setVar("BB_HASHSERVE", self.hashservaddr)
- self.databuilder.data.setVar("BB_HASHSERVE", self.hashservaddr)
+ self.hashserv.serve_as_process(log_level=logging.WARNING)
for mc in self.databuilder.mcdata:
+ self.databuilder.mcorigdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
bb.parse.init_parser(self.data)
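
Before binding the local hash server, the new code probes BB_HASHSERVE_UPSTREAM with a plain TCP connect so that a bad address produces an early warning instead of a stall deep inside a build. The same probe as a standalone helper (the function name is hypothetical):

    import socket

    def probe_upstream(upstream, timeout=5):
        # upstream is "host:port"; socket.create_connection() accepts the
        # split pair directly, which is what the code above relies on.
        try:
            sock = socket.create_connection(upstream.split(":"), timeout)
            sock.close()
            return True
        except socket.error as e:
            print("cannot reach hash equivalence server at '%s': %r" % (upstream, e))
            return False
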
@@ -414,6 +347,29 @@ class BBCooker:
if hasattr(self, "data"):
self.data.disableTracking()
+ def revalidateCaches(self):
+ bb.parse.clear_cache()
+
+ clean = True
+ for f in self.configwatched:
+ if not bb.parse.check_mtime(f, self.configwatched[f]):
+ bb.server.process.serverlog("Found %s changed, invalid cache" % f)
+ self._baseconfig_set(False)
+ self._parsecache_set(False)
+ clean = False
+ break
+
+ if clean:
+ for f in self.parsewatched:
+ if not bb.parse.check_mtime(f, self.parsewatched[f]):
+ bb.server.process.serverlog("Found %s changed, invalid cache" % f)
+ self._parsecache_set(False)
+ clean = False
+ break
+
+ if not clean:
+ bb.parse.BBHandler.cached_statements = {}
+
def parseConfiguration(self):
self.updateCacheSync()
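
revalidateCaches() checks the two watch dictionaries in order: any changed configuration file invalidates both the base configuration and the parse cache, while a changed recipe file only invalidates the parse cache. Using the check_mtime() sketch above:

    def revalidate(configwatched, parsewatched):
        # Mirrors revalidateCaches(): config changes dominate recipe changes.
        # Returns (baseconfig_valid, parsecache_valid).
        if any(not check_mtime(f, m) for f, m in configwatched.items()):
            return (False, False)
        if any(not check_mtime(f, m) for f, m in parsewatched.items()):
            return (True, False)
        return (True, True)
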
@@ -432,8 +388,24 @@ class BBCooker:
self.recipecaches[mc] = bb.cache.CacheData(self.caches_array)
self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS"))
-
- self.parsecache_valid = False
+ self.collections = {}
+ for mc in self.multiconfigs:
+ self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc)
+
+ self._parsecache_set(False)
+
+ def setupEventLog(self, eventlog):
+ if self.eventlog and self.eventlog[0] != eventlog:
+ bb.event.unregister_UIHhandler(self.eventlog[1])
+ self.eventlog = None
+ if not self.eventlog or self.eventlog[0] != eventlog:
+ # we log all events to a file if so directed
+ # register the log file writer as UI Handler
+ if not os.path.exists(os.path.dirname(eventlog)):
+ bb.utils.mkdirhier(os.path.dirname(eventlog))
+ writer = EventWriter(self, eventlog)
+ EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
+ self.eventlog = (eventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)), writer)
def updateConfigOpts(self, options, environment, cmdline):
self.ui_cmdline = cmdline
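
setupEventLog() registers the writer through a one-field namedtuple because a UI handler only needs an .event attribute whose send() gets called per event, and a namedtuple is the cheapest object of that shape. A sketch of the shim, assuming that attribute contract is all register_UIHhandler requires:

    from collections import namedtuple

    class FakeWriter:
        def send(self, event):
            print("logged:", event)

    EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
    handler = EventLogWriteHandler(FakeWriter())
    handler.event.send("BuildStarted")   # the call bb.event makes for each event
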
@@ -454,14 +426,7 @@ class BBCooker:
setattr(self.configuration, o, options[o])
if self.configuration.writeeventlog:
- if self.eventlog and self.eventlog[0] != self.configuration.writeeventlog:
- bb.event.unregister_UIHhandler(self.eventlog[1])
- if not self.eventlog or self.eventlog[0] != self.configuration.writeeventlog:
- # we log all events to a file if so directed
- # register the log file writer as UI Handler
- writer = EventWriter(self, self.configuration.writeeventlog)
- EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
- self.eventlog = (self.configuration.writeeventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)))
+ self.setupEventLog(self.configuration.writeeventlog)
bb.msg.loggerDefaultLogLevel = self.configuration.default_loglevel
bb.msg.loggerDefaultDomains = self.configuration.debug_domains
@@ -491,19 +456,11 @@ class BBCooker:
# Now update all the variables not in the datastore to match
self.configuration.env = environment
+ self.revalidateCaches()
if not clean:
logger.debug("Base environment change, triggering reparse")
self.reset()
- def runCommands(self, server, data, abort):
- """
- Run any queued asynchronous command
- This is done by the idle handler so it runs in true context rather than
- tied to any UI.
- """
-
- return self.command.runAsyncCommand()
-
def showVersions(self):
(latest_versions, preferred_versions, required) = self.findProviders()
@@ -545,6 +502,8 @@ class BBCooker:
if not orig_tracking:
self.enableDataTracking()
self.reset()
+ # reset() resets to the UI requested value so we have to redo this
+ self.enableDataTracking()
def mc_base(p):
if p.startswith('mc:'):
@@ -568,21 +527,21 @@ class BBCooker:
if pkgs_to_build[0] in set(ignore.split()):
bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0])
- taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.abort, allowincomplete=True)
+ taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.halt, allowincomplete=True)
mc = runlist[0][0]
fn = runlist[0][3]
if fn:
try:
- bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
- envdata = bb_caches[mc].loadDataFull(fn, self.collections[mc].get_file_appends(fn))
+ layername = self.collections[mc].calc_bbfile_priority(fn)[2]
+ envdata = self.databuilder.parseRecipe(fn, self.collections[mc].get_file_appends(fn), layername)
except Exception as e:
parselog.exception("Unable to read %s", fn)
raise
else:
if not mc in self.databuilder.mcdata:
- bb.fatal('Not multiconfig named "%s" found' % mc)
+ bb.fatal('No multiconfig named "%s" found' % mc)
envdata = self.databuilder.mcdata[mc]
data.expandKeys(envdata)
parse.ast.runAnonFuncs(envdata)
@@ -597,7 +556,7 @@ class BBCooker:
data.emit_env(env, envdata, True)
logger.plain(env.getvalue())
- # emit the metadata which isnt valid shell
+ # emit the metadata which isn't valid shell
for e in sorted(envdata.keys()):
if envdata.getVarFlag(e, 'func', False) and envdata.getVarFlag(e, 'python', False):
logger.plain("\npython %s () {\n%s}\n", e, envdata.getVar(e, False))
@@ -606,7 +565,7 @@ class BBCooker:
self.disableDataTracking()
self.reset()
- def buildTaskData(self, pkgs_to_build, task, abort, allowincomplete=False):
+ def buildTaskData(self, pkgs_to_build, task, halt, allowincomplete=False):
"""
Prepare a runqueue and taskdata object for iteration over pkgs_to_build
"""
@@ -653,7 +612,7 @@ class BBCooker:
localdata = {}
for mc in self.multiconfigs:
- taskdata[mc] = bb.taskdata.TaskData(abort, skiplist=self.skiplist, allowincomplete=allowincomplete)
+ taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist, allowincomplete=allowincomplete)
localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
bb.data.expandKeys(localdata[mc])
@@ -702,19 +661,18 @@ class BBCooker:
taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
mcdeps |= set(taskdata[mc].get_mcdepends())
new = False
- for mc in self.multiconfigs:
- for k in mcdeps:
- if k in seen:
- continue
- l = k.split(':')
- depmc = l[2]
- if depmc not in self.multiconfigs:
- bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
- else:
- logger.debug("Adding providers for multiconfig dependency %s" % l[3])
- taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
- seen.add(k)
- new = True
+ for k in mcdeps:
+ if k in seen:
+ continue
+ l = k.split(':')
+ depmc = l[2]
+ if depmc not in self.multiconfigs:
+ bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
+ else:
+ logger.debug("Adding providers for multiconfig dependency %s" % l[3])
+ taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
+ seen.add(k)
+ new = True
for mc in self.multiconfigs:
taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
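
The multiconfig dependency loop above is flattened (the outer per-mc iteration was redundant) but the parsing is unchanged: each mcdeps entry is a task reference of the form "mc:FROM_MC:TO_MC:PN:task", so l[2] names the multiconfig that must provide l[3]. For example:

    k = "mc:initramfs:target:gcc:do_populate_sysroot"   # hypothetical entry
    l = k.split(':')
    depmc, provider = l[2], l[3]
    assert (depmc, provider) == ("target", "gcc")
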
@@ -727,7 +685,7 @@ class BBCooker:
Prepare a runqueue and taskdata object for iteration over pkgs_to_build
"""
- # We set abort to False here to prevent unbuildable targets raising
+ # We set halt to False here to prevent unbuildable targets raising
# an exception when we're just generating data
taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
@@ -804,7 +762,9 @@ class BBCooker:
for dep in rq.rqdata.runtaskentries[tid].depends:
(depmc, depfn, _, deptaskfn) = bb.runqueue.split_tid_mcfn(dep)
deppn = self.recipecaches[depmc].pkg_fn[deptaskfn]
- depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep)))
+ if depmc:
+ depmc = "mc:" + depmc + ":"
+ depend_tree["tdepends"][dotname].append("%s%s.%s" % (depmc, deppn, bb.runqueue.taskname_from_tid(dep)))
if taskfn not in seen_fns:
seen_fns.append(taskfn)
packages = []
@@ -1234,15 +1194,15 @@ class BBCooker:
except bb.utils.VersionStringException as vse:
bb.fatal('Error parsing LAYERRECOMMENDS_%s: %s' % (c, str(vse)))
if not res:
- parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec, layerver)
+ parselog.debug3("Layer '%s' recommends version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec, layerver)
continue
else:
- parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec)
+ parselog.debug3("Layer '%s' recommends version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec)
continue
- parselog.debug(3,"Layer '%s' recommends layer '%s', so we are adding it", c, rec)
+ parselog.debug3("Layer '%s' recommends layer '%s', so we are adding it", c, rec)
collection_depends[c].append(rec)
else:
- parselog.debug(3,"Layer '%s' recommends layer '%s', but this layer is not enabled in your configuration", c, rec)
+ parselog.debug3("Layer '%s' recommends layer '%s', but this layer is not enabled in your configuration", c, rec)
# Recursively work out collection priorities based on dependencies
def calc_layer_priority(collection):
@@ -1254,7 +1214,7 @@ class BBCooker:
if depprio > max_depprio:
max_depprio = depprio
max_depprio += 1
- parselog.debug(1, "Calculated priority of layer %s as %d", collection, max_depprio)
+ parselog.debug("Calculated priority of layer %s as %d", collection, max_depprio)
collection_priorities[collection] = max_depprio
# Calculate all layer priorities using calc_layer_priority and store in bbfile_config_priorities
@@ -1266,7 +1226,7 @@ class BBCooker:
errors = True
continue
elif regex == "":
- parselog.debug(1, "BBFILE_PATTERN_%s is empty" % c)
+ parselog.debug("BBFILE_PATTERN_%s is empty" % c)
cre = re.compile('^NULL$')
errors = False
else:
@@ -1313,8 +1273,8 @@ class BBCooker:
if bf.startswith("/") or bf.startswith("../"):
bf = os.path.abspath(bf)
- self.collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
- filelist, masked, searchdirs = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
+ collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
+ filelist, masked, searchdirs = collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
try:
os.stat(bf)
bf = os.path.abspath(bf)
@@ -1380,7 +1340,8 @@ class BBCooker:
bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
- infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn))
+ layername = self.collections[mc].calc_bbfile_priority(fn)[2]
+ infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername)
infos = dict(infos)
fn = bb.cache.realfn2virtual(fn, cls, mc)
@@ -1406,14 +1367,16 @@ class BBCooker:
self.recipecaches[mc].rundeps[fn] = defaultdict(list)
self.recipecaches[mc].runrecs[fn] = defaultdict(list)
+ bb.parse.siggen.setup_datacache(self.recipecaches)
+
# Invalidate task for target if force mode active
if self.configuration.force:
logger.verbose("Invalidate task %s, %s", task, fn)
- bb.parse.siggen.invalidate_task(task, self.recipecaches[mc], fn)
+ bb.parse.siggen.invalidate_task(task, fn)
# Setup taskdata structure
taskdata = {}
- taskdata[mc] = bb.taskdata.TaskData(self.configuration.abort)
+ taskdata[mc] = bb.taskdata.TaskData(self.configuration.halt)
taskdata[mc].add_provider(self.databuilder.mcdata[mc], self.recipecaches[mc], item)
if quietlog:
@@ -1423,17 +1386,20 @@ class BBCooker:
buildname = self.databuilder.mcdata[mc].getVar("BUILDNAME")
if fireevents:
bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.databuilder.mcdata[mc])
+ if self.eventlog:
+ self.eventlog[2].write_variables()
+ bb.event.enable_heartbeat()
# Execute the runqueue
runlist = [[mc, item, task, fn]]
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
- def buildFileIdle(server, rq, abort):
+ def buildFileIdle(server, rq, halt):
msg = None
interrupted = 0
- if abort or self.state == state.forceshutdown:
+ if halt or self.state == state.forceshutdown:
rq.finish_runqueue(True)
msg = "Forced shutdown"
interrupted = 2
@@ -1448,37 +1414,68 @@ class BBCooker:
failures += len(exc.args)
retval = False
except SystemExit as exc:
- self.command.finishAsyncCommand(str(exc))
if quietlog:
bb.runqueue.logger.setLevel(rqloglevel)
- return False
+ return bb.server.process.idleFinish(str(exc))
if not retval:
if fireevents:
bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.databuilder.mcdata[mc])
- self.command.finishAsyncCommand(msg)
+ bb.event.disable_heartbeat()
# We trashed self.recipecaches above
- self.parsecache_valid = False
+ self._parsecache_set(False)
self.configuration.limited_deps = False
bb.parse.siggen.reset(self.data)
if quietlog:
bb.runqueue.logger.setLevel(rqloglevel)
- return False
+ return bb.server.process.idleFinish(msg)
if retval is True:
return True
return retval
self.idleCallBackRegister(buildFileIdle, rq)
+ def getTaskSignatures(self, target, tasks):
+ sig = []
+ getAllTaskSignatures = False
+
+ if not tasks:
+ tasks = ["do_build"]
+ getAllTaskSignatures = True
+
+ for task in tasks:
+ taskdata, runlist = self.buildTaskData(target, task, self.configuration.halt)
+ rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
+ rq.rqdata.prepare()
+
+ for l in runlist:
+ mc, pn, taskname, fn = l
+
+ taskdep = rq.rqdata.dataCaches[mc].task_deps[fn]
+ for t in taskdep['tasks']:
+ if t in taskdep['nostamp'] or "setscene" in t:
+ continue
+ tid = bb.runqueue.build_tid(mc, fn, t)
+
+ if t in task or getAllTaskSignatures:
+ try:
+ rq.rqdata.prepare_task_hash(tid)
+ sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
+ except KeyError:
+ sig.append(self.getTaskSignatures(target, [t])[0])
+
+ return sig
+
def buildTargets(self, targets, task):
"""
Attempt to build the targets specified
"""
- def buildTargetsIdle(server, rq, abort):
+ def buildTargetsIdle(server, rq, halt):
msg = None
interrupted = 0
- if abort or self.state == state.forceshutdown:
+ if halt or self.state == state.forceshutdown:
+ bb.event._should_exit.set()
rq.finish_runqueue(True)
msg = "Forced shutdown"
interrupted = 2
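
getTaskSignatures() above is new: it prepares a runqueue for the requested target, walks the task dependency data, and collects unihashes, recursing per task when a hash cannot be computed directly. A hypothetical invocation, assuming a fully initialised BBCooker named cooker:

    # Returns [pn, taskname, unihash] triples for the requested tasks.
    for pn, task, unihash in cooker.getTaskSignatures(["busybox"], ["do_compile"]):
        print("%s:%s -> %s" % (pn, task, unihash))
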
@@ -1493,16 +1490,16 @@ class BBCooker:
failures += len(exc.args)
retval = False
except SystemExit as exc:
- self.command.finishAsyncCommand(str(exc))
- return False
+ return bb.server.process.idleFinish(str(exc))
if not retval:
try:
for mc in self.multiconfigs:
bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, targets, failures, interrupted), self.databuilder.mcdata[mc])
finally:
- self.command.finishAsyncCommand(msg)
- return False
+ bb.event.disable_heartbeat()
+ return bb.server.process.idleFinish(msg)
+
if retval is True:
return True
return retval
@@ -1521,7 +1518,7 @@ class BBCooker:
bb.event.fire(bb.event.BuildInit(packages), self.data)
- taskdata, runlist = self.buildTaskData(targets, task, self.configuration.abort)
+ taskdata, runlist = self.buildTaskData(targets, task, self.configuration.halt)
buildname = self.data.getVar("BUILDNAME", False)
@@ -1534,6 +1531,9 @@ class BBCooker:
for mc in self.multiconfigs:
bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.databuilder.mcdata[mc])
+ if self.eventlog:
+ self.eventlog[2].write_variables()
+ bb.event.enable_heartbeat()
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
if 'universe' in targets:
@@ -1543,7 +1543,13 @@ class BBCooker:
def getAllKeysWithFlags(self, flaglist):
+ def dummy_autorev(d):
+ return
+
dump = {}
+ # Horrible but for now we need to avoid any side effects of autorev being called
+ saved = bb.fetch2.get_autorev
+ bb.fetch2.get_autorev = dummy_autorev
for k in self.data.keys():
try:
expand = True
@@ -1563,6 +1569,7 @@ class BBCooker:
dump[k][d] = None
except Exception as e:
print(e)
+ bb.fetch2.get_autorev = saved
return dump
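
getAllKeysWithFlags() now monkeypatches bb.fetch2.get_autorev around the variable dump so that expanding revision variables cannot trigger network access as a side effect. A hypothetical context-manager form of the same save/replace/restore pattern, which guarantees the restore even if the dump raises:

    from contextlib import contextmanager

    @contextmanager
    def suppress_autorev(fetch2_module):
        # fetch2_module stands in for bb.fetch2; same pattern, factored out.
        saved = fetch2_module.get_autorev
        fetch2_module.get_autorev = lambda d: None
        try:
            yield
        finally:
            fetch2_module.get_autorev = saved
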
@@ -1570,13 +1577,6 @@ class BBCooker:
if self.state == state.running:
return
- # reload files for which we got notifications
- for p in self.inotify_modified_files:
- bb.parse.update_cache(p)
- if p in bb.parse.BBHandler.cached_statements:
- del bb.parse.BBHandler.cached_statements[p]
- self.inotify_modified_files = []
-
if not self.baseconfig_valid:
logger.debug("Reloading base configuration data")
self.initConfigurationData()
@@ -1589,7 +1589,7 @@ class BBCooker:
if self.state in (state.shutdown, state.forceshutdown, state.error):
if hasattr(self.parser, 'shutdown'):
- self.parser.shutdown(clean=False, force = True)
+ self.parser.shutdown(clean=False)
self.parser.final_cleanup()
raise bb.BBHandledException()
@@ -1597,6 +1597,9 @@ class BBCooker:
self.updateCacheSync()
if self.state != state.parsing and not self.parsecache_valid:
+ bb.server.process.serverlog("Parsing started")
+ self.parsewatched = {}
+
bb.parse.siggen.reset(self.data)
self.parseConfiguration ()
if CookerFeatures.SEND_SANITYEVENTS in self.featureset:
@@ -1610,30 +1613,27 @@ class BBCooker:
for dep in self.configuration.extra_assume_provided:
self.recipecaches[mc].ignored_dependencies.add(dep)
- self.collections = {}
-
mcfilelist = {}
total_masked = 0
searchdirs = set()
for mc in self.multiconfigs:
- self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc)
(filelist, masked, search) = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
mcfilelist[mc] = filelist
total_masked += masked
searchdirs |= set(search)
- # Add inotify watches for directories searched for bb/bbappend files
+ # Add mtimes for directories searched for bb/bbappend files
for dirent in searchdirs:
- self.add_filewatch([[dirent]], dirs=True)
+ self.add_filewatch([(dirent, bb.parse.cached_mtime_noerror(dirent))])
self.parser = CookerParser(self, mcfilelist, total_masked)
- self.parsecache_valid = True
+ self._parsecache_set(True)
self.state = state.parsing
if not self.parser.parse_next():
- collectlog.debug(1, "parsing complete")
+ collectlog.debug("parsing complete")
if self.parser.error:
raise bb.BBHandledException()
self.show_appends_with_no_recipes()
@@ -1656,7 +1656,7 @@ class BBCooker:
# Return a copy, don't modify the original
pkgs_to_build = pkgs_to_build[:]
- if len(pkgs_to_build) == 0:
+ if not pkgs_to_build:
raise NothingToBuild
ignore = (self.data.getVar("ASSUME_PROVIDED") or "").split()
@@ -1678,7 +1678,7 @@ class BBCooker:
if 'universe' in pkgs_to_build:
parselog.verbnote("The \"universe\" target is only intended for testing and may produce errors.")
- parselog.debug(1, "collating packages for \"universe\"")
+ parselog.debug("collating packages for \"universe\"")
pkgs_to_build.remove('universe')
for mc in self.multiconfigs:
for t in self.recipecaches[mc].universe_target:
@@ -1703,26 +1703,36 @@ class BBCooker:
def post_serve(self):
self.shutdown(force=True)
prserv.serv.auto_shutdown()
+ if hasattr(bb.parse, "siggen"):
+ bb.parse.siggen.exit()
if self.hashserv:
self.hashserv.process.terminate()
self.hashserv.process.join()
if hasattr(self, "data"):
bb.event.fire(CookerExit(), self.data)
- def shutdown(self, force = False):
+ def shutdown(self, force=False):
if force:
self.state = state.forceshutdown
+ bb.event._should_exit.set()
else:
self.state = state.shutdown
if self.parser:
- self.parser.shutdown(clean=not force, force=force)
+ self.parser.shutdown(clean=False)
self.parser.final_cleanup()
def finishcommand(self):
+ if hasattr(self.parser, 'shutdown'):
+ self.parser.shutdown(clean=False)
+ self.parser.final_cleanup()
self.state = state.initial
+ bb.event._should_exit.clear()
def reset(self):
+ if hasattr(bb.parse, "siggen"):
+ bb.parse.siggen.exit()
+ self.finishcommand()
self.initConfigurationData()
self.handlePRServ()
@@ -1734,9 +1744,9 @@ class BBCooker:
if hasattr(self, "data"):
self.databuilder.reset()
self.data = self.databuilder.data
- self.parsecache_valid = False
- self.baseconfig_valid = False
-
+ # In theory tinfoil could have modified the base data before parsing,
+ # ideally need to track if anything did modify the datastore
+ self._parsecache_set(False)
class CookerExit(bb.event.Event):
"""
@@ -1751,16 +1761,16 @@ class CookerCollectFiles(object):
def __init__(self, priorities, mc=''):
self.mc = mc
self.bbappends = []
- # Priorities is a list of tupples, with the second element as the pattern.
+ # Priorities is a list of tuples, with the second element as the pattern.
# We need to sort the list with the longest pattern first, and so on to
# the shortest. This allows nested layers to be properly evaluated.
self.bbfile_config_priorities = sorted(priorities, key=lambda tup: tup[1], reverse=True)
def calc_bbfile_priority(self, filename):
- for _, _, regex, pri in self.bbfile_config_priorities:
+ for layername, _, regex, pri in self.bbfile_config_priorities:
if regex.match(filename):
- return pri, regex
- return 0, None
+ return pri, regex, layername
+ return 0, None, None
def get_bbfiles(self):
"""Get list of default .bb files by reading out the current directory"""
@@ -1779,7 +1789,7 @@ class CookerCollectFiles(object):
for ignored in ('SCCS', 'CVS', '.svn'):
if ignored in dirs:
dirs.remove(ignored)
- found += [os.path.join(dir, f) for f in files if (f.endswith(['.bb', '.bbappend']))]
+ found += [os.path.join(dir, f) for f in files if (f.endswith(('.bb', '.bbappend')))]
return found
@@ -1787,7 +1797,7 @@ class CookerCollectFiles(object):
"""Collect all available .bb build files"""
masked = 0
- collectlog.debug(1, "collecting .bb files")
+ collectlog.debug("collecting .bb files")
files = (config.getVar( "BBFILES") or "").split()
@@ -1795,14 +1805,14 @@ class CookerCollectFiles(object):
files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem)[0] )
config.setVar("BBFILES_PRIORITIZED", " ".join(files))
- if not len(files):
+ if not files:
files = self.get_bbfiles()
- if not len(files):
+ if not files:
collectlog.error("no recipe files to build, check your BBPATH and BBFILES?")
bb.event.fire(CookerExit(), eventdata)
- # We need to track where we look so that we can add inotify watches. There
+ # We need to track where we look so that we can know when the cache is invalid. There
# is no nice way to do this, this is horrid. We intercept the os.listdir()
# (or os.scandir() for python 3.6+) calls while we run glob().
origlistdir = os.listdir
@@ -1858,7 +1868,7 @@ class CookerCollectFiles(object):
try:
re.compile(mask)
bbmasks.append(mask)
- except sre_constants.error:
+ except re.error:
collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask)
# Then validate the combined regular expressions. This should never
@@ -1866,7 +1876,7 @@ class CookerCollectFiles(object):
bbmask = "|".join(bbmasks)
try:
bbmask_compiled = re.compile(bbmask)
- except sre_constants.error:
+ except re.error:
collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask)
bbmask = None
@@ -1874,7 +1884,7 @@ class CookerCollectFiles(object):
bbappend = []
for f in newfiles:
if bbmask and bbmask_compiled.search(f):
- collectlog.debug(1, "skipping masked file %s", f)
+ collectlog.debug("skipping masked file %s", f)
masked += 1
continue
if f.endswith('.bb'):
@@ -1882,7 +1892,7 @@ class CookerCollectFiles(object):
elif f.endswith('.bbappend'):
bbappend.append(f)
else:
- collectlog.debug(1, "skipping %s: unknown file extension", f)
+ collectlog.debug("skipping %s: unknown file extension", f)
# Build a list of .bbappend files for each .bb file
for f in bbappend:
@@ -1933,7 +1943,7 @@ class CookerCollectFiles(object):
# Calculate priorities for each file
for p in pkgfns:
realfn, cls, mc = bb.cache.virtualfn2realfn(p)
- priorities[p], regex = self.calc_bbfile_priority(realfn)
+ priorities[p], regex, _ = self.calc_bbfile_priority(realfn)
if regex in unmatched_regex:
matched_regex.add(regex)
unmatched_regex.remove(regex)
@@ -1984,15 +1994,30 @@ class ParsingFailure(Exception):
Exception.__init__(self, realexception, recipe)
class Parser(multiprocessing.Process):
- def __init__(self, jobs, results, quit, init, profile):
+ def __init__(self, jobs, results, quit, profile):
self.jobs = jobs
self.results = results
self.quit = quit
- self.init = init
multiprocessing.Process.__init__(self)
self.context = bb.utils.get_context().copy()
self.handlers = bb.event.get_class_handlers().copy()
self.profile = profile
+ self.queue_signals = False
+ self.signal_received = []
+ self.signal_threadlock = threading.Lock()
+
+ def catch_sig(self, signum, frame):
+ if self.queue_signals:
+ self.signal_received.append(signum)
+ else:
+ self.handle_sig(signum, frame)
+
+ def handle_sig(self, signum, frame):
+ if signum == signal.SIGTERM:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ os.kill(os.getpid(), signal.SIGTERM)
+ elif signum == signal.SIGINT:
+ signal.default_int_handler(signum, frame)
def run(self):
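
The per-process init() hook is replaced by catch_sig()/handle_sig(): a signal that arrives while the parser holds the event-stream write lock is queued and replayed later, since killing a holder of that lock would leave it locked forever (see the realrun() comment below). A standalone sketch of the defer-and-replay pattern:

    import os, signal, threading

    class SignalDeferrer:
        def __init__(self):
            self.queue_signals = False
            self.signal_received = []
            self.lock = threading.Lock()

        def catch_sig(self, signum, frame):
            if self.queue_signals:
                self.signal_received.append(signum)   # defer: critical section
            else:
                self.handle_sig(signum, frame)

        def handle_sig(self, signum, frame):
            if signum == signal.SIGTERM:
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                os.kill(os.getpid(), signal.SIGTERM)  # re-raise with default action
            elif signum == signal.SIGINT:
                signal.default_int_handler(signum, frame)

        def critical(self, func):
            # Run func() with signals deferred, replay them under the thread lock.
            with self.lock:
                self.queue_signals = True
                try:
                    return func()
                finally:
                    self.queue_signals = False
                    while self.signal_received:
                        self.handle_sig(self.signal_received.pop(), None)
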
@@ -2012,38 +2037,50 @@ class Parser(multiprocessing.Process):
prof.dump_stats(logfile)
def realrun(self):
- if self.init:
- self.init()
+ # Signal handling here is hard. We must not terminate any process or thread holding the write
+ # lock for the event stream as it will not be released, ever, and things will hang.
+ # Python handles signals in the main thread/process but they can be raised from any thread and
+ # we want to defer processing of any SIGTERM/SIGINT signal until we're outside the critical section
+ # and don't hold the lock (see server/process.py). We therefore always catch the signals (so any
+ # new thread should also do so) and we defer handling but we handle with the local thread lock
+ # held (a threading lock, not a multiprocessing one) so that no other thread in the process
+ # can be in the critical section.
+ signal.signal(signal.SIGTERM, self.catch_sig)
+ signal.signal(signal.SIGHUP, signal.SIG_DFL)
+ signal.signal(signal.SIGINT, self.catch_sig)
+ bb.utils.set_process_name(multiprocessing.current_process().name)
+ multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1)
+ multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1)
pending = []
- while True:
- try:
- self.quit.get_nowait()
- except queue.Empty:
- pass
- else:
- self.results.close()
- self.results.join_thread()
- break
+ havejobs = True
+ try:
+ while havejobs or pending:
+ if self.quit.is_set():
+ break
- if pending:
- result = pending.pop()
- else:
+ job = None
try:
job = self.jobs.pop()
except IndexError:
- self.results.close()
- self.results.join_thread()
- break
- result = self.parse(*job)
- # Clear the siggen cache after parsing to control memory usage, it's huge
- bb.parse.siggen.postparsing_clean_cache()
- try:
- self.results.put(result, timeout=0.25)
- except queue.Full:
- pending.append(result)
+ havejobs = False
+ if job:
+ result = self.parse(*job)
+ # Clear the siggen cache after parsing to control memory usage, it's huge
+ bb.parse.siggen.postparsing_clean_cache()
+ pending.append(result)
+
+ if pending:
+ try:
+ result = pending.pop()
+ self.results.put(result, timeout=0.05)
+ except queue.Full:
+ pending.append(result)
+ finally:
+ self.results.close()
+ self.results.join_thread()
- def parse(self, mc, cache, filename, appends):
+ def parse(self, mc, cache, filename, appends, layername):
try:
origfilter = bb.event.LogHandler.filter
# Record the filename we're parsing into any events generated
@@ -2057,17 +2094,17 @@ class Parser(multiprocessing.Process):
bb.event.set_class_handlers(self.handlers.copy())
bb.event.LogHandler.filter = parse_filter
- return True, mc, cache.parse(filename, appends)
+ return True, mc, cache.parse(filename, appends, layername)
except Exception as exc:
tb = sys.exc_info()[2]
exc.recipe = filename
exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
- return True, exc
+ return True, None, exc
# Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
# and for example a worker thread doesn't just exit on its own in response to
# a SystemExit event for example.
except BaseException as exc:
- return True, ParsingFailure(exc, filename)
+ return True, None, ParsingFailure(exc, filename)
finally:
bb.event.LogHandler.filter = origfilter
@@ -2097,10 +2134,11 @@ class CookerParser(object):
for mc in self.cooker.multiconfigs:
for filename in self.mcfilelist[mc]:
appends = self.cooker.collections[mc].get_file_appends(filename)
+ layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
if not self.bb_caches[mc].cacheValid(filename, appends):
- self.willparse.add((mc, self.bb_caches[mc], filename, appends))
+ self.willparse.add((mc, self.bb_caches[mc], filename, appends, layername))
else:
- self.fromcache.add((mc, self.bb_caches[mc], filename, appends))
+ self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername))
self.total = len(self.fromcache) + len(self.willparse)
self.toparse = len(self.willparse)
@@ -2109,6 +2147,7 @@ class CookerParser(object):
self.num_processes = min(int(self.cfgdata.getVar("BB_NUMBER_PARSE_THREADS") or
multiprocessing.cpu_count()), self.toparse)
+ bb.cache.SiggenRecipeInfo.reset()
self.start()
self.haveshutdown = False
self.syncthread = None
@@ -2118,15 +2157,8 @@ class CookerParser(object):
self.processes = []
if self.toparse:
bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata)
- def init():
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
- signal.signal(signal.SIGHUP, signal.SIG_DFL)
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- bb.utils.set_process_name(multiprocessing.current_process().name)
- multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1)
- multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1)
-
- self.parser_quit = multiprocessing.Queue(maxsize=self.num_processes)
+
+ self.parser_quit = multiprocessing.Event()
self.result_queue = multiprocessing.Queue()
def chunkify(lst,n):
@@ -2134,14 +2166,14 @@ class CookerParser(object):
self.jobs = chunkify(list(self.willparse), self.num_processes)
for i in range(0, self.num_processes):
- parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, init, self.cooker.configuration.profile)
+ parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, self.cooker.configuration.profile)
parser.start()
self.process_names.append(parser.name)
self.processes.append(parser)
self.results = itertools.chain(self.results, self.parse_generator())
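
Job distribution changes shape too: instead of a shared job queue plus an init() callable, each Parser is handed a pre-computed slice of the work list. The body of chunkify() sits outside this hunk; a typical round-robin implementation consistent with its use here would be:

    def chunkify(lst, n):
        # Split lst into n interleaved chunks:
        # chunkify(list(range(5)), 2) -> [[0, 2, 4], [1, 3]]
        return [lst[i::n] for i in range(n)]
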
- def shutdown(self, clean=True, force=False):
+ def shutdown(self, clean=True, eventmsg="Parsing halted due to errors"):
if not self.toparse:
return
if self.haveshutdown:
@@ -2155,9 +2187,9 @@ class CookerParser(object):
self.total)
bb.event.fire(event, self.cfgdata)
-
- for process in self.processes:
- self.parser_quit.put(None)
+ else:
+ bb.event.fire(bb.event.ParseError(eventmsg), self.cfgdata)
+ bb.error("Parsing halted due to errors, see error messages above")
# Cleanup the queue before call process.join(), otherwise there might be
# deadlocks.
@@ -2167,25 +2199,39 @@ class CookerParser(object):
except queue.Empty:
break
- for process in self.processes:
- if force:
- process.join(.1)
- process.terminate()
- else:
- process.join()
-
- self.parser_quit.close()
- # Allow data left in the cancel queue to be discarded
- self.parser_quit.cancel_join_thread()
-
def sync_caches():
for c in self.bb_caches.values():
+ bb.cache.SiggenRecipeInfo.reset()
c.sync()
- sync = threading.Thread(target=sync_caches, name="SyncThread")
- self.syncthread = sync
- sync.start()
+ self.syncthread = threading.Thread(target=sync_caches, name="SyncThread")
+ self.syncthread.start()
+
+ self.parser_quit.set()
+
+ for process in self.processes:
+ process.join(0.5)
+
+ for process in self.processes:
+ if process.exitcode is None:
+ os.kill(process.pid, signal.SIGINT)
+
+ for process in self.processes:
+ process.join(0.5)
+
+ for process in self.processes:
+ if process.exitcode is None:
+ process.terminate()
+
+ for process in self.processes:
+ process.join()
+ # Added in 3.7, cleans up zombies
+ if hasattr(process, "close"):
+ process.close()
+
+ bb.codeparser.parser_cache_save()
bb.codeparser.parser_cache_savemerge()
+ bb.cache.SiggenRecipeInfo.reset()
bb.fetch.fetcher_parse_done()
if self.cooker.configuration.profile:
profiles = []
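
Worker teardown becomes an escalation ladder instead of the old force/terminate switch: set the quit event, give every process a short join, SIGINT the stragglers (whose handlers may be deferring, see Parser.catch_sig), join again, terminate() anything still alive, then reap and close() to avoid zombies. As a reusable helper (name hypothetical):

    import os, signal

    def stop_workers(processes, grace=0.5):
        for p in processes:
            p.join(grace)                       # cooperative exit via quit event
        for p in processes:
            if p.exitcode is None:
                os.kill(p.pid, signal.SIGINT)   # interrupt; handlers may defer
        for p in processes:
            p.join(grace)
        for p in processes:
            if p.exitcode is None:
                p.terminate()                   # last resort: SIGTERM
        for p in processes:
            p.join()
            if hasattr(p, "close"):             # Python >= 3.7, reaps the zombie
                p.close()
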
@@ -2203,49 +2249,64 @@ class CookerParser(object):
self.syncthread.join()
def load_cached(self):
- for mc, cache, filename, appends in self.fromcache:
- cached, infos = cache.load(filename, appends)
- yield not cached, mc, infos
+ for mc, cache, filename, appends, layername in self.fromcache:
+ infos = cache.loadCached(filename, appends)
+ yield False, mc, infos
def parse_generator(self):
- while True:
+ empty = False
+ while self.processes or not empty:
+ for process in self.processes.copy():
+ if not process.is_alive():
+ process.join()
+ self.processes.remove(process)
+
if self.parsed >= self.toparse:
break
try:
result = self.result_queue.get(timeout=0.25)
except queue.Empty:
- pass
+ empty = True
+ yield None, None, None
else:
- value = result[1]
- if isinstance(value, BaseException):
- raise value
- else:
- yield result
+ empty = False
+ yield result
+
+ if not (self.parsed >= self.toparse):
+ raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? Exiting.", None)
+
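
parse_generator() now yields a (None, None, None) sentinel on queue timeouts instead of blocking, so parse_next() can hand control back to the server's idle loop, and it raises once workers have exited with recipes still unparsed. The consumer side of that protocol (results and handle are placeholders) looks like:

    for parsed, mc, result in results:
        if parsed is None:
            continue        # timeout tick: nothing ready, stay responsive
        if isinstance(result, BaseException):
            raise result    # exceptions travel through the queue as values
        handle(parsed, mc, result)
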
def parse_next(self):
result = []
parsed = None
try:
parsed, mc, result = next(self.results)
+ if isinstance(result, BaseException):
+ # Turn exceptions back into exceptions
+ raise result
+ if parsed is None:
+ # Timeout, loop back through the main loop
+ return True
+
except StopIteration:
self.shutdown()
return False
except bb.BBHandledException as exc:
self.error += 1
- logger.error('Failed to parse recipe: %s' % exc.recipe)
- self.shutdown(clean=False, force=True)
+ logger.debug('Failed to parse recipe: %s' % exc.recipe)
+ self.shutdown(clean=False)
return False
except ParsingFailure as exc:
self.error += 1
logger.error('Unable to parse %s: %s' %
(exc.recipe, bb.exceptions.to_string(exc.realexception)))
- self.shutdown(clean=False, force=True)
+ self.shutdown(clean=False)
return False
except bb.parse.ParseError as exc:
self.error += 1
logger.error(str(exc))
- self.shutdown(clean=False, force=True)
+ self.shutdown(clean=False, eventmsg=str(exc))
return False
except bb.data_smart.ExpansionError as exc:
self.error += 1
@@ -2254,7 +2315,7 @@ class CookerParser(object):
tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
logger.error('ExpansionError during parsing %s', value.recipe,
exc_info=(etype, value, tb))
- self.shutdown(clean=False, force=True)
+ self.shutdown(clean=False)
return False
except Exception as exc:
self.error += 1
@@ -2266,7 +2327,7 @@ class CookerParser(object):
# Most likely, an exception occurred during raising an exception
import traceback
logger.error('Exception during parse: %s' % traceback.format_exc())
- self.shutdown(clean=False, force=True)
+ self.shutdown(clean=False)
return False
self.current += 1
@@ -2288,11 +2349,13 @@ class CookerParser(object):
return True
def reparse(self, filename):
+ bb.cache.SiggenRecipeInfo.reset()
to_reparse = set()
for mc in self.cooker.multiconfigs:
- to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename)))
+ layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
+ to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename), layername))
- for mc, filename, appends in to_reparse:
- infos = self.bb_caches[mc].parse(filename, appends)
+ for mc, filename, appends, layername in to_reparse:
+ infos = self.bb_caches[mc].parse(filename, appends, layername)
for vfn, info_array in infos:
self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array)