Diffstat (limited to 'bitbake/lib/bb/utils.py')
-rw-r--r--  bitbake/lib/bb/utils.py  506
1 file changed, 406 insertions(+), 100 deletions(-)
diff --git a/bitbake/lib/bb/utils.py b/bitbake/lib/bb/utils.py
index 8d40bcdf83..ebee65d3dd 100644
--- a/bitbake/lib/bb/utils.py
+++ b/bitbake/lib/bb/utils.py
@@ -13,10 +13,12 @@ import errno
 import logging
 import bb
 import bb.msg
+import locale
 import multiprocessing
 import fcntl
 import importlib
-from importlib import machinery
+import importlib.machinery
+import importlib.util
 import itertools
 import subprocess
 import glob
@@ -24,9 +26,13 @@ import fnmatch
 import traceback
 import errno
 import signal
-import ast
 import collections
 import copy
+import ctypes
+import random
+import socket
+import struct
+import tempfile
 from subprocess import getstatusoutput
 from contextlib import contextmanager
 from ctypes import cdll
@@ -44,7 +50,7 @@ def clean_context():
 
 def get_context():
     return _context
-    
+
 def set_context(ctx):
     _context = ctx
 
@@ -130,6 +136,7 @@ def vercmp(ta, tb):
     return r
 
 def vercmp_string(a, b):
+    """ Split version strings and compare them """
     ta = split_version(a)
     tb = split_version(b)
     return vercmp(ta, tb)
@@ -205,8 +212,8 @@
                 inversion = True
             # This list is based on behavior and supported comparisons from deb, opkg and rpm.
             #
-            # Even though =<, <<, ==, !=, =>, and >> may not be supported, 
-            # we list each possibly valid item. 
+            # Even though =<, <<, ==, !=, =>, and >> may not be supported,
+            # we list each possibly valid item.
             # The build system is responsible for validation of what it supports.
             if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')):
                 lastcmp = i[0:2]
@@ -248,6 +255,12 @@
     return r
 
 def explode_dep_versions(s):
+    """
+    Take an RDEPENDS style string of format:
+    "DEPEND1 (optional version) DEPEND2 (optional version) ..."
+    skip null value and items appeared in dependency string multiple times
+    and return a dictionary of dependencies and versions.
+    """
     r = explode_dep_versions2(s)
     for d in r:
         if not r[d]:
@@ -334,7 +347,7 @@ def _print_exception(t, value, tb, realfile, text, context):
     exception = traceback.format_exception_only(t, value)
     error.append('Error executing a python function in %s:\n' % realfile)
 
-    # Strip 'us' from the stack (better_exec call) unless that was where the 
+    # Strip 'us' from the stack (better_exec call) unless that was where the
     # error came from
     if tb.tb_next is not None:
         tb = tb.tb_next
@@ -373,7 +386,7 @@ def _print_exception(t, value, tb, realfile, text, context):
 
     error.append("Exception: %s" % ''.join(exception))
 
-    # If the exception is from spwaning a task, let's be helpful and display
+    # If the exception is from spawning a task, let's be helpful and display
     # the output (which hopefully includes stderr).
     if isinstance(value, subprocess.CalledProcessError) and value.output:
         error.append("Subprocess output:")
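The hunks above also add docstrings to bitbake's version and dependency helpers. As a rough editorial sketch of their behaviour (not part of the patch):

    import bb.utils

    # Numeric segments compare numerically, so 1.2.10 sorts after 1.2.3
    assert bb.utils.vercmp_string("1.2.3", "1.2.10") < 0

    # explode_dep_versions2() maps each dependency to its version constraints
    deps = bb.utils.explode_dep_versions2("foo (>= 1.0) bar")
    # -> {'foo': ['>= 1.0'], 'bar': []}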
@@ -394,7 +407,7 @@ def better_exec(code, context, text = None, realfile = "<code>", pythonexception
     code = better_compile(code, realfile, realfile)
     try:
         exec(code, get_context(), context)
-    except (bb.BBHandledException, bb.parse.SkipRecipe, bb.data_smart.ExpansionError):
+    except (bb.BBHandledException, bb.parse.SkipRecipe, bb.data_smart.ExpansionError, bb.process.ExecutionError):
         # Error already shown so passthrough, no need for traceback
         raise
     except Exception as e:
@@ -403,8 +416,8 @@
         (t, value, tb) = sys.exc_info()
         try:
             _print_exception(t, value, tb, realfile, text, context)
-        except Exception as e:
-            logger.error("Exception handler error: %s" % str(e))
+        except Exception as e2:
+            logger.error("Exception handler error: %s" % str(e2))
         e = bb.BBHandledException(e)
         raise e
 
@@ -421,31 +434,20 @@ def better_eval(source, locals, extraglobals = None):
     return eval(source, ctx, locals)
 
 @contextmanager
-def fileslocked(files):
+def fileslocked(files, *args, **kwargs):
     """Context manager for locking and unlocking file locks."""
     locks = []
     if files:
         for lockfile in files:
-            locks.append(bb.utils.lockfile(lockfile))
-
-    yield
-
-    for lock in locks:
-        bb.utils.unlockfile(lock)
-
-@contextmanager
-def timeout(seconds):
-    def timeout_handler(signum, frame):
-        pass
-
-    original_handler = signal.signal(signal.SIGALRM, timeout_handler)
+            l = bb.utils.lockfile(lockfile, *args, **kwargs)
+            if l is not None:
+                locks.append(l)
 
     try:
-        signal.alarm(seconds)
         yield
     finally:
-        signal.alarm(0)
-        signal.signal(signal.SIGALRM, original_handler)
+        for lock in locks:
+            bb.utils.unlockfile(lock)
 
 def lockfile(name, shared=False, retry=True, block=False):
     """
@@ -458,9 +460,16 @@ def lockfile(name, shared=False, retry=True, block=False):
     consider the possibility of sending a signal to the process to break
     out - at which point you want block=True rather than retry=True.
     """
+    basename = os.path.basename(name)
+    if len(basename) > 255:
+        root, ext = os.path.splitext(basename)
+        basename = root[:255 - len(ext)] + ext
+
     dirname = os.path.dirname(name)
     mkdirhier(dirname)
 
+    name = os.path.join(dirname, basename)
+
     if not os.access(dirname, os.W_OK):
         logger.error("Unable to acquire lock '%s', directory is not writable",
                      name)
@@ -494,7 +503,7 @@ def lockfile(name, shared=False, retry=True, block=False):
                     return lf
                 lf.close()
         except OSError as e:
-            if e.errno == errno.EACCES:
+            if e.errno == errno.EACCES or e.errno == errno.ENAMETOOLONG:
                 logger.error("Unable to acquire lock '%s', %s",
                              e.strerror, name)
                 sys.exit(1)
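The reworked fileslocked() above now forwards extra arguments to lockfile() and only remembers locks that were actually acquired, so unlockfile() is no longer called for failed attempts. A minimal usage sketch (the lock path is hypothetical):

    from bb.utils import fileslocked

    # Extra keyword arguments now reach lockfile(), e.g. shared read locks
    with fileslocked(["/tmp/example.lock"], shared=True):
        pass  # critical section; locks are released even if this raises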
""" import hashlib - return _hasher(hashlib.md5(), filename) + try: + sig = hashlib.new('MD5', usedforsecurity=False) + except TypeError: + # Some configurations don't appear to support two arguments + sig = hashlib.new('MD5') + return _hasher(sig, filename) def sha256_file(filename): """ @@ -556,6 +570,20 @@ def sha1_file(filename): import hashlib return _hasher(hashlib.sha1(), filename) +def sha384_file(filename): + """ + Return the hex string representation of the SHA384 checksum of the filename + """ + import hashlib + return _hasher(hashlib.sha384(), filename) + +def sha512_file(filename): + """ + Return the hex string representation of the SHA512 checksum of the filename + """ + import hashlib + return _hasher(hashlib.sha512(), filename) + def preserved_envvars_exported(): """Variables which are taken from the environment and placed in and exported from the metadata""" @@ -566,7 +594,6 @@ def preserved_envvars_exported(): 'PATH', 'PWD', 'SHELL', - 'TERM', 'USER', 'LC_ALL', 'BBSERVER', @@ -577,11 +604,25 @@ def preserved_envvars(): v = [ 'BBPATH', 'BB_PRESERVE_ENV', - 'BB_ENV_WHITELIST', - 'BB_ENV_EXTRAWHITE', + 'BB_ENV_PASSTHROUGH_ADDITIONS', ] return v + preserved_envvars_exported() +def check_system_locale(): + """Make sure the required system locale are available and configured""" + default_locale = locale.getlocale(locale.LC_CTYPE) + + try: + locale.setlocale(locale.LC_CTYPE, ("en_US", "UTF-8")) + except: + sys.exit("Please make sure locale 'en_US.UTF-8' is available on your system") + else: + locale.setlocale(locale.LC_CTYPE, default_locale) + + if sys.getfilesystemencoding() != "utf-8": + sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\n" + "Python can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.") + def filter_environment(good_vars): """ Create a pristine environment for bitbake. This will remove variables that @@ -603,27 +644,27 @@ def filter_environment(good_vars): os.environ["LC_ALL"] = "en_US.UTF-8" if removed_vars: - logger.debug(1, "Removed the following variables from the environment: %s", ", ".join(removed_vars.keys())) + logger.debug("Removed the following variables from the environment: %s", ", ".join(removed_vars.keys())) return removed_vars def approved_variables(): """ - Determine and return the list of whitelisted variables which are approved + Determine and return the list of variables which are approved to remain in the environment. 
""" if 'BB_PRESERVE_ENV' in os.environ: return os.environ.keys() approved = [] - if 'BB_ENV_WHITELIST' in os.environ: - approved = os.environ['BB_ENV_WHITELIST'].split() - approved.extend(['BB_ENV_WHITELIST']) + if 'BB_ENV_PASSTHROUGH' in os.environ: + approved = os.environ['BB_ENV_PASSTHROUGH'].split() + approved.extend(['BB_ENV_PASSTHROUGH']) else: approved = preserved_envvars() - if 'BB_ENV_EXTRAWHITE' in os.environ: - approved.extend(os.environ['BB_ENV_EXTRAWHITE'].split()) - if 'BB_ENV_EXTRAWHITE' not in approved: - approved.extend(['BB_ENV_EXTRAWHITE']) + if 'BB_ENV_PASSTHROUGH_ADDITIONS' in os.environ: + approved.extend(os.environ['BB_ENV_PASSTHROUGH_ADDITIONS'].split()) + if 'BB_ENV_PASSTHROUGH_ADDITIONS' not in approved: + approved.extend(['BB_ENV_PASSTHROUGH_ADDITIONS']) return approved def clean_environment(): @@ -677,8 +718,8 @@ def remove(path, recurse=False, ionice=False): return if recurse: for name in glob.glob(path): - if _check_unsafe_delete_path(path): - raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % path) + if _check_unsafe_delete_path(name): + raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % name) # shutil.rmtree(name) would be ideal but its too slow cmd = [] if ionice: @@ -693,7 +734,7 @@ def remove(path, recurse=False, ionice=False): raise def prunedir(topdir, ionice=False): - # Delete everything reachable from the directory named in 'topdir'. + """ Delete everything reachable from the directory named in 'topdir'. """ # CAUTION: This is dangerous! if _check_unsafe_delete_path(topdir): raise Exception('bb.utils.prunedir: called with dangerous path "%s", refusing to delete!' % topdir) @@ -704,8 +745,10 @@ def prunedir(topdir, ionice=False): # but thats possibly insane and suffixes is probably going to be small # def prune_suffix(var, suffixes, d): - # See if var ends with any of the suffixes listed and - # remove it if found + """ + See if var ends with any of the suffixes listed and + remove it if found + """ for suffix in suffixes: if suffix and var.endswith(suffix): return var[:-len(suffix)] @@ -715,7 +758,8 @@ def mkdirhier(directory): """Create a directory like 'mkdir -p', but does not complain if directory already exists like os.makedirs """ - + if '${' in str(directory): + bb.fatal("Directory name {} contains unexpanded bitbake variable. This may cause build failures and WORKDIR polution.".format(directory)) try: os.makedirs(directory) except OSError as e: @@ -734,7 +778,7 @@ def movefile(src, dest, newmtime = None, sstat = None): if not sstat: sstat = os.lstat(src) except Exception as e: - print("movefile: Stating source file failed...", e) + logger.warning("movefile: Stating source file failed...", e) return None destexists = 1 @@ -762,7 +806,7 @@ def movefile(src, dest, newmtime = None, sstat = None): os.unlink(src) return os.lstat(dest) except Exception as e: - print("movefile: failed to properly create symlink:", dest, "->", target, e) + logger.warning("movefile: failed to properly create symlink:", dest, "->", target, e) return None renamefailed = 1 @@ -774,12 +818,12 @@ def movefile(src, dest, newmtime = None, sstat = None): if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]: try: - os.rename(src, destpath) + bb.utils.rename(src, destpath) renamefailed = 0 except Exception as e: if e.errno != errno.EXDEV: # Some random error. 
- print("movefile: Failed to move", src, "to", dest, e) + logger.warning("movefile: Failed to move", src, "to", dest, e) return None # Invalid cross-device-link 'bind' mounted or actually Cross-Device @@ -788,16 +832,16 @@ def movefile(src, dest, newmtime = None, sstat = None): if stat.S_ISREG(sstat[stat.ST_MODE]): try: # For safety copy then move it over. shutil.copyfile(src, destpath + "#new") - os.rename(destpath + "#new", destpath) + bb.utils.rename(destpath + "#new", destpath) didcopy = 1 except Exception as e: - print('movefile: copy', src, '->', dest, 'failed.', e) + logger.warning('movefile: copy', src, '->', dest, 'failed.', e) return None else: #we don't yet handle special, so we need to fall back to /bin/mv a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'") if a[0] != 0: - print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a) + logger.warning("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a) return None # failure try: if didcopy: @@ -805,7 +849,7 @@ def movefile(src, dest, newmtime = None, sstat = None): os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown os.unlink(src) except Exception as e: - print("movefile: Failed to chown/chmod/unlink", dest, e) + logger.warning("movefile: Failed to chown/chmod/unlink", dest, e) return None if newmtime: @@ -850,7 +894,7 @@ def copyfile(src, dest, newmtime = None, sstat = None): if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): os.unlink(dest) os.symlink(target, dest) - #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) + os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) return os.lstat(dest) except Exception as e: logger.warning("copyfile: failed to create symlink %s to %s (%s)" % (dest, target, e)) @@ -866,7 +910,7 @@ def copyfile(src, dest, newmtime = None, sstat = None): # For safety copy then move it over. shutil.copyfile(src, dest + "#new") - os.rename(dest + "#new", dest) + bb.utils.rename(dest + "#new", dest) except Exception as e: logger.warning("copyfile: copy %s to %s failed (%s)" % (src, dest, e)) return False @@ -945,10 +989,28 @@ def which(path, item, direction = 0, history = False, executable=False): return "", hist return "" +@contextmanager +def umask(new_mask): + """ + Context manager to set the umask to a specific mask, and restore it afterwards. + """ + current_mask = os.umask(new_mask) + try: + yield + finally: + os.umask(current_mask) + def to_boolean(string, default=None): + """ + Check input string and return boolean value True/False/None + depending upon the checks + """ if not string: return default + if isinstance(string, int): + return string != 0 + normalized = string.lower() if normalized in ("y", "yes", "1", "true"): return True @@ -989,6 +1051,23 @@ def contains(variable, checkvalues, truevalue, falsevalue, d): return falsevalue def contains_any(variable, checkvalues, truevalue, falsevalue, d): + """Check if a variable contains any values specified. + + Arguments: + + variable -- the variable name. This will be fetched and expanded (using + d.getVar(variable)) and then split into a set(). + + checkvalues -- if this is a string it is split on whitespace into a set(), + otherwise coerced directly into a set(). + + truevalue -- the value to return if checkvalues is a subset of variable. + + falsevalue -- the value to return if variable is empty or if checkvalues is + not a subset of variable. + + d -- the data store. 
+ """ val = d.getVar(variable) if not val: return falsevalue @@ -1025,8 +1104,48 @@ def filter(variable, checkvalues, d): checkvalues = set(checkvalues) return ' '.join(sorted(checkvalues & val)) + +def get_referenced_vars(start_expr, d): + """ + :return: names of vars referenced in start_expr (recursively), in quasi-BFS order (variables within the same level + are ordered arbitrarily) + """ + + seen = set() + ret = [] + + # The first entry in the queue is the unexpanded start expression + queue = collections.deque([start_expr]) + # Subsequent entries will be variable names, so we need to track whether or not entry requires getVar + is_first = True + + empty_data = bb.data.init() + while queue: + entry = queue.popleft() + if is_first: + # Entry is the start expression - no expansion needed + is_first = False + expression = entry + else: + # This is a variable name - need to get the value + expression = d.getVar(entry, False) + ret.append(entry) + + # expandWithRefs is how we actually get the referenced variables in the expression. We call it using an empty + # data store because we only want the variables directly used in the expression. It returns a set, which is what + # dooms us to only ever be "quasi-BFS" rather than full BFS. + new_vars = empty_data.expandWithRefs(expression, None).references - set(seen) + + queue.extend(new_vars) + seen.update(new_vars) + return ret + + def cpu_count(): - return multiprocessing.cpu_count() + try: + return len(os.sched_getaffinity(0)) + except OSError: + return multiprocessing.cpu_count() def nonblockingfd(fd): fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK) @@ -1035,21 +1154,20 @@ def process_profilelog(fn, pout = None): # Either call with a list of filenames and set pout or a filename and optionally pout. if not pout: pout = fn + '.processed' - pout = open(pout, 'w') - - import pstats - if isinstance(fn, list): - p = pstats.Stats(*fn, stream=pout) - else: - p = pstats.Stats(fn, stream=pout) - p.sort_stats('time') - p.print_stats() - p.print_callers() - p.sort_stats('cumulative') - p.print_stats() - pout.flush() - pout.close() + with open(pout, 'w') as pout: + import pstats + if isinstance(fn, list): + p = pstats.Stats(*fn, stream=pout) + else: + p = pstats.Stats(fn, stream=pout) + p.sort_stats('time') + p.print_stats() + p.print_callers() + p.sort_stats('cumulative') + p.print_stats() + + pout.flush() # # Was present to work around multiprocessing pool bugs in python < 2.7.3 @@ -1102,7 +1220,7 @@ def edit_metadata(meta_lines, variables, varfunc, match_overrides=False): variables: a list of variable names to look for. Functions may also be specified, but must be specified with '()' at the end of the name. Note that the function doesn't have - any intrinsic understanding of _append, _prepend, _remove, + any intrinsic understanding of :append, :prepend, :remove, or overrides, so these are considered as part of the name. These values go into a regular expression, so regular expression syntax is allowed. 
@@ -1422,14 +1540,20 @@ def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
 
     return (notadded, notremoved)
 
-
-def get_file_layer(filename, d):
-    """Determine the collection (as defined by a layer's layer.conf file) containing the specified file"""
+def get_collection_res(d):
     collections = (d.getVar('BBFILE_COLLECTIONS') or '').split()
     collection_res = {}
     for collection in collections:
         collection_res[collection] = d.getVar('BBFILE_PATTERN_%s' % collection) or ''
+    return collection_res
+
+
+def get_file_layer(filename, d, collection_res={}):
+    """Determine the collection (as defined by a layer's layer.conf file) containing the specified file"""
+    if not collection_res:
+        collection_res = get_collection_res(d)
 
     def path_to_layer(path):
         # Use longest path so we handle nested layers
         matchlen = 0
@@ -1441,12 +1565,13 @@ def get_file_layer(filename, d):
         return match
 
     result = None
-    bbfiles = (d.getVar('BBFILES') or '').split()
+    bbfiles = (d.getVar('BBFILES_PRIORITIZED') or '').split()
     bbfilesmatch = False
     for bbfilesentry in bbfiles:
-        if fnmatch.fnmatch(filename, bbfilesentry):
+        if fnmatch.fnmatchcase(filename, bbfilesentry):
             bbfilesmatch = True
             result = path_to_layer(bbfilesentry)
+            break
 
     if not bbfilesmatch:
         # Probably a bbclass
@@ -1507,35 +1632,91 @@ def set_process_name(name):
     except:
         pass
 
-# export common proxies variables from datastore to environment
-def export_proxies(d):
-    import os
-
-    variables = ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY',
-                    'ftp_proxy', 'FTP_PROXY', 'no_proxy', 'NO_PROXY',
-                    'GIT_PROXY_COMMAND']
-    exported = False
-
-    for v in variables:
-        if v in os.environ.keys():
-            exported = True
-        else:
-            v_proxy = d.getVar(v)
-            if v_proxy is not None:
-                os.environ[v] = v_proxy
-                exported = True
-
-    return exported
+def enable_loopback_networking():
+    # From bits/ioctls.h
+    SIOCGIFFLAGS = 0x8913
+    SIOCSIFFLAGS = 0x8914
+    SIOCSIFADDR = 0x8916
+    SIOCSIFNETMASK = 0x891C
+
+    # if.h
+    IFF_UP = 0x1
+    IFF_RUNNING = 0x40
+
+    # bits/socket.h
+    AF_INET = 2
+
+    # char ifr_name[IFNAMSIZ=16]
+    ifr_name = struct.pack("@16s", b"lo")
+    def netdev_req(fd, req, data = b""):
+        # Pad and add interface name
+        data = ifr_name + data + (b'\x00' * (16 - len(data)))
+        # Return all data after interface name
+        return fcntl.ioctl(fd, req, data)[16:]
+
+    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) as sock:
+        fd = sock.fileno()
+
+        # struct sockaddr_in ifr_addr { unsigned short family; uint16_t sin_port ; uint32_t in_addr; }
+        req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 127, 0, 0, 1)
+        netdev_req(fd, SIOCSIFADDR, req)
+
+        # short ifr_flags
+        flags = struct.unpack_from('@h', netdev_req(fd, SIOCGIFFLAGS))[0]
+        flags |= IFF_UP | IFF_RUNNING
+        netdev_req(fd, SIOCSIFFLAGS, struct.pack('@h', flags))
+
+        # struct sockaddr_in ifr_netmask
+        req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 255, 0, 0, 0)
+        netdev_req(fd, SIOCSIFNETMASK, req)
+
+def disable_network(uid=None, gid=None):
+    """
+    Disable networking in the current process if the kernel supports it, else
+    just return after logging to debug. To do this we need to create a new user
+    namespace, then map back to the original uid/gid.
+ """ + libc = ctypes.CDLL('libc.so.6') + + # From sched.h + # New user namespace + CLONE_NEWUSER = 0x10000000 + # New network namespace + CLONE_NEWNET = 0x40000000 + + if uid is None: + uid = os.getuid() + if gid is None: + gid = os.getgid() + + ret = libc.unshare(CLONE_NEWNET | CLONE_NEWUSER) + if ret != 0: + logger.debug("System doesn't support disabling network without admin privs") + return + with open("/proc/self/uid_map", "w") as f: + f.write("%s %s 1" % (uid, uid)) + with open("/proc/self/setgroups", "w") as f: + f.write("deny") + with open("/proc/self/gid_map", "w") as f: + f.write("%s %s 1" % (gid, gid)) +def export_proxies(d): + from bb.fetch2 import get_fetcher_environment + """ export common proxies variables from datastore to environment """ + newenv = get_fetcher_environment(d) + for v in newenv: + os.environ[v] = newenv[v] def load_plugins(logger, plugins, pluginpath): def load_plugin(name): - logger.debug(1, 'Loading plugin %s' % name) + logger.debug('Loading plugin %s' % name) spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] ) if spec: - return spec.loader.load_module() + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod - logger.debug(1, 'Loading plugins from %s...' % pluginpath) + logger.debug('Loading plugins from %s...' % pluginpath) expanded = (glob.glob(os.path.join(pluginpath, '*' + ext)) for ext in python_extensions) @@ -1560,3 +1741,128 @@ class LogCatcher(logging.Handler): self.messages.append(bb.build.logformatter.format(record)) def contains(self, message): return (message in self.messages) + +def is_semver(version): + """ + Is the version string following the semver semantic? + + https://semver.org/spec/v2.0.0.html + """ + regex = re.compile( + r""" + ^ + (0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*) + (?:-( + (?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*) + (?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))* + ))? + (?:\+( + [0-9a-zA-Z-]+ + (?:\.[0-9a-zA-Z-]+)* + ))? + $ + """, re.VERBOSE) + + if regex.match(version) is None: + return False + + return True + +# Wrapper around os.rename which can handle cross device problems +# e.g. from container filesystems +def rename(src, dst): + try: + os.rename(src, dst) + except OSError as err: + if err.errno == 18: + # Invalid cross-device link error + shutil.move(src, dst) + else: + raise err + +@contextmanager +def environment(**envvars): + """ + Context manager to selectively update the environment with the specified mapping. + """ + backup = dict(os.environ) + try: + os.environ.update(envvars) + yield + finally: + for var in envvars: + if var in backup: + os.environ[var] = backup[var] + elif var in os.environ: + del os.environ[var] + +def is_local_uid(uid=''): + """ + Check whether uid is a local one or not. + Can't use pwd module since it gets all UIDs, not local ones only. + """ + if not uid: + uid = os.getuid() + with open('/etc/passwd', 'r') as f: + for line in f: + line_split = line.split(':') + if len(line_split) < 3: + continue + if str(uid) == line_split[2]: + return True + return False + +def mkstemp(suffix=None, prefix=None, dir=None, text=False): + """ + Generates a unique filename, independent of time. + + mkstemp() in glibc (at least) generates unique file names based on the + current system time. When combined with highly parallel builds, and + operating over NFS (e.g. shared sstate/downloads) this can result in + conflicts and race conditions. 
@@ -1560,3 +1741,128 @@ class LogCatcher(logging.Handler):
         self.messages.append(bb.build.logformatter.format(record))
     def contains(self, message):
         return (message in self.messages)
+
+def is_semver(version):
+    """
+    Is the version string following the semver semantic?
+
+    https://semver.org/spec/v2.0.0.html
+    """
+    regex = re.compile(
+    r"""
+    ^
+    (0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)
+    (?:-(
+        (?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)
+        (?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*
+    ))?
+    (?:\+(
+        [0-9a-zA-Z-]+
+        (?:\.[0-9a-zA-Z-]+)*
+    ))?
+    $
+    """, re.VERBOSE)
+
+    if regex.match(version) is None:
+        return False
+
+    return True
+
+# Wrapper around os.rename which can handle cross device problems
+# e.g. from container filesystems
+def rename(src, dst):
+    try:
+        os.rename(src, dst)
+    except OSError as err:
+        if err.errno == 18:
+            # Invalid cross-device link error
+            shutil.move(src, dst)
+        else:
+            raise err
+
+@contextmanager
+def environment(**envvars):
+    """
+    Context manager to selectively update the environment with the specified mapping.
+    """
+    backup = dict(os.environ)
+    try:
+        os.environ.update(envvars)
+        yield
+    finally:
+        for var in envvars:
+            if var in backup:
+                os.environ[var] = backup[var]
+            elif var in os.environ:
+                del os.environ[var]
+
+def is_local_uid(uid=''):
+    """
+    Check whether uid is a local one or not.
+    Can't use pwd module since it gets all UIDs, not local ones only.
+    """
+    if not uid:
+        uid = os.getuid()
+    with open('/etc/passwd', 'r') as f:
+        for line in f:
+            line_split = line.split(':')
+            if len(line_split) < 3:
+                continue
+            if str(uid) == line_split[2]:
+                return True
+    return False
+
+def mkstemp(suffix=None, prefix=None, dir=None, text=False):
+    """
+    Generates a unique filename, independent of time.
+
+    mkstemp() in glibc (at least) generates unique file names based on the
+    current system time. When combined with highly parallel builds, and
+    operating over NFS (e.g. shared sstate/downloads) this can result in
+    conflicts and race conditions.
+
+    This function adds additional entropy to the file name so that a collision
+    is independent of time and thus extremely unlikely.
+    """
+    entropy = "".join(random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=20))
+    if prefix:
+        prefix = prefix + entropy
+    else:
+        prefix = tempfile.gettempprefix() + entropy
+    return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)
+
+def path_is_descendant(descendant, ancestor):
+    """
+    Returns True if the path `descendant` is a descendant of `ancestor`
+    (including being equivalent to `ancestor` itself). Otherwise returns False.
+    Correctly accounts for symlinks, bind mounts, etc. by using
+    os.path.samestat() to compare paths
+
+    May raise any exception that os.stat() raises
+    """
+
+    ancestor_stat = os.stat(ancestor)
+
+    # Recurse up each directory component of the descendant to see if it is
+    # equivalent to the ancestor
+    check_dir = os.path.abspath(descendant).rstrip("/")
+    while check_dir:
+        check_stat = os.stat(check_dir)
+        if os.path.samestat(check_stat, ancestor_stat):
+            return True
+        check_dir = os.path.dirname(check_dir).rstrip("/")
+
+    return False
+
+# If we don't have a timeout of some kind and a process/thread exits badly (for example
+# OOM killed) and held a lock, we'd just hang in the lock futex forever. It is better
+# we exit at some point than hang. 5 minutes with no progress means we're probably deadlocked.
+@contextmanager
+def lock_timeout(lock):
+    held = lock.acquire(timeout=5*60)
+    try:
+        if not held:
+            os._exit(1)
+        yield held
+    finally:
+        lock.release()
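Of the helpers added in the final hunk, environment() and mkstemp() compose naturally: mkstemp() keeps the tempfile.mkstemp() signature but mixes 20 characters of extra entropy into the prefix, so collisions no longer depend on the clock. A brief editorial sketch of using them together:

    import os
    import bb.utils

    with bb.utils.environment(LANG="en_US.UTF-8"):
        fd, path = bb.utils.mkstemp(suffix=".log")
        os.close(fd)
        os.unlink(path)
    # LANG is restored to (or removed from) its previous state here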