Diffstat (limited to 'bitbake/lib/bb/utils.py')
-rw-r--r--  bitbake/lib/bb/utils.py  281
1 file changed, 225 insertions(+), 56 deletions(-)
diff --git a/bitbake/lib/bb/utils.py b/bitbake/lib/bb/utils.py
index 70634910f7..ebee65d3dd 100644
--- a/bitbake/lib/bb/utils.py
+++ b/bitbake/lib/bb/utils.py
@@ -13,10 +13,12 @@ import errno
import logging
import bb
import bb.msg
+import locale
import multiprocessing
import fcntl
import importlib
-from importlib import machinery
+import importlib.machinery
+import importlib.util
import itertools
import subprocess
import glob
@@ -26,6 +28,11 @@ import errno
import signal
import collections
import copy
+import ctypes
+import random
+import socket
+import struct
+import tempfile
from subprocess import getstatusoutput
from contextlib import contextmanager
from ctypes import cdll
@@ -43,7 +50,7 @@ def clean_context():
def get_context():
return _context
-
+
def set_context(ctx):
_context = ctx
@@ -205,8 +212,8 @@ def explode_dep_versions2(s, *, sort=True):
inversion = True
# This list is based on behavior and supported comparisons from deb, opkg and rpm.
#
- # Even though =<, <<, ==, !=, =>, and >> may not be supported,
- # we list each possibly valid item.
+ # Even though =<, <<, ==, !=, =>, and >> may not be supported,
+ # we list each possibly valid item.
# The build system is responsible for validation of what it supports.
if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')):
lastcmp = i[0:2]
@@ -251,7 +258,7 @@ def explode_dep_versions(s):
"""
Take an RDEPENDS style string of format:
"DEPEND1 (optional version) DEPEND2 (optional version) ..."
- skip null value and items appeared in dependancy string multiple times
+ skip null value and items appeared in dependency string multiple times
and return a dictionary of dependencies and versions.
"""
r = explode_dep_versions2(s)
@@ -340,7 +347,7 @@ def _print_exception(t, value, tb, realfile, text, context):
exception = traceback.format_exception_only(t, value)
error.append('Error executing a python function in %s:\n' % realfile)
- # Strip 'us' from the stack (better_exec call) unless that was where the
+ # Strip 'us' from the stack (better_exec call) unless that was where the
# error came from
if tb.tb_next is not None:
tb = tb.tb_next
@@ -379,7 +386,7 @@ def _print_exception(t, value, tb, realfile, text, context):
error.append("Exception: %s" % ''.join(exception))
- # If the exception is from spwaning a task, let's be helpful and display
+ # If the exception is from spawning a task, let's be helpful and display
# the output (which hopefully includes stderr).
if isinstance(value, subprocess.CalledProcessError) and value.output:
error.append("Subprocess output:")
@@ -400,7 +407,7 @@ def better_exec(code, context, text = None, realfile = "<code>", pythonexception
code = better_compile(code, realfile, realfile)
try:
exec(code, get_context(), context)
- except (bb.BBHandledException, bb.parse.SkipRecipe, bb.data_smart.ExpansionError):
+ except (bb.BBHandledException, bb.parse.SkipRecipe, bb.data_smart.ExpansionError, bb.process.ExecutionError):
# Error already shown so passthrough, no need for traceback
raise
except Exception as e:
@@ -427,12 +434,14 @@ def better_eval(source, locals, extraglobals = None):
return eval(source, ctx, locals)
@contextmanager
-def fileslocked(files):
+def fileslocked(files, *args, **kwargs):
"""Context manager for locking and unlocking file locks."""
locks = []
if files:
for lockfile in files:
- locks.append(bb.utils.lockfile(lockfile))
+ l = bb.utils.lockfile(lockfile, *args, **kwargs)
+ if l is not None:
+ locks.append(l)
try:
yield
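
The extra *args/**kwargs are forwarded untouched to bb.utils.lockfile(), so callers can now request shared or non-blocking locks through the context manager; with retry=False a lock that cannot be taken is simply skipped rather than blocking. A minimal usage sketch (the lock file path is hypothetical):

    import bb.utils

    # Hold a shared (read) lock for the duration of the block; the keyword
    # arguments are passed straight through to bb.utils.lockfile().
    with bb.utils.fileslocked(["/tmp/example.lock"], shared=True):
        pass  # read the protected resource here
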
@@ -451,9 +460,16 @@ def lockfile(name, shared=False, retry=True, block=False):
consider the possibility of sending a signal to the process to break
out - at which point you want block=True rather than retry=True.
"""
+ basename = os.path.basename(name)
+ if len(basename) > 255:
+ root, ext = os.path.splitext(basename)
+ basename = root[:255 - len(ext)] + ext
+
dirname = os.path.dirname(name)
mkdirhier(dirname)
+ name = os.path.join(dirname, basename)
+
if not os.access(dirname, os.W_OK):
logger.error("Unable to acquire lock '%s', directory is not writable",
name)
@@ -487,7 +503,7 @@ def lockfile(name, shared=False, retry=True, block=False):
return lf
lf.close()
except OSError as e:
- if e.errno == errno.EACCES:
+ if e.errno == errno.EACCES or e.errno == errno.ENAMETOOLONG:
logger.error("Unable to acquire lock '%s', %s",
e.strerror, name)
sys.exit(1)
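
The truncation keeps the lock file's basename within the 255-byte name limit of most Linux filesystems while preserving its extension, and ENAMETOOLONG is now treated like EACCES as a fatal error. A standalone sketch of the truncation arithmetic (the over-long name is fabricated):

    import os

    name = "/tmp/" + ("x" * 300) + ".lock"      # hypothetical, over-long lock name
    basename = os.path.basename(name)
    if len(basename) > 255:
        root, ext = os.path.splitext(basename)
        basename = root[:255 - len(ext)] + ext  # 250 'x' characters plus ".lock"
    print(len(basename))                        # -> 255
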
@@ -532,7 +548,12 @@ def md5_file(filename):
Return the hex string representation of the MD5 checksum of filename.
"""
import hashlib
- return _hasher(hashlib.md5(), filename)
+ try:
+ sig = hashlib.new('MD5', usedforsecurity=False)
+ except TypeError:
+ # Some configurations don't appear to support two arguments
+ sig = hashlib.new('MD5')
+ return _hasher(sig, filename)
def sha256_file(filename):
"""
@@ -583,11 +604,25 @@ def preserved_envvars():
v = [
'BBPATH',
'BB_PRESERVE_ENV',
- 'BB_ENV_WHITELIST',
- 'BB_ENV_EXTRAWHITE',
+ 'BB_ENV_PASSTHROUGH_ADDITIONS',
]
return v + preserved_envvars_exported()
+def check_system_locale():
+ """Make sure the required system locale is available and configured"""
+ default_locale = locale.getlocale(locale.LC_CTYPE)
+
+ try:
+ locale.setlocale(locale.LC_CTYPE, ("en_US", "UTF-8"))
+ except:
+ sys.exit("Please make sure locale 'en_US.UTF-8' is available on your system")
+ else:
+ locale.setlocale(locale.LC_CTYPE, default_locale)
+
+ if sys.getfilesystemencoding() != "utf-8":
+ sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\n"
+ "Python can't change the filesystem locale after loading, so we need a UTF-8 locale when Python starts or things won't work.")
+
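
check_system_locale() proves that en_US.UTF-8 exists by switching LC_CTYPE to it and then restoring the previous setting. The same probe can be written as a reusable helper; a sketch under the assumption that locale.Error is the only failure of interest (the helper name is made up):

    import locale

    def locale_available(name, encoding="UTF-8"):
        """Return True if the given locale can be activated for LC_CTYPE."""
        saved = locale.getlocale(locale.LC_CTYPE)
        try:
            locale.setlocale(locale.LC_CTYPE, (name, encoding))
        except locale.Error:
            return False
        finally:
            locale.setlocale(locale.LC_CTYPE, saved)
        return True

    print(locale_available("en_US"))
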
def filter_environment(good_vars):
"""
Create a pristine environment for bitbake. This will remove variables that
@@ -615,21 +650,21 @@ def filter_environment(good_vars):
def approved_variables():
"""
- Determine and return the list of whitelisted variables which are approved
+ Determine and return the list of variables which are approved
to remain in the environment.
"""
if 'BB_PRESERVE_ENV' in os.environ:
return os.environ.keys()
approved = []
- if 'BB_ENV_WHITELIST' in os.environ:
- approved = os.environ['BB_ENV_WHITELIST'].split()
- approved.extend(['BB_ENV_WHITELIST'])
+ if 'BB_ENV_PASSTHROUGH' in os.environ:
+ approved = os.environ['BB_ENV_PASSTHROUGH'].split()
+ approved.extend(['BB_ENV_PASSTHROUGH'])
else:
approved = preserved_envvars()
- if 'BB_ENV_EXTRAWHITE' in os.environ:
- approved.extend(os.environ['BB_ENV_EXTRAWHITE'].split())
- if 'BB_ENV_EXTRAWHITE' not in approved:
- approved.extend(['BB_ENV_EXTRAWHITE'])
+ if 'BB_ENV_PASSTHROUGH_ADDITIONS' in os.environ:
+ approved.extend(os.environ['BB_ENV_PASSTHROUGH_ADDITIONS'].split())
+ if 'BB_ENV_PASSTHROUGH_ADDITIONS' not in approved:
+ approved.extend(['BB_ENV_PASSTHROUGH_ADDITIONS'])
return approved
def clean_environment():
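
BB_ENV_WHITELIST/BB_ENV_EXTRAWHITE are replaced by BB_ENV_PASSTHROUGH/BB_ENV_PASSTHROUGH_ADDITIONS with unchanged semantics: the former overrides the approved list completely, the latter extends the defaults. A small illustration (the extra variable names are only examples):

    import os
    import bb.utils

    os.environ["BB_ENV_PASSTHROUGH_ADDITIONS"] = "SSTATE_DIR DL_DIR"
    approved = bb.utils.approved_variables()
    # 'approved' now holds the defaults from preserved_envvars() plus
    # SSTATE_DIR and DL_DIR; everything else is dropped by filter_environment().
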
@@ -683,8 +718,8 @@ def remove(path, recurse=False, ionice=False):
return
if recurse:
for name in glob.glob(path):
- if _check_unsafe_delete_path(path):
- raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % path)
+ if _check_unsafe_delete_path(name):
+ raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % name)
# shutil.rmtree(name) would be ideal but its too slow
cmd = []
if ionice:
@@ -710,9 +745,9 @@ def prunedir(topdir, ionice=False):
# but thats possibly insane and suffixes is probably going to be small
#
def prune_suffix(var, suffixes, d):
- """
+ """
See if var ends with any of the suffixes listed and
- remove it if found
+ remove it if found
"""
for suffix in suffixes:
if suffix and var.endswith(suffix):
@@ -723,7 +758,8 @@ def mkdirhier(directory):
"""Create a directory like 'mkdir -p', but does not complain if
directory already exists like os.makedirs
"""
-
+ if '${' in str(directory):
+ bb.fatal("Directory name {} contains an unexpanded bitbake variable. This may cause build failures and WORKDIR pollution.".format(directory))
try:
os.makedirs(directory)
except OSError as e:
@@ -742,7 +778,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
if not sstat:
sstat = os.lstat(src)
except Exception as e:
- print("movefile: Stating source file failed...", e)
+ logger.warning("movefile: Stating source file failed... %s", e)
return None
destexists = 1
@@ -770,7 +806,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
os.unlink(src)
return os.lstat(dest)
except Exception as e:
- print("movefile: failed to properly create symlink:", dest, "->", target, e)
+ logger.warning("movefile: failed to properly create symlink: %s -> %s, %s", dest, target, e)
return None
renamefailed = 1
@@ -787,7 +823,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
except Exception as e:
if e.errno != errno.EXDEV:
# Some random error.
- print("movefile: Failed to move", src, "to", dest, e)
+ logger.warning("movefile: Failed to move %s to %s: %s", src, dest, e)
return None
# Invalid cross-device-link 'bind' mounted or actually Cross-Device
@@ -799,13 +835,13 @@ def movefile(src, dest, newmtime = None, sstat = None):
bb.utils.rename(destpath + "#new", destpath)
didcopy = 1
except Exception as e:
- print('movefile: copy', src, '->', dest, 'failed.', e)
+ logger.warning('movefile: copy %s -> %s failed: %s', src, dest, e)
return None
else:
#we don't yet handle special, so we need to fall back to /bin/mv
a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
if a[0] != 0:
- print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
+ logger.warning("movefile: Failed to move special file '%s' to '%s': %s", src, dest, a)
return None # failure
try:
if didcopy:
@@ -813,7 +849,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
os.unlink(src)
except Exception as e:
- print("movefile: Failed to chown/chmod/unlink", dest, e)
+ logger.warning("movefile: Failed to chown/chmod/unlink %s: %s", dest, e)
return None
if newmtime:
@@ -965,13 +1001,16 @@ def umask(new_mask):
os.umask(current_mask)
def to_boolean(string, default=None):
- """
+ """
Check input string and return boolean value True/False/None
- depending upon the checks
+ depending upon the checks
"""
if not string:
return default
+ if isinstance(string, int):
+ return string != 0
+
normalized = string.lower()
if normalized in ("y", "yes", "1", "true"):
return True
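
Because the falsy check still runs first, an integer 0 returns the default rather than False; any other integer short-circuits before .lower() is attempted. Illustrative calls (the False branch for "n"/"no"/"0"/"false" is unchanged and not shown in this hunk):

    from bb.utils import to_boolean

    to_boolean("yes")        # True
    to_boolean("0")          # False
    to_boolean(1)            # True  - integers are now accepted directly
    to_boolean(0)            # None  - falsy input still returns the default first
    to_boolean("", False)    # False - empty string falls back to the supplied default
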
@@ -1103,7 +1142,10 @@ def get_referenced_vars(start_expr, d):
def cpu_count():
- return multiprocessing.cpu_count()
+ try:
+ return len(os.sched_getaffinity(0))
+ except OSError:
+ return multiprocessing.cpu_count()
def nonblockingfd(fd):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
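
os.sched_getaffinity(0) counts only the CPUs this process may actually run on (for example under taskset or a container cpuset), which is usually a better parallelism estimate than the raw core count; it is Linux-specific, hence the fallback. A standalone sketch that also guards against platforms where the call does not exist:

    import multiprocessing
    import os

    try:
        usable = len(os.sched_getaffinity(0))  # CPUs this process is allowed to use
    except (AttributeError, OSError):          # e.g. macOS has no sched_getaffinity
        usable = multiprocessing.cpu_count()   # fall back to the number of online CPUs
    print(usable)
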
@@ -1590,33 +1632,89 @@ def set_process_name(name):
except:
pass
+def enable_loopback_networking():
+ # From bits/ioctls.h
+ SIOCGIFFLAGS = 0x8913
+ SIOCSIFFLAGS = 0x8914
+ SIOCSIFADDR = 0x8916
+ SIOCSIFNETMASK = 0x891C
+
+ # if.h
+ IFF_UP = 0x1
+ IFF_RUNNING = 0x40
+
+ # bits/socket.h
+ AF_INET = 2
+
+ # char ifr_name[IFNAMSIZ=16]
+ ifr_name = struct.pack("@16s", b"lo")
+ def netdev_req(fd, req, data = b""):
+ # Pad and add interface name
+ data = ifr_name + data + (b'\x00' * (16 - len(data)))
+ # Return all data after interface name
+ return fcntl.ioctl(fd, req, data)[16:]
+
+ with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) as sock:
+ fd = sock.fileno()
+
+ # struct sockaddr_in ifr_addr { unsigned short family; uint16_t sin_port ; uint32_t in_addr; }
+ req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 127, 0, 0, 1)
+ netdev_req(fd, SIOCSIFADDR, req)
+
+ # short ifr_flags
+ flags = struct.unpack_from('@h', netdev_req(fd, SIOCGIFFLAGS))[0]
+ flags |= IFF_UP | IFF_RUNNING
+ netdev_req(fd, SIOCSIFFLAGS, struct.pack('@h', flags))
+
+ # struct sockaddr_in ifr_netmask
+ req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 255, 0, 0, 0)
+ netdev_req(fd, SIOCSIFNETMASK, req)
+
+def disable_network(uid=None, gid=None):
+ """
+ Disable networking in the current process if the kernel supports it, else
+ just return after logging to debug. To do this we need to create a new user
+ namespace, then map back to the original uid/gid.
+ """
+ libc = ctypes.CDLL('libc.so.6')
+
+ # From sched.h
+ # New user namespace
+ CLONE_NEWUSER = 0x10000000
+ # New network namespace
+ CLONE_NEWNET = 0x40000000
+
+ if uid is None:
+ uid = os.getuid()
+ if gid is None:
+ gid = os.getgid()
+
+ ret = libc.unshare(CLONE_NEWNET | CLONE_NEWUSER)
+ if ret != 0:
+ logger.debug("System doesn't support disabling network without admin privs")
+ return
+ with open("/proc/self/uid_map", "w") as f:
+ f.write("%s %s 1" % (uid, uid))
+ with open("/proc/self/setgroups", "w") as f:
+ f.write("deny")
+ with open("/proc/self/gid_map", "w") as f:
+ f.write("%s %s 1" % (gid, gid))
+
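
One plausible way these two helpers fit together (a sketch, not necessarily how every caller uses them): record the current uid/gid, unshare into a new user plus network namespace, then bring up loopback inside that namespace so purely local sockets keep working:

    import os
    import bb.utils

    uid, gid = os.getuid(), os.getgid()
    bb.utils.disable_network(uid, gid)     # new user+net namespace, or a debug message if unsupported
    bb.utils.enable_loopback_networking()  # configure 127.0.0.1 on 'lo' inside the new namespace
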
def export_proxies(d):
+ from bb.fetch2 import get_fetcher_environment
""" export common proxies variables from datastore to environment """
- import os
-
- variables = ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY',
- 'ftp_proxy', 'FTP_PROXY', 'no_proxy', 'NO_PROXY',
- 'GIT_PROXY_COMMAND']
- exported = False
-
- for v in variables:
- if v in os.environ.keys():
- exported = True
- else:
- v_proxy = d.getVar(v)
- if v_proxy is not None:
- os.environ[v] = v_proxy
- exported = True
-
- return exported
-
+ newenv = get_fetcher_environment(d)
+ for v in newenv:
+ os.environ[v] = newenv[v]
def load_plugins(logger, plugins, pluginpath):
def load_plugin(name):
logger.debug('Loading plugin %s' % name)
spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] )
if spec:
- return spec.loader.load_module()
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
logger.debug('Loading plugins from %s...' % pluginpath)
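
spec.loader.load_module() has been deprecated for some time; the module_from_spec()/exec_module() sequence is the supported replacement. The same pattern works as a standalone helper (the helper name and plugin path are examples):

    import importlib.machinery
    import importlib.util

    def load_from_path(name, directory):
        """Import module 'name' from 'directory' without modifying sys.path."""
        spec = importlib.machinery.PathFinder.find_spec(name, path=[directory])
        if spec is None:
            raise ImportError("cannot find %s in %s" % (name, directory))
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod

    # plugin = load_from_path("myplugin", "/path/to/plugins")
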
@@ -1695,5 +1793,76 @@ def environment(**envvars):
for var in envvars:
if var in backup:
os.environ[var] = backup[var]
- else:
+ elif var in os.environ:
del os.environ[var]
+
+def is_local_uid(uid=''):
+ """
+ Check whether uid is a local one or not.
+ Can't use pwd module since it gets all UIDs, not local ones only.
+ """
+ if not uid:
+ uid = os.getuid()
+ with open('/etc/passwd', 'r') as f:
+ for line in f:
+ line_split = line.split(':')
+ if len(line_split) < 3:
+ continue
+ if str(uid) == line_split[2]:
+ return True
+ return False
+
+def mkstemp(suffix=None, prefix=None, dir=None, text=False):
+ """
+ Generates a unique filename, independent of time.
+
+ mkstemp() in glibc (at least) generates unique file names based on the
+ current system time. When combined with highly parallel builds, and
+ operating over NFS (e.g. shared sstate/downloads) this can result in
+ conflicts and race conditions.
+
+ This function adds additional entropy to the file name so that a collision
+ is independent of time and thus extremely unlikely.
+ """
+ entropy = "".join(random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=20))
+ if prefix:
+ prefix = prefix + entropy
+ else:
+ prefix = tempfile.gettempprefix() + entropy
+ return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)
+
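
The wrapper behaves like tempfile.mkstemp(): it returns an open file descriptor and the path, and the caller is responsible for both. A usage sketch with a hypothetical shared download directory:

    import os
    import bb.utils

    fd, path = bb.utils.mkstemp(suffix=".part", dir="/nfs/shared/downloads")
    try:
        os.write(fd, b"scratch data")
    finally:
        os.close(fd)
        os.unlink(path)
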
+def path_is_descendant(descendant, ancestor):
+ """
+ Returns True if the path `descendant` is a descendant of `ancestor`
+ (including being equivalent to `ancestor` itself). Otherwise returns False.
+ Correctly accounts for symlinks, bind mounts, etc. by using
+ os.path.samestat() to compare paths
+
+ May raise any exception that os.stat() raises
+ """
+
+ ancestor_stat = os.stat(ancestor)
+
+ # Recurse up each directory component of the descendant to see if it is
+ # equivalent to the ancestor
+ check_dir = os.path.abspath(descendant).rstrip("/")
+ while check_dir:
+ check_stat = os.stat(check_dir)
+ if os.path.samestat(check_stat, ancestor_stat):
+ return True
+ check_dir = os.path.dirname(check_dir).rstrip("/")
+
+ return False
+
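
Because the comparison uses os.path.samestat(), symlinks and bind mounts that lead back into the ancestor are treated as descendants, unlike a plain string prefix check. A sketch, assuming the example paths actually exist (os.stat() is called on each component):

    import bb.utils

    bb.utils.path_is_descendant("/srv/build/tmp/work", "/srv/build")  # True
    bb.utils.path_is_descendant("/srv/build", "/srv/build")           # True - a path is its own descendant
    bb.utils.path_is_descendant("/home/user", "/srv/build")           # False
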
+# If we don't have a timeout of some kind and a process/thread exits badly (for example
+# OOM killed) and held a lock, we'd just hang in the lock futex forever. It is better
+# we exit at some point than hang. 5 minutes with no progress means we're probably deadlocked.
+@contextmanager
+def lock_timeout(lock):
+ held = lock.acquire(timeout=5*60)
+ try:
+ if not held:
+ os._exit(1)
+ yield held
+ finally:
+ lock.release()
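
Usage mirrors a plain 'with lock:' block, except that a process which cannot take the lock within five minutes exits hard instead of hanging; the trade-off is a clear failure over a silent deadlock. A sketch assuming a threading.Lock shared between worker threads (the function is made up):

    import threading
    import bb.utils

    status_lock = threading.Lock()

    def update_status(new_state):
        # Exits the whole process via os._exit(1) if the lock is not acquired in time.
        with bb.utils.lock_timeout(status_lock):
            pass  # mutate the shared state here
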