Diffstat (limited to 'meta/classes-recipe')
 meta/classes-recipe/allarch.bbclass | 4
 meta/classes-recipe/autotools.bbclass | 11
 meta/classes-recipe/baremetal-image.bbclass | 18
 meta/classes-recipe/cargo-update-recipe-crates.bbclass | 79
 meta/classes-recipe/cargo.bbclass | 16
 meta/classes-recipe/cargo_c.bbclass | 41
 meta/classes-recipe/cargo_common.bbclass | 113
 meta/classes-recipe/cmake-qemu.bbclass | 32
 meta/classes-recipe/cmake.bbclass | 78
 meta/classes-recipe/cml1.bbclass | 30
 meta/classes-recipe/core-image.bbclass | 4
 meta/classes-recipe/devicetree.bbclass | 11
 meta/classes-recipe/devupstream.bbclass | 2
 meta/classes-recipe/distrooverrides.bbclass | 38
 meta/classes-recipe/externalsrc.bbclass | 269
 meta/classes-recipe/fontcache.bbclass | 1
 meta/classes-recipe/fs-uuid.bbclass | 2
 meta/classes-recipe/gi-docgen.bbclass | 8
 meta/classes-recipe/github-releases.bbclass | 3
 meta/classes-recipe/glide.bbclass | 15
 meta/classes-recipe/gnomebase.bbclass | 7
 meta/classes-recipe/go-mod.bbclass | 6
 meta/classes-recipe/go.bbclass | 23
 meta/classes-recipe/goarch.bbclass | 28
 meta/classes-recipe/gobject-introspection-data.bbclass | 5
 meta/classes-recipe/gobject-introspection.bbclass | 15
 meta/classes-recipe/gtk-doc.bbclass | 21
 meta/classes-recipe/gtk-icon-cache.bbclass | 2
 meta/classes-recipe/image-artifact-names.bbclass | 17
 meta/classes-recipe/image-combined-dbg.bbclass | 2
 meta/classes-recipe/image-live.bbclass | 5
 meta/classes-recipe/image.bbclass | 40
 meta/classes-recipe/image_types.bbclass | 172
 meta/classes-recipe/image_types_wic.bbclass | 25
 meta/classes-recipe/kernel-arch.bbclass | 14
 meta/classes-recipe/kernel-artifact-names.bbclass | 2
 meta/classes-recipe/kernel-devicetree.bbclass | 50
 meta/classes-recipe/kernel-fitimage.bbclass | 210
 meta/classes-recipe/kernel-module-split.bbclass | 73
 meta/classes-recipe/kernel-uboot.bbclass | 2
 meta/classes-recipe/kernel-yocto.bbclass | 93
 meta/classes-recipe/kernel.bbclass | 126
 meta/classes-recipe/kernelsrc.bbclass | 1
 meta/classes-recipe/libc-package.bbclass | 6
 meta/classes-recipe/license_image.bbclass | 50
 meta/classes-recipe/linux-kernel-base.bbclass | 15
 meta/classes-recipe/linuxloader.bbclass | 2
 meta/classes-recipe/live-vm-common.bbclass | 4
 meta/classes-recipe/manpages.bbclass | 12
 meta/classes-recipe/meson-routines.bbclass | 2
 meta/classes-recipe/meson.bbclass | 26
 meta/classes-recipe/module-base.bbclass | 1
 meta/classes-recipe/module.bbclass | 12
 meta/classes-recipe/multilib_script.bbclass | 11
 meta/classes-recipe/native.bbclass | 11
 meta/classes-recipe/nativesdk.bbclass | 5
 meta/classes-recipe/npm.bbclass | 88
 meta/classes-recipe/overlayfs-etc.bbclass | 88
 meta/classes-recipe/overlayfs.bbclass | 142
 meta/classes-recipe/packagegroup.bbclass | 5
 meta/classes-recipe/perl-version.bbclass | 6
 meta/classes-recipe/populate_sdk_base.bbclass | 34
 meta/classes-recipe/populate_sdk_ext.bbclass | 103
 meta/classes-recipe/ptest-cargo.bbclass | 138
 meta/classes-recipe/ptest-perl.bbclass | 2
 meta/classes-recipe/ptest.bbclass | 12
 meta/classes-recipe/pypi.bbclass | 19
 meta/classes-recipe/python3-dir.bbclass | 2
 meta/classes-recipe/python3targetconfig.bbclass | 18
 meta/classes-recipe/python_hatchling.bbclass | 18
 meta/classes-recipe/python_maturin.bbclass | 17
 meta/classes-recipe/python_mesonpy.bbclass | 52
 meta/classes-recipe/python_pep517.bbclass | 9
 meta/classes-recipe/python_setuptools3_rust.bbclass | 4
 meta/classes-recipe/qemu.bbclass | 2
 meta/classes-recipe/qemuboot.bbclass | 28
 meta/classes-recipe/rootfs-postcommands.bbclass | 227
 meta/classes-recipe/rootfs_ipk.bbclass | 2
 meta/classes-recipe/rootfs_rpm.bbclass | 4
 meta/classes-recipe/rootfsdebugfiles.bbclass | 2
 meta/classes-recipe/rust-bin.bbclass | 154
 meta/classes-recipe/rust-common.bbclass | 55
 meta/classes-recipe/rust-target-config.bbclass | 97
 meta/classes-recipe/scons.bbclass | 8
 meta/classes-recipe/setuptools3-base.bbclass | 19
 meta/classes-recipe/setuptools3.bbclass | 4
 meta/classes-recipe/setuptools3_legacy.bbclass | 10
 meta/classes-recipe/siteinfo.bbclass | 24
 meta/classes-recipe/systemd-boot-cfg.bbclass | 2
 meta/classes-recipe/systemd.bbclass | 7
 meta/classes-recipe/testexport.bbclass | 176
 meta/classes-recipe/testimage.bbclass | 95
 meta/classes-recipe/toolchain-scripts.bbclass | 20
 meta/classes-recipe/uboot-config.bbclass | 19
 meta/classes-recipe/uboot-extlinux-config.bbclass | 25
 meta/classes-recipe/uboot-sign.bbclass | 487
 meta/classes-recipe/update-alternatives.bbclass | 12
 meta/classes-recipe/update-rc.d.bbclass | 4
 meta/classes-recipe/waf.bbclass | 17
 meta/classes-recipe/xmlcatalog.bbclass | 8
 100 files changed, 2521 insertions(+), 1615 deletions(-)
diff --git a/meta/classes-recipe/allarch.bbclass b/meta/classes-recipe/allarch.bbclass
index 9138f40ed8..e429b92437 100644
--- a/meta/classes-recipe/allarch.bbclass
+++ b/meta/classes-recipe/allarch.bbclass
@@ -63,9 +63,9 @@ python () {
d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
d.appendVarFlag("do_package", "vardepsexclude", " package_do_shlibs")
+
+ d.setVar("qemu_wrapper_cmdline", "def qemu_wrapper_cmdline(data, rootfs_path, library_paths):\n return 'false'")
elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
}
-def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
- return 'false'
diff --git a/meta/classes-recipe/autotools.bbclass b/meta/classes-recipe/autotools.bbclass
index a4c1c4be41..7ee1b0d9c7 100644
--- a/meta/classes-recipe/autotools.bbclass
+++ b/meta/classes-recipe/autotools.bbclass
@@ -37,13 +37,14 @@ inherit siteinfo
export CONFIG_SITE
acpaths ?= "default"
-EXTRA_AUTORECONF = "--exclude=autopoint --exclude=gtkdocize"
+EXTRA_AUTORECONF += "--exclude=autopoint"
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
# When building tools for use at build-time it's recommended for the build
# system to use these variables when cross-compiling.
-# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
+# https://www.gnu.org/software/autoconf-archive/ax_prog_cc_for_build.html
+# https://stackoverflow.com/questions/24201260/autotools-cross-compilation-and-generated-sources/24208587#24208587
export CPP_FOR_BUILD = "${BUILD_CPP}"
export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
@@ -76,7 +77,7 @@ CONFIGUREOPTS = " --build=${BUILD_SYS} \
--localstatedir=${localstatedir} \
--libdir=${libdir} \
--includedir=${includedir} \
- --oldincludedir=${oldincludedir} \
+ --oldincludedir=${includedir} \
--infodir=${infodir} \
--mandir=${mandir} \
--disable-silent-rules \
@@ -157,7 +158,7 @@ python autotools_aclocals () {
do_configure[file-checksums] += "${@' '.join(siteinfo_get_files(d, sysrootcache=False)[1])}"
-CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
+CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in *.m4 Makefile.am"
autotools_do_configure() {
# WARNING: gross hack follows:
@@ -253,8 +254,6 @@ autotools_do_install() {
fi
}
-inherit siteconfig
-
EXPORT_FUNCTIONS do_configure do_compile do_install
B = "${WORKDIR}/build"
diff --git a/meta/classes-recipe/baremetal-image.bbclass b/meta/classes-recipe/baremetal-image.bbclass
index 3a979f2ed1..4e7d413626 100644
--- a/meta/classes-recipe/baremetal-image.bbclass
+++ b/meta/classes-recipe/baremetal-image.bbclass
@@ -86,6 +86,11 @@ QB_DEFAULT_FSTYPE ?= "bin"
QB_DTB ?= ""
QB_OPT_APPEND:append = " -nographic"
+# QEMU x86 requires an .elf kernel to boot rather than a .bin
+QB_DEFAULT_KERNEL:qemux86 ?= "${IMAGE_LINK_NAME}.elf"
+# QEMU x86-64 refuses to boot from -kernel; it needs a multiboot-compatible image
+QB_DEFAULT_FSTYPE:qemux86-64 ?= "iso"
+
# RISC-V tunes set the BIOS; unset it and instruct QEMU to
# ignore the BIOS and boot from -kernel
QB_DEFAULT_BIOS:qemuriscv64 = ""
@@ -98,7 +103,18 @@ QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
# since medlow can only access addresses below 0x80000000 and RAM
# starts at 0x80000000 on RISC-V 64
# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB)
-CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
+TARGET_CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
+
+
+## Emulate image.bbclass
+# Handle inherits of any of the image classes we need
+IMAGE_CLASSES ??= ""
+IMGCLASSES = " ${IMAGE_CLASSES}"
+inherit_defer ${IMGCLASSES}
+# Set defaults to satisfy IMAGE_FEATURES check
+IMAGE_FEATURES ?= ""
+IMAGE_FEATURES[type] = "list"
+IMAGE_FEATURES[validitems] += ""
# This next part is necessary to trick the build system into thinking
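As a usage sketch (recipe name hypothetical, not part of this diff), a baremetal application only needs to inherit the class and can then be booted with runqemu, picking up the QB_* defaults above:

    # my-baremetal-app.bb (illustrative)
    inherit baremetal-image
    # on qemux86, runqemu boots ${IMAGE_LINK_NAME}.elf via QB_DEFAULT_KERNEL;
    # on qemux86-64 it boots the iso image selected by QB_DEFAULT_FSTYPE

    $ MACHINE=qemux86 bitbake my-baremetal-app && runqemu my-baremetal-app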
diff --git a/meta/classes-recipe/cargo-update-recipe-crates.bbclass b/meta/classes-recipe/cargo-update-recipe-crates.bbclass
new file mode 100644
index 0000000000..8980137d02
--- /dev/null
+++ b/meta/classes-recipe/cargo-update-recipe-crates.bbclass
@@ -0,0 +1,79 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+##
+## Purpose:
+## This class is used to update the list of crates in SRC_URI
+## by reading Cargo.lock in the source tree.
+##
+## See meta/recipes-devtools/python/python3-bcrypt_*.bb for an example
+##
+## To perform the update: bitbake -c update_crates recipe-name
+
+addtask do_update_crates after do_patch
+do_update_crates[depends] = "python3-native:do_populate_sysroot"
+do_update_crates[nostamp] = "1"
+do_update_crates[doc] = "Update the recipe by reading Cargo.lock and writing ${THISDIR}/${BPN}-crates.inc"
+
+# The directory where to search for Cargo.lock files
+CARGO_LOCK_SRC_DIR ??= "${S}"
+
+do_update_crates() {
+ TARGET_FILE="${THISDIR}/${BPN}-crates.inc"
+
+ nativepython3 - <<EOF
+
+def get_crates(f):
+ import tomllib
+ c_list = '# from %s' % os.path.relpath(f, '${CARGO_LOCK_SRC_DIR}')
+ c_list += '\nSRC_URI += " \\\'
+ crates = tomllib.load(open(f, 'rb'))
+
+ # Build a list with crates info that have crates.io in the source
+ crates_candidates = list(filter(lambda c: 'crates.io' in c.get('source', ''), crates['package']))
+
+ if not crates_candidates:
+ raise ValueError("Unable to find any candidate crates that use crates.io")
+
+ # Update crates uri and their checksum, to avoid name clashing on the checksum
+ # we need to rename crates with name and version to have a unique key
+ cksum_list = ''
+ for c in crates_candidates:
+ rename = "%s-%s" % (c['name'], c['version'])
+ c_list += '\n crate://crates.io/%s/%s \\\' % (c['name'], c['version'])
+ if 'checksum' in c:
+ cksum_list += '\nSRC_URI[%s.sha256sum] = "%s"' % (rename, c['checksum'])
+
+ c_list += '\n"\n'
+ c_list += cksum_list
+ c_list += '\n'
+ return c_list
+
+import os
+crates = "# Autogenerated with 'bitbake -c update_crates ${PN}'\n\n"
+found = False
+for root, dirs, files in os.walk('${CARGO_LOCK_SRC_DIR}'):
+ # ignore git and patches directories
+ if root.startswith(os.path.join('${CARGO_LOCK_SRC_DIR}', '.pc')):
+ continue
+ if root.startswith(os.path.join('${CARGO_LOCK_SRC_DIR}', '.git')):
+ continue
+ for file in files:
+ if file == 'Cargo.lock':
+ try:
+ cargo_lock_path = os.path.join(root, file)
+ crates += get_crates(os.path.join(root, file))
+ except Exception as e:
+ raise ValueError("Cannot parse '%s'" % cargo_lock_path) from e
+ else:
+ found = True
+if not found:
+ raise ValueError("Unable to find any Cargo.lock in ${CARGO_LOCK_SRC_DIR}")
+open("${TARGET_FILE}", 'w').write(crates)
+EOF
+
+ bbnote "Successfully update crates inside '${TARGET_FILE}'"
+}
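A minimal sketch of a consuming recipe, modeled on the python3-bcrypt example named in the header (the .inc name follows the convention hard-coded in do_update_crates):

    inherit cargo-update-recipe-crates
    require ${BPN}-crates.inc

    # regenerate the .inc after a version bump:
    $ bitbake -c update_crates python3-bcrypt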
diff --git a/meta/classes-recipe/cargo.bbclass b/meta/classes-recipe/cargo.bbclass
index d1e83518b5..0829a58dd9 100644
--- a/meta/classes-recipe/cargo.bbclass
+++ b/meta/classes-recipe/cargo.bbclass
@@ -30,16 +30,14 @@ B = "${WORKDIR}/build"
# where the issue occurred
export RUST_BACKTRACE = "1"
-# The directory of the Cargo.toml relative to the root directory, per default
-# assume there's a Cargo.toml directly in the root directory
-CARGO_SRC_DIR ??= ""
-
-# The actual path to the Cargo.toml
-MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
-
RUSTFLAGS ??= ""
BUILD_MODE = "${@['--release', ''][d.getVar('DEBUG_BUILD') == '1']}"
-CARGO_BUILD_FLAGS = "-v --target ${RUST_HOST_SYS} ${BUILD_MODE} --manifest-path=${MANIFEST_PATH}"
+# The --frozen flag prevents network access (required because only the
+# do_fetch task is allowed to touch the network) and requires an
+# up-to-date Cargo.lock file.
+# This forces the package being built to already ship a Cargo.lock; in
+# the end that is what we want, at least for build reproducibility.
+CARGO_BUILD_FLAGS = "-v --frozen --target ${RUST_HOST_SYS} ${BUILD_MODE} --manifest-path=${CARGO_MANIFEST_PATH}"
# This is based on the content of CARGO_BUILD_FLAGS and generally will need to
# change if CARGO_BUILD_FLAGS changes.
@@ -49,14 +47,12 @@ oe_cargo_build () {
export RUSTFLAGS="${RUSTFLAGS}"
bbnote "Using rust targets from ${RUST_TARGET_PATH}"
bbnote "cargo = $(which ${CARGO})"
- bbnote "rustc = $(which ${RUSTC})"
bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} $@"
"${CARGO}" build ${CARGO_BUILD_FLAGS} "$@"
}
do_compile[progress] = "outof:\s+(\d+)/(\d+)"
cargo_do_compile () {
- oe_cargo_fix_env
oe_cargo_build
}
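For a project whose Cargo.toml sits in a subdirectory of ${S}, a recipe sets CARGO_SRC_DIR (defined in cargo_common.bbclass below); with the defaults above, the flags then expand roughly as follows (target triple illustrative):

    CARGO_SRC_DIR = "rust"
    # CARGO_BUILD_FLAGS becomes approximately:
    #   -v --frozen --target x86_64-poky-linux-gnu --release --manifest-path=${S}/rust/Cargo.toml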
diff --git a/meta/classes-recipe/cargo_c.bbclass b/meta/classes-recipe/cargo_c.bbclass
new file mode 100644
index 0000000000..ef431634a2
--- /dev/null
+++ b/meta/classes-recipe/cargo_c.bbclass
@@ -0,0 +1,41 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+##
+## Purpose:
+## This class is used by any recipes that want to compile a C ABI compatible
+## library with header and pkg config file
+
+inherit cargo pkgconfig
+
+# the binaries we will use
+CARGO_C_BUILD = "cargo-cbuild"
+CARGO_C_INSTALL = "cargo-cinstall"
+
+# We need cargo-c to compile for the target
+BASEDEPENDS:append = " cargo-c-native"
+
+do_compile[progress] = "outof:\s+(\d+)/(\d+)"
+cargo_c_do_compile() {
+ oe_cargo_fix_env
+ export RUSTFLAGS="${RUSTFLAGS}"
+ bbnote "Using rust targets from ${RUST_TARGET_PATH}"
+ bbnote "cargo-cbuild = $(which ${CARGO_C_BUILD})"
+ bbnote "${CARGO_C_BUILD} cbuild ${CARGO_BUILD_FLAGS}"
+ "${CARGO_C_BUILD}" cbuild ${CARGO_BUILD_FLAGS}
+}
+
+cargo_c_do_install() {
+ oe_cargo_fix_env
+ export RUSTFLAGS="${RUSTFLAGS}"
+ bbnote "cargo-cinstall = $(which ${CARGO_C_INSTALL})"
+ "${CARGO_C_INSTALL}" cinstall ${CARGO_BUILD_FLAGS} \
+ --destdir ${D} \
+ --prefix ${prefix} \
+ --library-type cdylib
+}
+
+EXPORT_FUNCTIONS do_compile do_install
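A hedged sketch of a consumer: the recipe inherits cargo_c instead of cargo, and cargo-cbuild/cargo-cinstall take care of producing and staging the C ABI artifacts:

    inherit cargo_c
    # do_install runs 'cargo cinstall ... --destdir ${D} --prefix ${prefix} --library-type cdylib',
    # so the cdylib, header and pkg-config file land under ${D}${prefix} ready for packaging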
diff --git a/meta/classes-recipe/cargo_common.bbclass b/meta/classes-recipe/cargo_common.bbclass
index eec7710a4c..78440c810b 100644
--- a/meta/classes-recipe/cargo_common.bbclass
+++ b/meta/classes-recipe/cargo_common.bbclass
@@ -18,7 +18,7 @@
inherit rust-common
# Where we download our registry and dependencies to
-export CARGO_HOME = "${WORKDIR}/cargo_home"
+export CARGO_HOME = "${UNPACKDIR}/cargo_home"
# The pkg-config-rs library used by cargo build scripts disables itself when
# cross compiling unless this is defined. We set up pkg-config appropriately
@@ -28,12 +28,22 @@ export PKG_CONFIG_ALLOW_CROSS = "1"
# Don't instruct cargo to use crates downloaded by bitbake. Some rust packages,
# for example the rust compiler itself, come with their own vendored sources.
# Specifying two [source.crates-io] will not work.
-CARGO_DISABLE_BITBAKE_VENDORING ?= "0"
+CARGO_DISABLE_BITBAKE_VENDORING ??= "0"
# Used by libstd-rs to point to the vendor dir included in rustc src
-CARGO_VENDORING_DIRECTORY ?= "${CARGO_HOME}/bitbake"
+CARGO_VENDORING_DIRECTORY ??= "${CARGO_HOME}/bitbake"
-CARGO_RUST_TARGET_CCLD ?= "${RUST_TARGET_CCLD}"
+# The directory of the Cargo.toml relative to the root directory, per default
+# assume there's a Cargo.toml directly in the root directory
+CARGO_SRC_DIR ??= ""
+
+# The actual path to the Cargo.toml
+CARGO_MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
+
+# Path to Cargo.lock
+CARGO_LOCK_PATH ??= "${@ os.path.join(os.path.dirname(d.getVar('CARGO_MANIFEST_PATH', True)), 'Cargo.lock')}"
+
+CARGO_RUST_TARGET_CCLD ??= "${RUST_TARGET_CCLD}"
cargo_common_do_configure () {
mkdir -p ${CARGO_HOME}/bitbake
@@ -56,7 +66,7 @@ cargo_common_do_configure () {
[source.crates-io]
replace-with = "bitbake"
- local-registry = "/nonexistant"
+ local-registry = "/nonexistent"
EOF
fi
@@ -88,7 +98,7 @@ cargo_common_do_configure () {
EOF
fi
- if [ "${RUST_TARGET_SYS}" != "${RUST_BUILD_SYS}" -a "${RUST_TARGET_SYS}" != "${RUST_HOST_SYS}"]; then
+ if [ "${RUST_TARGET_SYS}" != "${RUST_BUILD_SYS}" -a "${RUST_TARGET_SYS}" != "${RUST_HOST_SYS}" ]; then
cat <<- EOF >> ${CARGO_HOME}/config
# TARGET_SYS
@@ -103,7 +113,7 @@ cargo_common_do_configure () {
cat <<- EOF >> ${CARGO_HOME}/config
[build]
- # Use out of tree build destination to avoid poluting the source tree
+ # Use out of tree build destination to avoid polluting the source tree
target-dir = "${B}/target"
EOF
fi
@@ -116,6 +126,83 @@ cargo_common_do_configure () {
EOF
}
+python cargo_common_do_patch_paths() {
+ import shutil
+
+ cargo_config = os.path.join(d.getVar("CARGO_HOME"), "config")
+ if not os.path.exists(cargo_config):
+ return
+
+ src_uri = (d.getVar('SRC_URI') or "").split()
+ if len(src_uri) == 0:
+ return
+
+ patches = dict()
+ workdir = d.getVar('UNPACKDIR')
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ for url in fetcher.urls:
+ ud = fetcher.ud[url]
+ if ud.type == 'git':
+ name = ud.parm.get('name')
+ destsuffix = ud.parm.get('destsuffix')
+ if name is not None and destsuffix is not None:
+ if ud.user:
+ repo = '%s://%s@%s%s' % (ud.proto, ud.user, ud.host, ud.path)
+ else:
+ repo = '%s://%s%s' % (ud.proto, ud.host, ud.path)
+ path = '%s = { path = "%s" }' % (name, os.path.join(workdir, destsuffix))
+ patches.setdefault(repo, []).append(path)
+
+ with open(cargo_config, "a+") as config:
+ for k, v in patches.items():
+ print('\n[patch."%s"]' % k, file=config)
+ for name in v:
+ print(name, file=config)
+
+ if not patches:
+ return
+
+ # A Cargo.lock file is needed to be sure that the artifacts
+ # downloaded by the fetch steps are those the project expects and
+ # that any patches are correctly applied. Moreover, since we do not
+ # want this file modified (for reproducibility), we prevent that with
+ # the --frozen flag (in CARGO_BUILD_FLAGS); raising a clear error here
+ # is better than letting cargo report (when the file is missing)
+ # "Cargo.lock should be modified but --frozen was given"
+
+ lockfile = d.getVar("CARGO_LOCK_PATH", True)
+ if not os.path.exists(lockfile):
+ bb.fatal(f"{lockfile} file doesn't exist")
+
+ # There are patched files, so Cargo.lock would need to be modified,
+ # but we pass --frozen; handle those modifications here instead.
+ #
+ # Note that a "better" (more elegant?) approach would have been to run
+ # cargo update for the patched packages:
+ # cargo update --offline -p package_1 -p package_2
+ # But this is not possible, since it requires cargo's local git db
+ # to be populated, and it is not, as we fetch the git repos ourselves.
+
+ lockfile_orig = lockfile + ".orig"
+ if not os.path.exists(lockfile_orig):
+ shutil.copy(lockfile, lockfile_orig)
+
+ newlines = []
+ with open(lockfile_orig, "r") as f:
+ for line in f.readlines():
+ if not line.startswith("source = \"git"):
+ newlines.append(line)
+
+ with open(lockfile, "w") as f:
+ f.writelines(newlines)
+}
+do_configure[postfuncs] += "cargo_common_do_patch_paths"
+
+do_compile:prepend () {
+ oe_cargo_fix_env
+}
+
oe_cargo_fix_env () {
export CC="${RUST_TARGET_CC}"
export CXX="${RUST_TARGET_CXX}"
@@ -137,3 +224,15 @@ oe_cargo_fix_env () {
EXTRA_OECARGO_PATHS ??= ""
EXPORT_FUNCTIONS do_configure
+
+# The culprit for this setting is the libc crate,
+# which as of Jun 2023 calls directly into 32 bit time functions in glibc,
+# bypassing all of glibc provisions to choose the right Y2038-safe functions. As
+# rust components statically link with that crate, pretty much everything
+# is affected, and so there's no point trying to have recipe-specific
+# INSANE_SKIP entries.
+#
+# Upstream ticket and PR:
+# https://github.com/rust-lang/libc/issues/3223
+# https://github.com/rust-lang/libc/pull/3175
+INSANE_SKIP:append = " 32bit-time"
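For illustration (repository and crate name hypothetical), a SRC_URI git entry carrying both name= and destsuffix= makes cargo_common_do_patch_paths append a fragment like this to ${CARGO_HOME}/config, while the matching 'source = "git+..."' lines are stripped from Cargo.lock (the pristine copy is kept as Cargo.lock.orig):

    SRC_URI += "git://github.com/example/foo.git;protocol=https;branch=main;name=foo;destsuffix=foo"

    # resulting config fragment:
    [patch."https://github.com/example/foo.git"]
    foo = { path = "${UNPACKDIR}/foo" }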
diff --git a/meta/classes-recipe/cmake-qemu.bbclass b/meta/classes-recipe/cmake-qemu.bbclass
new file mode 100644
index 0000000000..46a89e2827
--- /dev/null
+++ b/meta/classes-recipe/cmake-qemu.bbclass
@@ -0,0 +1,32 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+# Not all platforms are supported by Qemu. Using qemu-user therefore
+# involves a certain risk, which is also the reason why this feature
+# is not part of the main cmake class by default.
+#
+# One use case is the execution of cross-compiled unit tests with CTest
+# on the build machine. If CMAKE_EXEWRAPPER_ENABLED is configured,
+# cmake --build --target test
+# works transparently with qemu-user. If the cmake project is developed
+# with this use case in mind, this also works nicely from an IDE
+# configured to use cmake-native for cross-compiling.
+
+inherit qemu cmake
+
+DEPENDS:append:class-target = "${@' qemu-native' if bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', True, False, d) else ''}"
+
+cmake_do_generate_toolchain_file:append:class-target() {
+ if [ "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}" ]; then
+ # Write out a qemu wrapper that will be used as exe_wrapper so that cmake
+ # can run target helper binaries through that. This also allows to execute ctest.
+ qemu_binary="${@qemu_wrapper_cmdline(d, '${STAGING_DIR_HOST}', ['${STAGING_DIR_HOST}/${libdir}','${STAGING_DIR_HOST}/${base_libdir}'])}"
+ echo "#!/bin/sh" > "${WORKDIR}/cmake-qemuwrapper"
+ echo "$qemu_binary \"\$@\"" >> "${WORKDIR}/cmake-qemuwrapper"
+ chmod +x "${WORKDIR}/cmake-qemuwrapper"
+ echo "set( CMAKE_CROSSCOMPILING_EMULATOR ${WORKDIR}/cmake-qemuwrapper)" \
+ >> ${WORKDIR}/toolchain.cmake
+ fi
+}
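Usage sketch: a recipe swaps the plain cmake inherit for cmake-qemu; the generated toolchain file then points CMAKE_CROSSCOMPILING_EMULATOR at the wrapper, so CTest runs cross-compiled binaries through qemu-user:

    inherit cmake-qemu
    # after do_configure, 'cmake --build ${B} --target test' executes the
    # target test binaries via ${WORKDIR}/cmake-qemuwrapper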
diff --git a/meta/classes-recipe/cmake.bbclass b/meta/classes-recipe/cmake.bbclass
index 554b948c32..e1c3d7ddb5 100644
--- a/meta/classes-recipe/cmake.bbclass
+++ b/meta/classes-recipe/cmake.bbclass
@@ -51,17 +51,16 @@ OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}"
OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}"
# clear compiler vars for allarch to avoid sig hash difference
-OECMAKE_C_COMPILER_allarch = ""
-OECMAKE_C_COMPILER_LAUNCHER_allarch = ""
-OECMAKE_CXX_COMPILER_allarch = ""
-OECMAKE_CXX_COMPILER_LAUNCHER_allarch = ""
+OECMAKE_C_COMPILER:allarch = ""
+OECMAKE_C_COMPILER_LAUNCHER:allarch = ""
+OECMAKE_CXX_COMPILER:allarch = ""
+OECMAKE_CXX_COMPILER_LAUNCHER:allarch = ""
OECMAKE_RPATH ?= ""
OECMAKE_PERLNATIVE_DIR ??= ""
OECMAKE_EXTRA_ROOT_PATH ?= ""
OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
-OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM:class-native = "BOTH"
EXTRA_OECMAKE:append = " ${PACKAGECONFIG_CONFARGS}"
@@ -73,6 +72,8 @@ OECMAKE_TARGET_COMPILE ?= "all"
OECMAKE_TARGET_INSTALL ?= "install"
def map_host_os_to_system_name(host_os):
+ if host_os.startswith('darwin'):
+ return 'Darwin'
if host_os.startswith('mingw'):
return 'Windows'
if host_os.startswith('linux'):
@@ -91,10 +92,14 @@ def map_host_arch_to_uname_arch(host_arch):
return "ppc64"
return host_arch
+
cmake_do_generate_toolchain_file() {
if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
+ else
+ cmake_sysroot="set( CMAKE_SYSROOT \"${RECIPE_SYSROOT}\" )"
fi
+
cat > ${WORKDIR}/toolchain.cmake <<EOF
# CMake system name must be something like "Linux".
# This is important for cross-compiling.
@@ -119,13 +124,15 @@ set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
# only search in the paths provided so cmake doesn't pick
# up libraries and tools from the native build machine
-set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
+set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${COREBASE}/scripts ${HOSTTOOLS_DIR} )
set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
set( CMAKE_PROGRAM_PATH "/" )
+$cmake_sysroot
+
# Use qt.conf settings
set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
@@ -151,10 +158,33 @@ EOF
addtask generate_toolchain_file after do_patch before do_configure
-CONFIGURE_FILES = "CMakeLists.txt"
+CONFIGURE_FILES = "CMakeLists.txt *.cmake"
do_configure[cleandirs] = "${@d.getVar('B') if d.getVar('S') != d.getVar('B') else ''}"
+OECMAKE_ARGS = "\
+ -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
+ -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
+ -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
+ -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
+ -DPYTHON_EXECUTABLE:PATH=${PYTHON} \
+ -DPython_EXECUTABLE:PATH=${PYTHON} \
+ -DPython3_EXECUTABLE:PATH=${PYTHON} \
+ -DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \
+ -DCMAKE_INSTALL_SO_NO_EXE=0 \
+ -DCMAKE_TOOLCHAIN_FILE:FILEPATH=${WORKDIR}/toolchain.cmake \
+ -DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
+ -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
+ -DFETCHCONTENT_FULLY_DISCONNECTED=ON \
+ -DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON \
+"
+
cmake_do_configure() {
if [ "${OECMAKE_BUILDPATH}" ]; then
bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
@@ -175,25 +205,7 @@ cmake_do_configure() {
${OECMAKE_GENERATOR_ARGS} \
$oecmake_sitefile \
${OECMAKE_SOURCEPATH} \
- -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
- -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
- -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d. getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
- -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
- -DPYTHON_EXECUTABLE:PATH=${PYTHON} \
- -DPython_EXECUTABLE:PATH=${PYTHON} \
- -DPython3_EXECUTABLE:PATH=${PYTHON} \
- -DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \
- -DCMAKE_INSTALL_SO_NO_EXE=0 \
- -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
- -DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
- -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
- -DFETCHCONTENT_FULLY_DISCONNECTED=ON \
+ ${OECMAKE_ARGS} \
${EXTRA_OECMAKE} \
-Wno-dev
}
@@ -212,12 +224,24 @@ cmake_runcmake_build() {
eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
}
+# Install an already-generated project binary tree. Not checking the compile
+# dependencies again is particularly important for SDK use cases.
+cmake_runcmake_install() {
+ bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --install '${B}'
+ eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --install '${B}'
+}
+
cmake_do_compile() {
cmake_runcmake_build --target ${OECMAKE_TARGET_COMPILE}
}
cmake_do_install() {
- DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL}
+ if [ "${OECMAKE_TARGET_INSTALL}" = "install" ]; then
+ DESTDIR='${D}' cmake_runcmake_install
+ else
+ # Legacy path, which also supports custom install targets
+ DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL}
+ fi
}
EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
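Since the option block is now a single OECMAKE_ARGS variable, recipes keep customizing through the same knobs as before; an illustrative fragment:

    EXTRA_OECMAKE = "-DENABLE_FOO=ON"         # still appended after ${OECMAKE_ARGS}
    OECMAKE_TARGET_INSTALL = "install-strip"  # non-default target, takes the legacy build path above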
diff --git a/meta/classes-recipe/cml1.bbclass b/meta/classes-recipe/cml1.bbclass
index b79091383d..03e5fe6f47 100644
--- a/meta/classes-recipe/cml1.bbclass
+++ b/meta/classes-recipe/cml1.bbclass
@@ -21,7 +21,6 @@ cml1_do_configure() {
}
EXPORT_FUNCTIONS do_configure
-addtask configure after do_unpack do_patch before do_compile
inherit terminal
@@ -33,10 +32,15 @@ CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
KCONFIG_CONFIG_COMMAND ??= "menuconfig"
+KCONFIG_CONFIG_ENABLE_MENUCONFIG ??= "true"
KCONFIG_CONFIG_ROOTDIR ??= "${B}"
python do_menuconfig() {
import shutil
+ if not bb.utils.to_boolean(d.getVar("KCONFIG_CONFIG_ENABLE_MENUCONFIG")):
+ bb.fatal("do_menuconfig is disabled, please check KCONFIG_CONFIG_ENABLE_MENUCONFIG variable.")
+ return
+
config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
@@ -54,19 +58,17 @@ python do_menuconfig() {
# ensure that environment variables are overwritten with this tasks 'd' values
d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
- oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
+ oe_terminal("sh -c 'make %s; if [ \\$? -ne 0 ]; then echo \"Command failed.\"; printf \"Press any key to continue... \"; read r; fi'" % d.getVar('KCONFIG_CONFIG_COMMAND'),
d.getVar('PN') + ' Configuration', d)
- # FIXME this check can be removed when the minimum bitbake version has been bumped
- if hasattr(bb.build, 'write_taint'):
- try:
- newmtime = os.path.getmtime(config)
- except OSError:
- newmtime = 0
+ try:
+ newmtime = os.path.getmtime(config)
+ except OSError:
+ newmtime = 0
- if newmtime > mtime:
- bb.note("Configuration changed, recompile will be forced")
- bb.build.write_taint('do_compile', d)
+ if newmtime > mtime:
+ bb.plain("Changed configuration saved at:\n %s\nRecompile will be forced" % config)
+ bb.build.write_taint('do_compile', d)
}
do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
do_menuconfig[nostamp] = "1"
@@ -105,3 +107,9 @@ python do_diffconfig() {
do_diffconfig[nostamp] = "1"
do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
addtask diffconfig
+
+do_showconfig() {
+ bbplain "Config file written to ${KCONFIG_CONFIG_ROOTDIR}/.config"
+}
+do_showconfig[nostamp] = "1"
+addtask showconfig after do_configure
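A sketch of how the new knobs are used: a recipe or BSP can turn off the interactive path, and the generated configuration can still be located via the new task:

    KCONFIG_CONFIG_ENABLE_MENUCONFIG = "false"   # do_menuconfig now fails with a clear message

    $ bitbake virtual/kernel -c showconfig       # prints the path of the generated .config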
diff --git a/meta/classes-recipe/core-image.bbclass b/meta/classes-recipe/core-image.bbclass
index 4b5f2c99c4..40fc15cb04 100644
--- a/meta/classes-recipe/core-image.bbclass
+++ b/meta/classes-recipe/core-image.bbclass
@@ -62,6 +62,10 @@ FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
# Including image feature foo would replace the image features bar1 and bar2
IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
+# Do not install openssh complementary packages if either packagegroup-core-ssh-dropbear
+# or dropbear is installed, to avoid an openssh/dropbear conflict.
+# See [Yocto #14858] for more information.
+PACKAGE_EXCLUDE_COMPLEMENTARY:append = "${@bb.utils.contains_any('PACKAGE_INSTALL', 'packagegroup-core-ssh-dropbear dropbear', ' openssh', '', d)}"
# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
# An error exception would be raised if both image features foo and bar1(or bar2) are included
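bb.utils.contains_any(variable, checkvalues, truevalue, falsevalue, d) returns truevalue when any listed item appears in the variable, so the append above only excludes openssh when a dropbear provider is actually in PACKAGE_INSTALL. The same idiom, with hypothetical package names:

    # exclude 'foo' complementary packages whenever 'bar' or 'baz' is installed
    PACKAGE_EXCLUDE_COMPLEMENTARY:append = "${@bb.utils.contains_any('PACKAGE_INSTALL', 'bar baz', ' foo', '', d)}"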
diff --git a/meta/classes-recipe/devicetree.bbclass b/meta/classes-recipe/devicetree.bbclass
index ac1d284ccd..bd50d7fa1d 100644
--- a/meta/classes-recipe/devicetree.bbclass
+++ b/meta/classes-recipe/devicetree.bbclass
@@ -4,7 +4,7 @@
# SPDX-License-Identifier: MIT
#
-# This bbclass implements device tree compliation for user provided device tree
+# This bbclass implements device tree compilation for user provided device tree
# sources. The compilation of the device tree sources is the same as the kernel
# device tree compilation process, this includes being able to include sources
# from the kernel such as soc dtsi files or header files such as gpio.h. In
@@ -53,8 +53,10 @@ KERNEL_INCLUDE ??= " \
DT_INCLUDE[doc] = "Search paths to be made available to both the device tree compiler and preprocessor for inclusion."
DT_INCLUDE ?= "${DT_FILES_PATH} ${KERNEL_INCLUDE}"
-DT_FILES_PATH[doc] = "Defaults to source directory, can be used to select dts files that are not in source (e.g. generated)."
+DT_FILES_PATH[doc] = "Path to the directory containing dts files to build. Defaults to source directory."
DT_FILES_PATH ?= "${S}"
+DT_FILES[doc] = "Space-separated list of dts or dtb files (relative to DT_FILES_PATH) to build. If empty, all dts files are built."
+DT_FILES ?= ""
DT_PADDING_SIZE[doc] = "Size of padding on the device tree blob, used as extra space typically for additional properties during boot."
DT_PADDING_SIZE ??= "0x3000"
@@ -125,9 +127,12 @@ def devicetree_compile(dtspath, includes, d):
subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
python devicetree_do_compile() {
+ import re
includes = expand_includes("DT_INCLUDE", d)
+ dtfiles = d.getVar("DT_FILES").split()
+ dtfiles = [ re.sub(r"\.dtbo?$", ".dts", dtfile) for dtfile in dtfiles ]
listpath = d.getVar("DT_FILES_PATH")
- for dts in os.listdir(listpath):
+ for dts in dtfiles or os.listdir(listpath):
dtspath = os.path.join(listpath, dts)
try:
if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)):
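With the new variable a recipe can build a subset instead of every dts found in DT_FILES_PATH; .dtb/.dtbo names are mapped back to their .dts sources by the re.sub above. Illustrative:

    DT_FILES = "myboard.dts myoverlay.dtbo"
    # compiles only myboard.dts and myoverlay.dts (the latter as an overlay)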
diff --git a/meta/classes-recipe/devupstream.bbclass b/meta/classes-recipe/devupstream.bbclass
index 1529cc8fca..d941763fb7 100644
--- a/meta/classes-recipe/devupstream.bbclass
+++ b/meta/classes-recipe/devupstream.bbclass
@@ -46,7 +46,7 @@ python devupstream_virtclass_handler () {
pv = d.getVar("PV")
proto_marker = "+" + uri.scheme
if proto_marker not in pv and not d.getVar("PV:class-devupstream"):
- d.setVar("PV", pv + proto_marker + "${SRCPV}")
+ d.setVar("PV", pv + proto_marker)
if variant == "native":
pn = d.getVar("PN")
diff --git a/meta/classes-recipe/distrooverrides.bbclass b/meta/classes-recipe/distrooverrides.bbclass
deleted file mode 100644
index 8d9d7cda7d..0000000000
--- a/meta/classes-recipe/distrooverrides.bbclass
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-# Turns certain DISTRO_FEATURES into overrides with the same
-# name plus a df- prefix. Ensures that these special
-# distro features remain set also for native and nativesdk
-# recipes, so that these overrides can also be used there.
-#
-# This makes it simpler to write .bbappends that only change the
-# task signatures of the recipe if the change is really enabled,
-# for example with:
-# do_install:append:df-my-feature () { ... }
-# where "my-feature" is a DISTRO_FEATURE.
-#
-# The class is meant to be used in a layer.conf or distro
-# .inc file with:
-# INHERIT += "distrooverrides"
-# DISTRO_FEATURES_OVERRIDES += "my-feature"
-#
-# Beware that this part of OVERRIDES changes during parsing, so usage
-# of these overrides should be limited to .bb and .bbappend files,
-# because then DISTRO_FEATURES is final.
-
-DISTRO_FEATURES_OVERRIDES ?= ""
-DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
-Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
-
-DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
-DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
-
-# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
-# signature because of this line, then the task dependency on
-# OVERRIDES itself should be fixed. Excluding these two variables
-# with DISTROOVERRIDES[vardepsexclude] would just work around the problem.
-DISTROOVERRIDES .= "${@ ''.join([':df-' + x for x in sorted(set(d.getVar('DISTRO_FEATURES_OVERRIDES').split()) & set((d.getVar('DISTRO_FEATURES') or '').split()))]) }"
diff --git a/meta/classes-recipe/externalsrc.bbclass b/meta/classes-recipe/externalsrc.bbclass
deleted file mode 100644
index 51dbe9ea5a..0000000000
--- a/meta/classes-recipe/externalsrc.bbclass
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright (C) 2012 Linux Foundation
-# Author: Richard Purdie
-# Some code and influence taken from srctree.bbclass:
-# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
-#
-# SPDX-License-Identifier: MIT
-#
-# externalsrc.bbclass enables use of an existing source tree, usually external to
-# the build system to build a piece of software rather than the usual fetch/unpack/patch
-# process.
-#
-# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at the
-# directory you want to use containing the sources e.g. from local.conf for a recipe
-# called "myrecipe" you would do:
-#
-# INHERIT += "externalsrc"
-# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
-#
-# In order to make this class work for both target and native versions (or with
-# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
-# directory under the work directory (split source and build directories). This is
-# the default, but the build directory can be set to the source directory if
-# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
-#
-# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
-#
-
-SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
-EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
-
-python () {
- externalsrc = d.getVar('EXTERNALSRC')
- externalsrcbuild = d.getVar('EXTERNALSRC_BUILD')
-
- if externalsrc and not externalsrc.startswith("/"):
- bb.error("EXTERNALSRC must be an absolute path")
- if externalsrcbuild and not externalsrcbuild.startswith("/"):
- bb.error("EXTERNALSRC_BUILD must be an absolute path")
-
- # If this is the base recipe and EXTERNALSRC is set for it or any of its
- # derivatives, then enable BB_DONT_CACHE to force the recipe to always be
- # re-parsed so that the file-checksums function for do_compile is run every
- # time.
- bpn = d.getVar('BPN')
- classextend = (d.getVar('BBCLASSEXTEND') or '').split()
- if bpn == d.getVar('PN') or not classextend:
- if (externalsrc or
- ('native' in classextend and
- d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
- ('nativesdk' in classextend and
- d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
- ('cross' in classextend and
- d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
- d.setVar('BB_DONT_CACHE', '1')
-
- if externalsrc:
- import oe.recipeutils
- import oe.path
-
- d.setVar('S', externalsrc)
- if externalsrcbuild:
- d.setVar('B', externalsrcbuild)
- else:
- d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
-
- local_srcuri = []
- fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
- for url in fetch.urls:
- url_data = fetch.ud[url]
- parm = url_data.parm
- if (url_data.type == 'file' or
- url_data.type == 'npmsw' or url_data.type == 'crate' or
- 'type' in parm and parm['type'] == 'kmeta'):
- local_srcuri.append(url)
-
- d.setVar('SRC_URI', ' '.join(local_srcuri))
-
- # Dummy value because the default function can't be called with blank SRC_URI
- d.setVar('SRCPV', '999')
-
- if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
- d.setVar('CONFIGUREOPT_DEPTRACK', '')
-
- tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
-
- for task in tasks:
- if task.endswith("_setscene"):
- # sstate is never going to work for external source trees, disable it
- bb.build.deltask(task, d)
- elif os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
- # Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
- d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
-
- for funcname in [task, "base_" + task, "kernel_" + task]:
- # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
- cleandirs = oe.recipeutils.split_var_value(d.getVarFlag(funcname, 'cleandirs', False) or '')
- setvalue = False
- for cleandir in cleandirs[:]:
- if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
- cleandirs.remove(cleandir)
- setvalue = True
- if setvalue:
- d.setVarFlag(funcname, 'cleandirs', ' '.join(cleandirs))
-
- fetch_tasks = ['do_fetch', 'do_unpack']
- # If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
- # Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
- d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
-
- for task in d.getVar("SRCTREECOVEREDTASKS").split():
- if local_srcuri and task in fetch_tasks:
- continue
- bb.build.deltask(task, d)
- if task == 'do_unpack':
- # The reproducible build create_source_date_epoch_stamp function must
- # be run after the source is available and before the
- # do_deploy_source_date_epoch task. In the normal case, it's attached
- # to do_unpack as a postfuncs, but since we removed do_unpack (above)
- # we need to move the function elsewhere. The easiest thing to do is
- # move it into the prefuncs of the do_deploy_source_date_epoch task.
- # This is safe, as externalsrc runs with the source already unpacked.
- d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
-
- d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
- d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
-
- d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
- d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
-
- # We don't want the workdir to go away
- d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN'))
-
- bb.build.addtask('do_buildclean',
- 'do_clean' if d.getVar('S') == d.getVar('B') else None,
- None, d)
-
- # If B=S the same builddir is used even for different architectures.
- # Thus, use a shared CONFIGURESTAMPFILE and STAMP directory so that
- # change of do_configure task hash is correctly detected and stamps are
- # invalidated if e.g. MACHINE changes.
- if d.getVar('S') == d.getVar('B'):
- configstamp = '${TMPDIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}/configure.sstate'
- d.setVar('CONFIGURESTAMPFILE', configstamp)
- d.setVar('STAMP', '${STAMPS_DIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}')
- d.setVar('STAMPCLEAN', '${STAMPS_DIR}/work-shared/${PN}/*-*')
-}
-
-python externalsrc_configure_prefunc() {
- s_dir = d.getVar('S')
- # Create desired symlinks
- symlinks = (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()
- newlinks = []
- for symlink in symlinks:
- symsplit = symlink.split(':', 1)
- lnkfile = os.path.join(s_dir, symsplit[0])
- target = d.expand(symsplit[1])
- if len(symsplit) > 1:
- if os.path.islink(lnkfile):
- # Link already exists, leave it if it points to the right location already
- if os.readlink(lnkfile) == target:
- continue
- os.unlink(lnkfile)
- elif os.path.exists(lnkfile):
- # File/dir exists with same name as link, just leave it alone
- continue
- os.symlink(target, lnkfile)
- newlinks.append(symsplit[0])
- # Hide the symlinks from git
- try:
- git_exclude_file = os.path.join(s_dir, '.git/info/exclude')
- if os.path.exists(git_exclude_file):
- with open(git_exclude_file, 'r+') as efile:
- elines = efile.readlines()
- for link in newlinks:
- if link in elines or '/'+link in elines:
- continue
- efile.write('/' + link + '\n')
- except IOError as ioe:
- bb.note('Failed to hide EXTERNALSRC_SYMLINKS from git')
-}
-
-python externalsrc_compile_prefunc() {
- # Make it obvious that this is happening, since forgetting about it could lead to much confusion
- bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN'), d.getVar('EXTERNALSRC')))
-}
-
-do_buildclean[dirs] = "${S} ${B}"
-do_buildclean[nostamp] = "1"
-do_buildclean[doc] = "Call 'make clean' or equivalent in ${B}"
-externalsrc_do_buildclean() {
- if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
- rm -f ${@' '.join([x.split(':')[0] for x in (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()])}
- if [ "${CLEANBROKEN}" != "1" ]; then
- oe_runmake clean || die "make failed"
- fi
- else
- bbnote "nothing to do - no makefile found"
- fi
-}
-
-def srctree_hash_files(d, srcdir=None):
- import shutil
- import subprocess
- import tempfile
- import hashlib
-
- s_dir = srcdir or d.getVar('EXTERNALSRC')
- git_dir = None
-
- try:
- git_dir = os.path.join(s_dir,
- subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
- top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
- stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
- if git_dir == top_git_dir:
- git_dir = None
- except subprocess.CalledProcessError:
- pass
-
- ret = " "
- if git_dir is not None:
- oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1-%s' % d.getVar('PN'))
- with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
- # Clone index
- shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
- # Update our custom index
- env = os.environ.copy()
- env['GIT_INDEX_FILE'] = tmp_index.name
- subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
- git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
- submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
- for line in submodule_helper.splitlines():
- module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
- if os.path.isdir(module_dir):
- proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
- proc.communicate()
- proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
- stdout, _ = proc.communicate()
- git_sha1 += stdout.decode("utf-8")
- sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
- with open(oe_hash_file, 'w') as fobj:
- fobj.write(sha1)
- ret = oe_hash_file + ':True'
- else:
- ret = s_dir + '/*:True'
- return ret
-
-def srctree_configure_hash_files(d):
- """
- Get the list of files that should trigger do_configure to re-execute,
- based on the value of CONFIGURE_FILES
- """
- in_files = (d.getVar('CONFIGURE_FILES') or '').split()
- out_items = []
- search_files = []
- for entry in in_files:
- if entry.startswith('/'):
- out_items.append('%s:%s' % (entry, os.path.exists(entry)))
- else:
- search_files.append(entry)
- if search_files:
- s_dir = d.getVar('EXTERNALSRC')
- for root, _, files in os.walk(s_dir):
- for f in files:
- if f in search_files:
- out_items.append('%s:True' % os.path.join(root, f))
- return ' '.join(out_items)
-
-EXPORT_FUNCTIONS do_buildclean
diff --git a/meta/classes-recipe/fontcache.bbclass b/meta/classes-recipe/fontcache.bbclass
index 0d496b72dd..6f4978369d 100644
--- a/meta/classes-recipe/fontcache.bbclass
+++ b/meta/classes-recipe/fontcache.bbclass
@@ -13,6 +13,7 @@ PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
FONT_PACKAGES ??= "${PN}"
+FONT_PACKAGES:class-native = ""
FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils"
FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
FONTCONFIG_CACHE_PARAMS ?= "-v"
diff --git a/meta/classes-recipe/fs-uuid.bbclass b/meta/classes-recipe/fs-uuid.bbclass
index a9e7eb8c67..e215f06c80 100644
--- a/meta/classes-recipe/fs-uuid.bbclass
+++ b/meta/classes-recipe/fs-uuid.bbclass
@@ -10,7 +10,7 @@
def get_rootfs_uuid(d):
import subprocess
rootfs = d.getVar('ROOTFS')
- output = subprocess.check_output(['tune2fs', '-l', rootfs])
+ output = subprocess.check_output(['tune2fs', '-l', rootfs], text=True)
for line in output.split('\n'):
if line.startswith('Filesystem UUID:'):
uuid = line.split()[-1]
diff --git a/meta/classes-recipe/gi-docgen.bbclass b/meta/classes-recipe/gi-docgen.bbclass
index 8b7eaacea3..b178d1c387 100644
--- a/meta/classes-recipe/gi-docgen.bbclass
+++ b/meta/classes-recipe/gi-docgen.bbclass
@@ -8,9 +8,11 @@
# seems to be a successor to gtk-doc:
# https://gitlab.gnome.org/GNOME/gi-docgen
-# This variable is set to True if api-documentation is in
-# DISTRO_FEATURES, and False otherwise.
-GIDOCGEN_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'True', 'False', d)}"
+# True if api-documentation and gobject-introspection-data are in DISTRO_FEATURES,
+# and qemu-usermode is in MACHINE_FEATURES, False otherwise.
+GIDOCGEN_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation gobject-introspection-data', \
+ bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
+
# When building native recipes, disable gi-docgen, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
GIDOCGEN_ENABLED:class-native = "False"
diff --git a/meta/classes-recipe/github-releases.bbclass b/meta/classes-recipe/github-releases.bbclass
new file mode 100644
index 0000000000..ed83b83731
--- /dev/null
+++ b/meta/classes-recipe/github-releases.bbclass
@@ -0,0 +1,3 @@
+GITHUB_BASE_URI ?= "https://github.com/${BPN}/${BPN}/releases/"
+UPSTREAM_CHECK_URI ?= "${GITHUB_BASE_URI}"
+UPSTREAM_CHECK_REGEX ?= "releases/tag/v?(?P<pver>\d+(\.\d+)+)"
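Typical consumption in a recipe fetching release assets from a github.com/<name>/<name> project (asset path illustrative):

    inherit github-releases
    SRC_URI = "${GITHUB_BASE_URI}download/v${PV}/${BP}.tar.gz"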
diff --git a/meta/classes-recipe/glide.bbclass b/meta/classes-recipe/glide.bbclass
deleted file mode 100644
index 21b48fa4e0..0000000000
--- a/meta/classes-recipe/glide.bbclass
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-# Handle Glide Vendor Package Management use
-#
-# Copyright 2018 (C) O.S. Systems Software LTDA.
-
-DEPENDS:append = " glide-native"
-
-do_compile:prepend() {
- ( cd ${B}/src/${GO_IMPORT} && glide install )
-}
diff --git a/meta/classes-recipe/gnomebase.bbclass b/meta/classes-recipe/gnomebase.bbclass
index 805daafa40..74073321b8 100644
--- a/meta/classes-recipe/gnomebase.bbclass
+++ b/meta/classes-recipe/gnomebase.bbclass
@@ -5,7 +5,7 @@
#
def gnome_verdir(v):
- return ".".join(v.split(".")[:-1])
+ return ".".join(v.split(".")[:-1]) or v
GNOME_COMPRESS_TYPE ?= "xz"
@@ -27,8 +27,9 @@ FILES:${PN} += "${datadir}/application-registry \
FILES:${PN}-doc += "${datadir}/devhelp"
-GNOMEBASEBUILDCLASS ??= "autotools"
-inherit ${GNOMEBASEBUILDCLASS} pkgconfig
+GNOMEBASEBUILDCLASS ??= "meson"
+inherit pkgconfig
+inherit_defer ${GNOMEBASEBUILDCLASS}
do_install:append() {
rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
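With the default build class flipping from autotools to meson, a GNOME recipe still using autotools pins the old value before the deferred inherit resolves:

    GNOMEBASEBUILDCLASS = "autotools"
    inherit gnomebase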
diff --git a/meta/classes-recipe/go-mod.bbclass b/meta/classes-recipe/go-mod.bbclass
index 927746a338..ca3a690d05 100644
--- a/meta/classes-recipe/go-mod.bbclass
+++ b/meta/classes-recipe/go-mod.bbclass
@@ -6,7 +6,7 @@
# Handle Go Modules support
#
-# When using Go Modules, the the current working directory MUST be at or below
+# When using Go Modules, the current working directory MUST be at or below
# the location of the 'go.mod' file when the go tool is used, and there is no
# way to tell it to look elsewhere. It will automatically look upwards for the
# file, but not downwards.
@@ -24,3 +24,7 @@ inherit go
GO_WORKDIR ?= "${GO_IMPORT}"
do_compile[dirs] += "${B}/src/${GO_WORKDIR}"
+
+export GOMODCACHE = "${B}/.mod"
+
+do_compile[cleandirs] += "${B}/.mod"
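Sketch of the usual consumer (module path hypothetical): module downloads now land in a per-build cache under ${B}/.mod (GOMODCACHE), wiped before each compile by the cleandirs flag:

    inherit go-mod
    GO_IMPORT = "github.com/example/hello"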
diff --git a/meta/classes-recipe/go.bbclass b/meta/classes-recipe/go.bbclass
index 6b9748406d..9146dd611e 100644
--- a/meta/classes-recipe/go.bbclass
+++ b/meta/classes-recipe/go.bbclass
@@ -37,7 +37,7 @@ GOMIPS:mips:class-target[export] = "1"
DEPENDS_GOLANG:class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
DEPENDS_GOLANG:class-native = "go-native"
-DEPENDS_GOLANG:class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
+DEPENDS_GOLANG:class-nativesdk = "virtual/${TARGET_PREFIX}go virtual/${TARGET_PREFIX}go-runtime"
DEPENDS:append = " ${DEPENDS_GOLANG}"
@@ -48,8 +48,6 @@ GO_RPATH:class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE
GO_RPATH_LINK:class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
GO_LINKMODE ?= ""
-GO_LINKMODE:class-nativesdk = "--linkmode=external"
-GO_LINKMODE:class-native = "--linkmode=external"
GO_EXTRA_LDFLAGS ?= ""
GO_LINUXLOADER ?= "-I ${@get_linuxloader(d)}"
# Use system loader. If uninative is used, the uninative loader will be patched automatically
@@ -78,22 +76,11 @@ GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
B = "${WORKDIR}/build"
export GOPATH = "${B}"
export GOENV = "off"
+export GOPROXY ??= "https://proxy.golang.org,direct"
export GOTMPDIR ?= "${WORKDIR}/build-tmp"
GOTMPDIR[vardepvalue] = ""
-python go_do_unpack() {
- src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
- return
-
- fetcher = bb.fetch2.Fetch(src_uri, d)
- for url in fetcher.urls:
- if fetcher.ud[url].type == 'git':
- if fetcher.ud[url].parm.get('destsuffix') is None:
- s_dirname = os.path.basename(d.getVar('S'))
- fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
- fetcher.unpack(d.getVar('WORKDIR'))
-}
+GO_SRCURI_DESTSUFFIX = "${@os.path.join(os.path.basename(d.getVar('S')), 'src', d.getVar('GO_IMPORT')) + '/'}"
go_list_packages() {
${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
@@ -132,7 +119,7 @@ go_do_install() {
tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
tar -C ${D}${libdir}/go --no-same-owner -xf -
- if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
+ if ls ${B}/${GO_BUILD_BINDIR}/* >/dev/null 2>/dev/null ; then
install -d ${D}${bindir}
install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
fi
@@ -152,7 +139,7 @@ go_stage_testdata() {
cd "$oldwd"
}
-EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
+EXPORT_FUNCTIONS do_configure do_compile do_install
FILES:${PN}-dev = "${libdir}/go/src"
FILES:${PN}-staticdev = "${libdir}/go/pkg"
diff --git a/meta/classes-recipe/goarch.bbclass b/meta/classes-recipe/goarch.bbclass
index 61ead30a63..1ebe03864f 100644
--- a/meta/classes-recipe/goarch.bbclass
+++ b/meta/classes-recipe/goarch.bbclass
@@ -54,6 +54,7 @@ COMPATIBLE_HOST:linux-muslx32 = "null"
COMPATIBLE_HOST:powerpc = "null"
COMPATIBLE_HOST:powerpc64 = "null"
COMPATIBLE_HOST:mipsarchn32 = "null"
+COMPATIBLE_HOST:riscv32 = "null"
ARM_INSTRUCTION_SET:armv4 = "arm"
ARM_INSTRUCTION_SET:armv5 = "arm"
@@ -67,31 +68,10 @@ SECURITY_NOPIE_CFLAGS ??= ""
CCACHE_DISABLE ?= "1"
def go_map_arch(a, d):
- import re
- if re.match('i.86', a):
- return '386'
- elif a == 'x86_64':
- return 'amd64'
- elif re.match('arm.*', a):
- return 'arm'
- elif re.match('aarch64.*', a):
- return 'arm64'
- elif re.match('mips64el.*', a):
- return 'mips64le'
- elif re.match('mips64.*', a):
- return 'mips64'
- elif a == 'mips':
- return 'mips'
- elif a == 'mipsel':
- return 'mipsle'
- elif re.match('p(pc|owerpc)(64le)', a):
- return 'ppc64le'
- elif re.match('p(pc|owerpc)(64)', a):
- return 'ppc64'
- elif a == 'riscv64':
- return 'riscv64'
- else:
+ arch = oe.go.map_arch(a)
+ if not arch:
raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
+ return arch
def go_map_arm(a, d):
if a.startswith("arm"):
diff --git a/meta/classes-recipe/gobject-introspection-data.bbclass b/meta/classes-recipe/gobject-introspection-data.bbclass
index 7f522a1ed3..aa04c70ca6 100644
--- a/meta/classes-recipe/gobject-introspection-data.bbclass
+++ b/meta/classes-recipe/gobject-introspection-data.bbclass
@@ -11,8 +11,3 @@
# so that qemu use can be avoided when necessary.
GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
-
-do_compile:prepend() {
- # This prevents g-ir-scanner from writing cache data to $HOME
- export GI_SCANNER_DISABLE_CACHE=1
-}
diff --git a/meta/classes-recipe/gobject-introspection.bbclass b/meta/classes-recipe/gobject-introspection.bbclass
index 0c7b7d200a..d0052cd623 100644
--- a/meta/classes-recipe/gobject-introspection.bbclass
+++ b/meta/classes-recipe/gobject-introspection.bbclass
@@ -35,12 +35,10 @@ EXTRA_OEMESON:prepend:class-nativesdk = "${@['', '${GIRMESONBUILD}'][d.getVar('G
# Generating introspection data depends on a combination of native and target
# introspection tools, and qemu to run the target tools.
-DEPENDS:append:class-target = " gobject-introspection gobject-introspection-native qemu-native"
+DEPENDS:append:class-target = " ${@bb.utils.contains('GI_DATA_ENABLED', 'True', 'gobject-introspection qemu-native', '', d)}"
-# Even though introspection is disabled on -native, gobject-introspection package is still
-# needed for m4 macros.
-DEPENDS:append:class-native = " gobject-introspection-native"
-DEPENDS:append:class-nativesdk = " gobject-introspection-native"
+# Even when introspection is disabled, the gobject-introspection package is still needed for m4 macros.
+DEPENDS:append = " gobject-introspection-native"
# This is used by introspection tools to find .gir includes
export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
@@ -49,7 +47,12 @@ do_configure:prepend:class-target () {
# introspection.m4 pre-packaged with upstream tarballs does not yet
# have our fixes
mkdir -p ${S}/m4
- cp ${STAGING_DIR_TARGET}/${datadir}/aclocal/introspection.m4 ${S}/m4
+ cp ${STAGING_DIR_NATIVE}/${datadir}/aclocal/introspection.m4 ${S}/m4
+}
+
+do_compile:prepend() {
+ # This prevents g-ir-scanner from writing cache data to $HOME
+ export GI_SCANNER_DISABLE_CACHE=1
}
# .typelib files are needed at runtime and so they go to the main package (so
diff --git a/meta/classes-recipe/gtk-doc.bbclass b/meta/classes-recipe/gtk-doc.bbclass
index 68fa2cc745..9d3911966b 100644
--- a/meta/classes-recipe/gtk-doc.bbclass
+++ b/meta/classes-recipe/gtk-doc.bbclass
@@ -25,36 +25,19 @@ GTKDOC_MESON_ENABLE_FLAG ?= 'true'
GTKDOC_MESON_DISABLE_FLAG ?= 'false'
# Auto enable/disable based on GTKDOC_ENABLED
-EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
+EXTRA_OECONF:prepend = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
'--disable-gtk-doc', d)} "
-EXTRA_OEMESON:prepend:class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
-
-# When building native recipes, disable gtkdoc, as it is not necessary,
-# pulls in additional dependencies, and makes build times longer
-EXTRA_OECONF:prepend:class-native = "--disable-gtk-doc "
-EXTRA_OECONF:prepend:class-nativesdk = "--disable-gtk-doc "
-EXTRA_OEMESON:prepend:class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
-EXTRA_OEMESON:prepend:class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
+EXTRA_OEMESON:prepend = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
# Even though gtkdoc is disabled on -native, gtk-doc package is still
# needed for m4 macros.
DEPENDS:append = " gtk-doc-native"
-# The documentation directory, where the infrastructure will be copied.
-# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
-GTKDOC_DOCDIR ?= "${S}"
-
export STAGING_DIR_HOST
inherit python3native pkgconfig qemu
DEPENDS:append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
-do_configure:prepend () {
- # Need to use ||true as this is only needed if configure.ac both exists
- # and uses GTK_DOC_CHECK.
- gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
-}
-
do_compile:prepend:class-target () {
if [ ${GTKDOC_ENABLED} = True ]; then
# Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
diff --git a/meta/classes-recipe/gtk-icon-cache.bbclass b/meta/classes-recipe/gtk-icon-cache.bbclass
index 17c7eb7a33..9ecb49916c 100644
--- a/meta/classes-recipe/gtk-icon-cache.bbclass
+++ b/meta/classes-recipe/gtk-icon-cache.bbclass
@@ -9,7 +9,7 @@ FILES:${PN} += "${datadir}/icons/hicolor"
GTKIC_VERSION ??= '3'
GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }"
-GTKIC_CMD = "${@ 'gtk-update-icon-cache-3.0.0' if d.getVar('GTKIC_VERSION') == '4' else 'gtk4-update-icon-cache' }"
+GTKIC_CMD = "${@ 'gtk4-update-icon-cache' if d.getVar('GTKIC_VERSION') == '4' else 'gtk-update-icon-cache-3.0' }"
#gtk+3/gtk4 require GTK3DISTROFEATURES; depending on them makes all the
#recipes that inherit this class require GTK3DISTROFEATURES
diff --git a/meta/classes-recipe/image-artifact-names.bbclass b/meta/classes-recipe/image-artifact-names.bbclass
index 5c4e746b90..bc76ff0e16 100644
--- a/meta/classes-recipe/image-artifact-names.bbclass
+++ b/meta/classes-recipe/image-artifact-names.bbclass
@@ -11,8 +11,21 @@
IMAGE_BASENAME ?= "${PN}"
IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME SOURCE_DATE_EPOCH"
-IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
-IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}-${MACHINE}"
+IMAGE_NAME ?= "${IMAGE_LINK_NAME}${IMAGE_VERSION_SUFFIX}"
+IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}${IMAGE_MACHINE_SUFFIX}${IMAGE_NAME_SUFFIX}"
+
+# This needs to stay in sync with IMAGE_LINK_NAME, but with INITRAMFS_IMAGE instead of IMAGE_BASENAME
+# and without ${IMAGE_NAME_SUFFIX}, which all initramfs images should set to empty
+INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}${IMAGE_MACHINE_SUFFIX}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
+
+# The default DEPLOY_DIR_IMAGE is a per-${MACHINE} directory:
+# meta/conf/bitbake.conf:DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR}/images/${MACHINE}"
+# so many people find it unnecessary to add this suffix to every image stored
+# there. Other people, however, often fetch images for different MACHINEs into
+# the same downloads directory, where the suffix is very helpful. Provide a
+# separate variable so projects can pick whichever scheme works best for them
+# without having to understand the IMAGE_NAME/IMAGE_LINK_NAME structure.
+IMAGE_MACHINE_SUFFIX ??= "-${MACHINE}"
# IMAGE_NAME is the base name for everything produced when building images.
# The actual image that contains the rootfs has an additional suffix (.rootfs
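Projects that prefer the old unsuffixed naming can now opt out without redefining IMAGE_NAME or IMAGE_LINK_NAME. A minimal sketch for local.conf or a distro config, assuming images stay in the per-MACHINE deploy directory:

    # drop the -${MACHINE} suffix from deployed image names; the weak ??=
    # default in the class makes this override cheap
    IMAGE_MACHINE_SUFFIX = ""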
diff --git a/meta/classes-recipe/image-combined-dbg.bbclass b/meta/classes-recipe/image-combined-dbg.bbclass
index dcf1968538..729313739c 100644
--- a/meta/classes-recipe/image-combined-dbg.bbclass
+++ b/meta/classes-recipe/image-combined-dbg.bbclass
@@ -4,7 +4,7 @@
# SPDX-License-Identifier: MIT
#
-IMAGE_PREPROCESS_COMMAND:append = " combine_dbg_image; "
+IMAGE_PREPROCESS_COMMAND:append = " combine_dbg_image"
combine_dbg_image () {
if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
diff --git a/meta/classes-recipe/image-live.bbclass b/meta/classes-recipe/image-live.bbclass
index 1034acc49e..d2e95ef51c 100644
--- a/meta/classes-recipe/image-live.bbclass
+++ b/meta/classes-recipe/image-live.bbclass
@@ -31,14 +31,14 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
virtual/kernel:do_deploy \
${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
- ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')) if d.getVar('ROOTFS') else ''} \
+ ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_').split('.')[0]) if d.getVar('ROOTFS') else ''} \
"
LABELS_LIVE ?= "boot install"
ROOT_LIVE ?= "root=/dev/ram0"
INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
-INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_FSTYPES}"
+INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}${IMAGE_MACHINE_SUFFIX}.${@d.getVar('INITRAMFS_FSTYPES').split()[0]}"
LIVE_ROOTFS_TYPE ?= "ext4"
ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
@@ -260,6 +260,5 @@ python do_bootimg() {
bb.build.exec_func('create_symlinks', d)
}
do_bootimg[subimages] = "hddimg iso"
-do_bootimg[imgsuffix] = "."
addtask bootimg before do_image_complete after do_rootfs
diff --git a/meta/classes-recipe/image.bbclass b/meta/classes-recipe/image.bbclass
index e387645503..28be6c6362 100644
--- a/meta/classes-recipe/image.bbclass
+++ b/meta/classes-recipe/image.bbclass
@@ -14,18 +14,19 @@ ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
# Only Linux SDKs support populate_sdk_ext; fall back to populate_sdk_base
# in the non-Linux SDK_OS case, such as mingw32
-IMGCLASSES += "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
+inherit populate_sdk_base
+IMGCLASSES += "${@['', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
IMGCLASSES += "image_types_wic"
IMGCLASSES += "rootfs-postcommands"
IMGCLASSES += "image-postinst-intercepts"
IMGCLASSES += "overlayfs-etc"
-inherit ${IMGCLASSES}
+inherit_defer ${IMGCLASSES}
TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
-POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
+POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks"
LICENSE ?= "MIT"
PACKAGES = ""
@@ -96,6 +97,7 @@ USE_DEPMOD ?= "1"
PID = "${@os.getpid()}"
PACKAGE_ARCH = "${MACHINE_ARCH}"
+SSTATE_ARCHS_TUNEPKG = "${@all_multilib_tune_values(d, 'TUNE_PKGARCH')}"
LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
LDCONFIGDEPEND:libc-musl = ""
@@ -120,8 +122,7 @@ def rootfs_command_variables(d):
python () {
variables = rootfs_command_variables(d)
for var in variables:
- if d.getVar(var, False):
- d.setVarFlag(var, 'func', '1')
+ d.setVarFlag(var, 'vardeps', d.getVar(var))
}
def rootfs_variables(d):
@@ -182,8 +183,7 @@ python () {
IMAGE_POSTPROCESS_COMMAND ?= ""
-# some default locales
-IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
+IMAGE_LINGUAS ??= ""
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
@@ -203,6 +203,7 @@ fakeroot python do_rootfs () {
from oe.rootfs import create_rootfs
from oe.manifest import create_manifest
import logging
+ import oe.packagedata
logger = d.getVar('BB_TASK_LOGGER', False)
if logger:
@@ -247,9 +248,9 @@ fakeroot python do_rootfs () {
# otherwise, the multilib renaming could step in and squash any fixups that
# may have occurred.
pn = d.getVar('PN')
- runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
- runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
- runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
+ oe.packagedata.runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
+ oe.packagedata.runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
+ oe.packagedata.runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
# Generate the initial manifest
create_manifest(d)
@@ -319,7 +320,7 @@ fakeroot python do_image_qa () {
except oe.utils.ImageQAFailed as e:
qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
except Exception as e:
- qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
+ qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (cmd, e)
if qamsg:
imgname = d.getVar('IMAGE_NAME')
@@ -446,7 +447,7 @@ python () {
localdata.delVar('DATE')
localdata.delVar('TMPDIR')
localdata.delVar('IMAGE_VERSION_SUFFIX')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude', True) or '').split()
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude') or '').split()
for dep in vardepsexclude:
localdata.delVar(dep)
@@ -480,14 +481,14 @@ python () {
if subimage not in subimages:
subimages.append(subimage)
if type not in alltypes:
- rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
+ rm_tmp_images.add(localdata.expand("${IMAGE_NAME}.${type}"))
for bt in basetypes[t]:
gen_conversion_cmds(bt)
localdata.setVar('type', realt)
if t not in alltypes:
- rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
+ rm_tmp_images.add(localdata.expand("${IMAGE_NAME}.${type}"))
else:
subimages.append(realt)
@@ -594,13 +595,12 @@ python create_symlinks() {
manifest_name = d.getVar('IMAGE_MANIFEST')
taskname = d.getVar("BB_CURRENTTASK")
subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
- imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix') or d.expand("${IMAGE_NAME_SUFFIX}.")
if not link_name:
return
for type in subimages:
dst = os.path.join(deploy_dir, link_name + "." + type)
- src = img_name + imgsuffix + type
+ src = img_name + "." + type
if os.path.exists(os.path.join(deploy_dir, src)):
bb.note("Creating symlink: %s -> %s" % (dst, src))
if os.path.islink(dst):
@@ -610,7 +610,7 @@ python create_symlinks() {
bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src))
}
-MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
+MULTILIBRE_ALLOW_REP += "${base_bindir} ${base_sbindir} ${bindir} ${sbindir} ${libexecdir} ${sysconfdir} ${nonarch_base_libdir}/udev /lib/modules/[^/]*/modules.*"
MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
@@ -658,8 +658,8 @@ create_merged_usr_symlinks_sdk() {
create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
}
-ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs; ', '',d)}"
-POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
+ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs', '',d)}"
+POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk', '',d)}"
reproducible_final_image_task () {
if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
@@ -679,6 +679,6 @@ systemd_preset_all () {
fi
}
-IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
+IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task "
CVE_PRODUCT = ""
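Because IMAGE_LINGUAS now defaults to empty via ??=, any build that relied on the old default locales must set the variable itself. A minimal sketch for local.conf, restoring the selection the class previously installed by default:

    # the locales formerly hardcoded in image.bbclass
    IMAGE_LINGUAS = "de-de fr-fr en-gb"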
diff --git a/meta/classes-recipe/image_types.bbclass b/meta/classes-recipe/image_types.bbclass
index a731e585b2..2f948ecbf8 100644
--- a/meta/classes-recipe/image_types.bbclass
+++ b/meta/classes-recipe/image_types.bbclass
@@ -54,17 +54,19 @@ def imagetypes_getdepends(d):
# Sort the set so that ordering is consistent
return " ".join(sorted(deps))
-XZ_COMPRESSION_LEVEL ?= "-9"
+XZ_COMPRESSION_LEVEL ?= "-6"
XZ_INTEGRITY_CHECK ?= "crc32"
ZIP_COMPRESSION_LEVEL ?= "-9"
-ZSTD_COMPRESSION_LEVEL ?= "-3"
+7ZIP_COMPRESSION_LEVEL ?= "9"
+7ZIP_COMPRESSION_METHOD ?= "BZip2"
+7ZIP_EXTENSION ?= "7z"
JFFS2_SUM_EXTRA_ARGS ?= ""
-IMAGE_CMD:jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}.jffs2 ${EXTRA_IMAGECMD}"
-IMAGE_CMD:cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
+IMAGE_CMD:cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.cramfs ${EXTRA_IMAGECMD}"
oe_mkext234fs () {
fstype=$1
@@ -84,14 +86,14 @@ oe_mkext234fs () {
eval COUNT=\"$MIN_COUNT\"
fi
# Create a sparse image block
- bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
- dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
+ bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
- bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
- bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}"
- mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
+ bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype`"
+ bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype -d ${IMAGE_ROOTFS}"
+ mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype -d ${IMAGE_ROOTFS}
# Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected)
- fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
+ fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype || [ $? -le 3 ]
}
IMAGE_CMD:ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
@@ -105,28 +107,49 @@ IMAGE_CMD:btrfs () {
size=${MIN_BTRFS_SIZE}
bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K"
fi
- dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs seek=${size} count=0 bs=1024
- mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.btrfs seek=${size} count=0 bs=1024
+ mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.btrfs
}
-IMAGE_CMD:squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
-IMAGE_CMD:squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
-IMAGE_CMD:squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
-IMAGE_CMD:squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
-IMAGE_CMD:squashfs-zst = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-zst ${EXTRA_IMAGECMD} -noappend -comp zstd"
+oe_mksquashfs () {
+ local comp=$1; shift
+ local extra_imagecmd="$@"
-IMAGE_CMD:erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs ${IMAGE_ROOTFS}"
-IMAGE_CMD:erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4 ${IMAGE_ROOTFS}"
-IMAGE_CMD:erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4hc ${IMAGE_ROOTFS}"
+ if [ "$comp" = "zstd" ]; then
+ suffix="zst"
+ fi
+ # Use the bitbake reproducible timestamp instead of the hardcoded squashfs one
+ export SOURCE_DATE_EPOCH=$(stat -c '%Y' ${IMAGE_ROOTFS})
+ mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.squashfs${comp:+-}${suffix:-$comp} -noappend ${comp:+-comp }$comp $extra_imagecmd
+}
+IMAGE_CMD:squashfs = "oe_mksquashfs '' ${EXTRA_IMAGECMD}"
+IMAGE_CMD:squashfs-xz = "oe_mksquashfs xz ${EXTRA_IMAGECMD}"
+IMAGE_CMD:squashfs-lzo = "oe_mksquashfs lzo ${EXTRA_IMAGECMD}"
+IMAGE_CMD:squashfs-lz4 = "oe_mksquashfs lz4 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:squashfs-zst = "oe_mksquashfs zstd ${EXTRA_IMAGECMD}"
+
+IMAGE_CMD:erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.erofs ${IMAGE_ROOTFS}"
+IMAGE_CMD:erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.erofs-lz4 ${IMAGE_ROOTFS}"
+IMAGE_CMD:erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.erofs-lz4hc ${IMAGE_ROOTFS}"
+
+# Note that vfat can't handle all types of files that a real Linux file system
+# can (e.g. device files, symlinks, etc.) and is therefore not suitable for all
+# use cases.
+oe_mkvfatfs () {
+ mkfs.vfat $@ -C ${IMGDEPLOYDIR}/${IMAGE_NAME}.vfat ${ROOTFS_SIZE}
+ mcopy -i "${IMGDEPLOYDIR}/${IMAGE_NAME}.vfat" -vsmpQ ${IMAGE_ROOTFS}/* ::/
+}
+
+IMAGE_CMD:vfat = "oe_mkvfatfs ${EXTRA_IMAGECMD}"
IMAGE_CMD_TAR ?= "tar"
# ignore return code 1 "file changed as we read it" as other tasks (e.g. do_image_wic) may be hardlinking the rootfs
-IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
+IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
IMAGE_CMD:cpio () {
- (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+ (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}.cpio)
# We only need the /init symlink if we're building the real
# image. The -dbg image doesn't need it! By being clever
# about this we also avoid 'touch' below failing, as it
@@ -136,10 +159,11 @@ IMAGE_CMD:cpio () {
if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then
if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then
ln -sf /sbin/init ${WORKDIR}/cpio_append/init
+ touch -h -r ${IMAGE_ROOTFS}/sbin/init ${WORKDIR}/cpio_append/init
else
- touch ${WORKDIR}/cpio_append/init
+ touch -r ${IMAGE_ROOTFS} ${WORKDIR}/cpio_append/init
fi
- (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+ (cd ${WORKDIR}/cpio_append && echo ./init | cpio --reproducible -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}.cpio)
fi
fi
}
@@ -149,16 +173,12 @@ UBI_VOLTYPE ?= "dynamic"
UBI_IMGTYPE ?= "ubifs"
write_ubi_config() {
- if [ -z "$1" ]; then
- local vname=""
- else
- local vname="_$1"
- fi
+ local vname="$1"
cat <<EOF > ubinize${vname}-${IMAGE_NAME}.cfg
[ubifs]
mode=ubi
-image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.${UBI_IMGTYPE}
+image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}.${UBI_IMGTYPE}
vol_id=0
vol_type=${UBI_VOLTYPE}
vol_name=${UBI_VOLNAME}
@@ -175,12 +195,17 @@ multiubi_mkfs() {
bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
fi
- write_ubi_config "$3"
+ if [ -z "$3" ]; then
+ local vname=""
+ else
+ local vname="_$3"
+ fi
+ write_ubi_config "${vname}"
if [ -n "$vname" ]; then
- mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
+ mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}.ubifs ${mkubifs_args}
fi
- ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
+ ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
# Cleanup cfg file
mv ubinize${vname}-${IMAGE_NAME}.cfg ${IMGDEPLOYDIR}/
@@ -188,19 +213,22 @@ multiubi_mkfs() {
# Create own symlinks for 'named' volumes
if [ -n "$vname" ]; then
cd ${IMGDEPLOYDIR}
- if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ]; then
- ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs \
+ if [ -e ${IMAGE_NAME}${vname}.ubifs ]; then
+ ln -sf ${IMAGE_NAME}${vname}.ubifs \
${IMAGE_LINK_NAME}${vname}.ubifs
fi
- if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ]; then
- ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi \
+ if [ -e ${IMAGE_NAME}${vname}.ubi ]; then
+ ln -sf ${IMAGE_NAME}${vname}.ubi \
${IMAGE_LINK_NAME}${vname}.ubi
fi
cd -
fi
}
+MULTIUBI_ARGS = "MKUBIFS_ARGS UBINIZE_ARGS"
+
IMAGE_CMD:multiubi () {
+ ${@' '.join(['%s_%s="%s";' % (arg, name, d.getVar('%s_%s' % (arg, name))) for arg in d.getVar('MULTIUBI_ARGS').split() for name in d.getVar('MULTIUBI_BUILD').split()])}
# Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
for name in ${MULTIUBI_BUILD}; do
eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
@@ -215,7 +243,7 @@ IMAGE_CMD:ubi () {
}
IMAGE_TYPEDEP:ubi = "${UBI_IMGTYPE}"
-IMAGE_CMD:ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
+IMAGE_CMD:ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.ubifs ${MKUBIFS_ARGS}"
MIN_F2FS_SIZE ?= "524288"
IMAGE_CMD:f2fs () {
@@ -229,9 +257,9 @@ IMAGE_CMD:f2fs () {
size=${MIN_F2FS_SIZE}
bbwarn "Rootfs size is too small for F2FS. Filesystem will be extended to ${size}K"
fi
- dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs seek=${size} count=0 bs=1024
- mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
- sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.f2fs seek=${size} count=0 bs=1024
+ mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.f2fs
+ sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.f2fs
}
EXTRA_IMAGECMD = ""
@@ -249,6 +277,10 @@ EXTRA_IMAGECMD:ext4 ?= "-i 4096"
EXTRA_IMAGECMD:btrfs ?= "-n 4096 --shrink"
EXTRA_IMAGECMD:f2fs ?= ""
+# If a specific FAT size is needed, set it here (e.g. "-F 32"/"-F 16"/"-F 12");
+# otherwise mkfs.vfat will automatically pick one.
+EXTRA_IMAGECMD:vfat ?= ""
+
do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
@@ -268,6 +300,7 @@ do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
do_image_erofs[depends] += "erofs-utils-native:do_populate_sysroot"
do_image_erofs_lz4[depends] += "erofs-utils-native:do_populate_sysroot"
do_image_erofs_lz4hc[depends] += "erofs-utils-native:do_populate_sysroot"
+do_image_vfat[depends] += "dosfstools-native:do_populate_sysroot mtools-native:do_populate_sysroot"
# This variable is available to request which values are suitable for IMAGE_FSTYPES
IMAGE_TYPES = " \
@@ -277,6 +310,7 @@ IMAGE_TYPES = " \
ext3 ext3.gz \
ext4 ext4.gz \
btrfs \
+ vfat \
squashfs squashfs-xz squashfs-lzo squashfs-lz4 squashfs-zst \
ubi ubifs multiubi \
tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
@@ -296,32 +330,33 @@ IMAGE_TYPES:append:x86-64 = " hddimg iso"
# CONVERSION_CMD/DEPENDS.
COMPRESSIONTYPES ?= ""
-CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 gzsync zsync ${COMPRESSIONTYPES}"
-CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
-CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
-CONVERSION_CMD:lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
-CONVERSION_CMD:lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD:zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
-CONVERSION_CMD:sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
-CONVERSION_CMD:md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
-CONVERSION_CMD:sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
-CONVERSION_CMD:sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
-CONVERSION_CMD:sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
-CONVERSION_CMD:sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
-CONVERSION_CMD:sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
-CONVERSION_CMD:bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
-CONVERSION_CMD:u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
-CONVERSION_CMD:vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
-CONVERSION_CMD:vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhdx"
-CONVERSION_CMD:vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhd"
-CONVERSION_CMD:vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
-CONVERSION_CMD:qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
-CONVERSION_CMD:base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
-CONVERSION_CMD:zsync = "zsyncmake_curl ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD:gzsync = "zsyncmake_curl -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip 7zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 gzsync zsync ${COMPRESSIONTYPES}"
+CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}.${type}"
+CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.gz"
+CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}.${type}"
+CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.xz"
+CONVERSION_CMD:lz4 = "lz4 -9 -z -l ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.lz4"
+CONVERSION_CMD:lzo = "lzop -9 ${IMAGE_NAME}.${type}"
+CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}.${type}.zip ${IMAGE_NAME}.${type}"
+CONVERSION_CMD:7zip = "7za a -mx=${7ZIP_COMPRESSION_LEVEL} -mm=${7ZIP_COMPRESSION_METHOD} ${IMAGE_NAME}.${type}.${7ZIP_EXTENSION} ${IMAGE_NAME}.${type}"
+CONVERSION_CMD:zst = "zstd -f -k -c ${ZSTD_DEFAULTS} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.zst"
+CONVERSION_CMD:sum = "sumtool -i ${IMAGE_NAME}.${type} -o ${IMAGE_NAME}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
+CONVERSION_CMD:md5sum = "md5sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.md5sum"
+CONVERSION_CMD:sha1sum = "sha1sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha1sum"
+CONVERSION_CMD:sha224sum = "sha224sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha224sum"
+CONVERSION_CMD:sha256sum = "sha256sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha256sum"
+CONVERSION_CMD:sha384sum = "sha384sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha384sum"
+CONVERSION_CMD:sha512sum = "sha512sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha512sum"
+CONVERSION_CMD:bmap = "bmaptool create ${IMAGE_NAME}.${type} -o ${IMAGE_NAME}.${type}.bmap"
+CONVERSION_CMD:u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.u-boot"
+CONVERSION_CMD:vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vmdk"
+CONVERSION_CMD:vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vhdx"
+CONVERSION_CMD:vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vhd"
+CONVERSION_CMD:vdi = "qemu-img convert -O vdi ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vdi"
+CONVERSION_CMD:qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.qcow2"
+CONVERSION_CMD:base64 = "base64 ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.base64"
+CONVERSION_CMD:zsync = "zsyncmake_curl ${IMAGE_NAME}.${type}"
+CONVERSION_CMD:gzsync = "zsyncmake_curl -z ${IMAGE_NAME}.${type}"
CONVERSION_DEPENDS_lzma = "xz-native"
CONVERSION_DEPENDS_gz = "pigz-native"
CONVERSION_DEPENDS_bz2 = "pbzip2-native"
@@ -329,9 +364,10 @@ CONVERSION_DEPENDS_xz = "xz-native"
CONVERSION_DEPENDS_lz4 = "lz4-native"
CONVERSION_DEPENDS_lzo = "lzop-native"
CONVERSION_DEPENDS_zip = "zip-native"
+CONVERSION_DEPENDS_7zip = "p7zip-native"
CONVERSION_DEPENDS_zst = "zstd-native"
CONVERSION_DEPENDS_sum = "mtd-utils-native"
-CONVERSION_DEPENDS_bmap = "bmap-tools-native"
+CONVERSION_DEPENDS_bmap = "bmaptool-native"
CONVERSION_DEPENDS_u-boot = "u-boot-tools-native"
CONVERSION_DEPENDS_vmdk = "qemu-system-native"
CONVERSION_DEPENDS_vdi = "qemu-system-native"
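The new vfat image type and 7zip conversion are requested the same way as the existing ones, via IMAGE_FSTYPES. A minimal sketch (the combination shown is illustrative):

    IMAGE_FSTYPES += "vfat tar.7zip"
    # class defaults: level 9, BZip2 method, .7z extension; override as needed
    7ZIP_COMPRESSION_LEVEL = "9"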
diff --git a/meta/classes-recipe/image_types_wic.bbclass b/meta/classes-recipe/image_types_wic.bbclass
index c339b9bdfb..cf3be909b3 100644
--- a/meta/classes-recipe/image_types_wic.bbclass
+++ b/meta/classes-recipe/image_types_wic.bbclass
@@ -38,7 +38,7 @@ WICVARS ?= "\
TARGET_SYS \
"
-inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
+inherit_defer ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
@@ -71,7 +71,24 @@ IMAGE_CMD:wic () {
bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
fi
BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
- mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
+
+ # look to see if the user specifies a custom imager
+ IMAGER=direct
+ eval set -- "${WIC_CREATE_EXTRA_ARGS} --"
+ while [ 1 ]; do
+ case "$1" in
+ --imager|-i)
+ shift
+ IMAGER=$1
+ ;;
+ --)
+ shift
+ break
+ ;;
+ esac
+ shift
+ done
+ mv "$build_wic/$(basename "${wks%.wks}")"*.${IMAGER} "$out.wic"
}
IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
@@ -89,9 +106,9 @@ do_image_wic[recrdeptask] += "do_deploy"
do_image_wic[deptask] += "do_image_complete"
WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
-WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native"
+WKS_FILE_DEPENDS_DEFAULT += "bmaptool-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native"
# Unified kernel images need objcopy
-WKS_FILE_DEPENDS_DEFAULT += "virtual/${MLPREFIX}${TARGET_PREFIX}binutils"
+WKS_FILE_DEPENDS_DEFAULT += "virtual/${TARGET_PREFIX}binutils"
WKS_FILE_DEPENDS_BOOTLOADERS = ""
WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release"
WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release"
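Since IMAGE_CMD:wic now parses --imager out of WIC_CREATE_EXTRA_ARGS, a non-default imager plugin can be selected without the deploy step losing track of the output file name. A minimal sketch, where the imager name is an illustrative assumption:

    # 'direct' is the default; name a wic imager plugin here instead
    WIC_CREATE_EXTRA_ARGS += "--imager direct"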
diff --git a/meta/classes-recipe/kernel-arch.bbclass b/meta/classes-recipe/kernel-arch.bbclass
index 6f5d3bde6c..b32f6137a2 100644
--- a/meta/classes-recipe/kernel-arch.bbclass
+++ b/meta/classes-recipe/kernel-arch.bbclass
@@ -19,6 +19,7 @@ valid_archs = "alpha cris ia64 \
sh sh64 um h8300 \
parisc s390 v850 \
avr32 blackfin \
+ loongarch64 \
microblaze \
nios2 arc riscv xtensa"
@@ -34,6 +35,7 @@ def map_kernel_arch(a, d):
elif re.match('aarch64_be$', a): return 'arm64'
elif re.match('aarch64_ilp32$', a): return 'arm64'
elif re.match('aarch64_be_ilp32$', a): return 'arm64'
+ elif re.match('loongarch(32|64|)$', a): return 'loongarch'
elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
elif re.match('mcf', a): return 'm68k'
elif re.match('riscv(32|64|)(eb|)$', a): return 'riscv'
@@ -66,9 +68,13 @@ TARGET_LD_KERNEL_ARCH ?= ""
HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
+TARGET_OBJCOPY_KERNEL_ARCH ?= ""
+HOST_OBJCOPY_KERNEL_ARCH ?= "${TARGET_OBJCOPY_KERNEL_ARCH}"
KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}"
-KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
-KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
-TOOLCHAIN = "gcc"
-
+KERNEL_LD = "${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
+KERNEL_AR = "${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
+KERNEL_OBJCOPY = "${HOST_PREFIX}objcopy ${HOST_OBJCOPY_KERNEL_ARCH}"
+# Code in package.py can't handle options on KERNEL_STRIP
+KERNEL_STRIP = "${HOST_PREFIX}strip"
+TOOLCHAIN ?= "gcc"
diff --git a/meta/classes-recipe/kernel-artifact-names.bbclass b/meta/classes-recipe/kernel-artifact-names.bbclass
index 311075c68d..1a7611a15e 100644
--- a/meta/classes-recipe/kernel-artifact-names.bbclass
+++ b/meta/classes-recipe/kernel-artifact-names.bbclass
@@ -12,7 +12,7 @@
inherit image-artifact-names
-KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
+KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}${IMAGE_MACHINE_SUFFIX}${IMAGE_VERSION_SUFFIX}"
KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
KERNEL_ARTIFACT_BIN_EXT ?= ".bin"
diff --git a/meta/classes-recipe/kernel-devicetree.bbclass b/meta/classes-recipe/kernel-devicetree.bbclass
index b2117de805..eff052b402 100644
--- a/meta/classes-recipe/kernel-devicetree.bbclass
+++ b/meta/classes-recipe/kernel-devicetree.bbclass
@@ -12,7 +12,12 @@ python () {
d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
}
-FILES:${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
+# recursively search for devicetree files
+FILES:${KERNEL_PACKAGE_NAME}-devicetree = " \
+ /${KERNEL_DTBDEST}/**/*.dtb \
+ /${KERNEL_DTBDEST}/**/*.dtbo \
+"
+
FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
# Generate kernel+devicetree bundle
@@ -68,17 +73,21 @@ do_compile:append() {
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
- oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+ oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" STRIP="${KERNEL_STRIP}" ${KERNEL_EXTRA_ARGS}
done
}
do_install:append() {
+ install -d ${D}/${KERNEL_DTBDEST}
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
- dtb_ext=${dtb##*.}
- dtb_base_name=`basename $dtb .$dtb_ext`
dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
- install -m 0644 $dtb_path ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext
+ if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
+ dtb_ext=${dtb##*.}
+ dtb_base_name=`basename $dtb .$dtb_ext`
+ dtb=$dtb_base_name.$dtb_ext
+ fi
+ install -Dm 0644 $dtb_path ${D}/${KERNEL_DTBDEST}/$dtb
done
}
@@ -88,28 +97,39 @@ do_deploy:append() {
dtb_ext=${dtb##*.}
dtb_base_name=`basename $dtb .$dtb_ext`
install -d $deployDir
- install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
+ if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
+ dtb=$dtb_base_name.$dtb_ext
+ fi
+ install -m 0644 ${D}/${KERNEL_DTBDEST}/$dtb $deployDir/$dtb_base_name.$dtb_ext
+ if [ -n "${KERNEL_DTB_NAME}" ] ; then
+ ln -sf $dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
fi
if [ -n "${KERNEL_DTB_LINK_NAME}" ] ; then
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ ln -sf $dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
fi
for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
cat ${D}/${KERNEL_IMAGEDEST}/$type \
- $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ $deployDir/$dtb_base_name.$dtb_ext \
+ > $deployDir/$type-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ if [ -n "${KERNEL_DTB_NAME}" ]; then
+ ln -sf $type-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ fi
if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
- ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ ln -sf $type-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \
$deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
fi
if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
- $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ $deployDir/$dtb_base_name.$dtb_ext \
+ > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ if [ -n "${KERNEL_DTB_NAME}" ]; then
+ ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ fi
if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
- ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \
$deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
fi
fi
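Where the packaged and deployed DTBs end up is now governed by KERNEL_DTBDEST and KERNEL_DTBVENDORED. A minimal sketch for a machine configuration, assuming an illustrative vendored DTB path:

    KERNEL_DEVICETREE = "vendor/board.dtb"
    # keep the vendor/ prefix under ${KERNEL_DTBDEST} instead of flattening it
    KERNEL_DTBVENDORED = "1"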
diff --git a/meta/classes-recipe/kernel-fitimage.bbclass b/meta/classes-recipe/kernel-fitimage.bbclass
index 838ce204cb..4b74ddc201 100644
--- a/meta/classes-recipe/kernel-fitimage.bbclass
+++ b/meta/classes-recipe/kernel-fitimage.bbclass
@@ -4,7 +4,7 @@
# SPDX-License-Identifier: MIT
#
-inherit kernel-uboot kernel-artifact-names uboot-sign
+inherit kernel-uboot kernel-artifact-names uboot-config
def get_fit_replacement_type(d):
kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
@@ -50,21 +50,37 @@ python __anonymous () {
d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
-
- # Verified boot will sign the fitImage and append the public key to
- # U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
- # the fitImage:
- if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
- uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
- d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
- if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
- d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
}
# Description string
FIT_DESC ?= "Kernel fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
+# Kernel fitImage Hash Algo
+FIT_HASH_ALG ?= "sha256"
+
+# Kernel fitImage Signature Algo
+FIT_SIGN_ALG ?= "rsa2048"
+
+# Kernel / U-Boot fitImage Padding Algo
+FIT_PAD_ALG ?= "pkcs-1.5"
+
+# Generate keys for signing Kernel fitImage
+FIT_GENERATE_KEYS ?= "0"
+
+# Size of private keys in number of bits
+FIT_SIGN_NUMBITS ?= "2048"
+
+# args to openssl genrsa (Default is just the public exponent)
+FIT_KEY_GENRSA_ARGS ?= "-F4"
+
+# args to openssl req (default is -batch for non-interactive mode and
+# -new for a new certificate)
+FIT_KEY_REQ_ARGS ?= "-batch -new"
+
+# Standard format for public key certificate
+FIT_KEY_SIGN_PKCS ?= "-x509"
+
# Sign individual images as well
FIT_SIGN_INDIVIDUAL ?= "0"
@@ -73,6 +89,13 @@ FIT_CONF_PREFIX[doc] = "Prefix to use for FIT configuration node name"
FIT_SUPPORTED_INITRAMFS_FSTYPES ?= "cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst cpio.gz ext2.gz cpio"
+# Allow the user to select the default DTB for the FIT image when multiple DTBs exist.
+FIT_CONF_DEFAULT_DTB ?= ""
+
+# length of addresses in number of <u32> cells,
+# e.g. 1 for 32-bit addresses, 2 for 64-bit addresses
+FIT_ADDRESS_CELLS ?= "1"
+
# Keys used to sign individually image nodes.
# The keys to sign image nodes must be different from those used to sign
# configuration nodes, otherwise the "required" property, from
@@ -91,7 +114,7 @@ fitimage_emit_fit_header() {
/ {
description = "${FIT_DESC}";
- #address-cells = <1>;
+ #address-cells = <${FIT_ADDRESS_CELLS}>;
EOF
}
@@ -339,6 +362,27 @@ EOF
}
#
+# echoes the symlink destination if it points below the given directory
+#
+# $1 ... file that's a potential symlink
+# $2 ... expected parent directory
+symlink_points_below() {
+ file="$2/$1"
+ dir=$2
+
+ if ! [ -L "$file" ]; then
+ return
+ fi
+
+ realpath="$(realpath --relative-to=$dir $file)"
+ if [ -z "${realpath%%../*}" ]; then
+ return
+ fi
+
+ echo "$realpath"
+}
+
+#
# Emit the fitImage ITS configuration section
#
# $1 ... .its filename
@@ -348,6 +392,7 @@ EOF
# $5 ... u-boot script ID
# $6 ... config ID
# $7 ... default flag
+# $8 ... default DTB image name
fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
@@ -364,6 +409,7 @@ fitimage_emit_section_config() {
bootscr_id="$5"
config_id="$6"
default_flag="$7"
+ default_dtb_image="$8"
# Test if we have any DTBs at all
sep=""
@@ -375,6 +421,23 @@ fitimage_emit_section_config() {
bootscr_line=""
setup_line=""
default_line=""
+ compatible_line=""
+
+ dtb_image_sect=$(symlink_points_below $dtb_image "${EXTERNAL_KERNEL_DEVICETREE}")
+ if [ -z "$dtb_image_sect" ]; then
+ dtb_image_sect=$dtb_image
+ fi
+
+ dtb_path="${EXTERNAL_KERNEL_DEVICETREE}/${dtb_image_sect}"
+ if [ -e "$dtb_path" ]; then
+ compat=$(fdtget -t s "$dtb_path" / compatible | sed 's/ /", "/g')
+ if [ -n "$compat" ]; then
+ compatible_line="compatible = \"$compat\";"
+ fi
+ fi
+
+ dtb_image=$(echo $dtb_image | tr '/' '_')
+ dtb_image_sect=$(echo "${dtb_image_sect}" | tr '/' '_')
# conf node name is selected based on dtb ID if it is present,
# otherwise it's selected based on kernel ID
@@ -393,7 +456,7 @@ fitimage_emit_section_config() {
if [ -n "$dtb_image" ]; then
conf_desc="$conf_desc${sep}FDT blob"
sep=", "
- fdt_line="fdt = \"fdt-$dtb_image\";"
+ fdt_line="fdt = \"fdt-$dtb_image_sect\";"
fi
if [ -n "$ramdisk_id" ]; then
@@ -417,7 +480,13 @@ fitimage_emit_section_config() {
# default node is selected based on dtb ID if it is present,
# otherwise it's selected based on kernel ID
if [ -n "$dtb_image" ]; then
- default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
+ # Select the default node as the user-specified DTB when
+ # multiple DTBs exist.
+ if [ -n "$default_dtb_image" ]; then
+ default_line="default = \"${FIT_CONF_PREFIX}$default_dtb_image\";"
+ else
+ default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
+ fi
else
default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
fi
@@ -427,6 +496,7 @@ fitimage_emit_section_config() {
$default_line
$conf_node {
description = "$default_flag $conf_desc";
+ $compatible_line
$kernel_line
$fdt_line
$ramdisk_line
@@ -496,6 +566,7 @@ fitimage_assemble() {
ramdiskcount=$3
setupcount=""
bootscr_id=""
+ default_dtb_image=""
rm -f $1 arch/${ARCH}/boot/$2
if [ -n "${UBOOT_SIGN_IMG_KEYNAME}" -a "${UBOOT_SIGN_KEYNAME}" = "${UBOOT_SIGN_IMG_KEYNAME}" ]; then
@@ -529,26 +600,60 @@ fitimage_assemble() {
continue
fi
- DTB_PATH="arch/${ARCH}/boot/dts/$DTB"
+ DTB_PATH="${KERNEL_OUTPUT_DIR}/dts/$DTB"
if [ ! -e "$DTB_PATH" ]; then
- DTB_PATH="arch/${ARCH}/boot/$DTB"
+ DTB_PATH="${KERNEL_OUTPUT_DIR}/$DTB"
+ fi
+
+ # Strip off the path component from the filename
+ if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
+ DTB=`basename $DTB`
+ fi
+
+ # Set the default dtb image if it exists in the devicetree.
+ if [ "${FIT_CONF_DEFAULT_DTB}" = "$DTB" ]; then
+ default_dtb_image=$(echo "$DTB" | tr '/' '_')
fi
DTB=$(echo "$DTB" | tr '/' '_')
+
+ # Skip DTB if we've picked it up previously
+ echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
+
DTBS="$DTBS $DTB"
+ DTB=$(echo $DTB | tr '/' '_')
fitimage_emit_section_dtb $1 $DTB $DTB_PATH
done
fi
if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
dtbcount=1
- for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
+ for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtb' -printf '%P\n' | sort) \
+ $(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtbo' -printf '%P\n' | sort); do
+ # Set the default dtb image if it exists in the devicetree.
+ if [ "${FIT_CONF_DEFAULT_DTB}" = "$DTB" ]; then
+ default_dtb_image=$(echo "$DTB" | tr '/' '_')
+ fi
+
DTB=$(echo "$DTB" | tr '/' '_')
+
+ # Skip DTB/DTBO if we've picked it up previously
+ echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
+
DTBS="$DTBS $DTB"
+
+ # Also skip if a symlink. We'll later have each config section point at it
+ [ $(symlink_points_below $DTB "${EXTERNAL_KERNEL_DEVICETREE}") ] && continue
+
+ DTB=$(echo $DTB | tr '/' '_')
fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
done
fi
+ if [ -n "${FIT_CONF_DEFAULT_DTB}" ] && [ -z $default_dtb_image ]; then
+ bbwarn "${FIT_CONF_DEFAULT_DTB} is not available in the list of device trees."
+ fi
+
#
# Step 3: Prepare a u-boot script section
#
@@ -566,9 +671,9 @@ fitimage_assemble() {
#
# Step 4: Prepare a setup section. (For x86)
#
- if [ -e arch/${ARCH}/boot/setup.bin ]; then
+ if [ -e ${KERNEL_OUTPUT_DIR}/setup.bin ]; then
setupcount=1
- fitimage_emit_section_setup $1 $setupcount arch/${ARCH}/boot/setup.bin
+ fitimage_emit_section_setup $1 $setupcount ${KERNEL_OUTPUT_DIR}/setup.bin
fi
#
@@ -621,15 +726,15 @@ fitimage_assemble() {
for DTB in ${DTBS}; do
dtb_ext=${DTB##*.}
if [ "$dtb_ext" = "dtbo" ]; then
- fitimage_emit_section_config $1 "" "$DTB" "" "$bootscr_id" "" "`expr $i = $dtbcount`"
+ fitimage_emit_section_config $1 "" "$DTB" "" "$bootscr_id" "" "`expr $i = $dtbcount`" "$default_dtb_image"
else
- fitimage_emit_section_config $1 $kernelcount "$DTB" "$ramdiskcount" "$bootscr_id" "$setupcount" "`expr $i = $dtbcount`"
+ fitimage_emit_section_config $1 $kernelcount "$DTB" "$ramdiskcount" "$bootscr_id" "$setupcount" "`expr $i = $dtbcount`" "$default_dtb_image"
fi
i=`expr $i + 1`
done
else
defaultconfigcount=1
- fitimage_emit_section_config $1 $kernelcount "" "$ramdiskcount" "$bootscr_id" "$setupcount" $defaultconfigcount
+ fitimage_emit_section_config $1 $kernelcount "" "$ramdiskcount" "$bootscr_id" "$setupcount" $defaultconfigcount "$default_dtb_image"
fi
fitimage_emit_section_maint $1 sectend
@@ -642,24 +747,16 @@ fitimage_assemble() {
${UBOOT_MKIMAGE} \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-f $1 \
- arch/${ARCH}/boot/$2
+ ${KERNEL_OUTPUT_DIR}/$2
#
- # Step 8: Sign the image and add public key to U-Boot dtb
+ # Step 8: Sign the image
#
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
- add_key_to_u_boot=""
- if [ -n "${UBOOT_DTB_BINARY}" ]; then
- # The u-boot.dtb is a symlink to UBOOT_DTB_IMAGE, so we need copy
- # both of them, and don't dereference the symlink.
- cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
- add_key_to_u_boot="-K ${B}/${UBOOT_DTB_BINARY}"
- fi
${UBOOT_MKIMAGE_SIGN} \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${UBOOT_SIGN_KEYDIR}" \
- $add_key_to_u_boot \
- -r arch/${ARCH}/boot/$2 \
+ -r ${KERNEL_OUTPUT_DIR}/$2 \
${UBOOT_MKIMAGE_SIGN_ARGS}
fi
}
@@ -667,18 +764,30 @@ fitimage_assemble() {
do_assemble_fitimage() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
cd ${B}
- fitimage_assemble fit-image.its fitImage ""
+ fitimage_assemble fit-image.its fitImage-none ""
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ ln -sf fitImage-none ${B}/${KERNEL_OUTPUT_DIR}/fitImage
+ fi
fi
}
addtask assemble_fitimage before do_install after do_compile
+SYSROOT_DIRS:append = " /sysroot-only"
+do_install:append() {
+ if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
+ [ "${UBOOT_SIGN_ENABLE}" = "1" ]; then
+ install -D ${B}/${KERNEL_OUTPUT_DIR}/fitImage-none ${D}/sysroot-only/fitImage
+ fi
+}
+
do_assemble_fitimage_initramfs() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
test -n "${INITRAMFS_IMAGE}" ; then
cd ${B}
if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
- fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-bundle ""
+ ln -sf fitImage-bundle ${B}/${KERNEL_OUTPUT_DIR}/fitImage
else
fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
fi
@@ -762,42 +871,11 @@ kernel_do_deploy:append() {
if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
bbnote "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
+ install -m 0644 ${B}/${KERNEL_OUTPUT_DIR}/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
fi
fi
fi
fi
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
- [ -n "${UBOOT_DTB_BINARY}" ] ; then
- # UBOOT_DTB_IMAGE is a realfile, but we can't use
- # ${UBOOT_DTB_IMAGE} since it contains ${PV} which is aimed
- # for u-boot, but we are in kernel env now.
- install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
- fi
- if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${UBOOT_BINARY}" -a -n "${SPL_DTB_BINARY}" ] ; then
- # If we're also creating and/or signing the uboot fit, now we need to
- # deploy it, it's its file, as well as u-boot-spl.dtb
- install -m 0644 ${B}/u-boot-spl-${MACHINE}*.dtb "$deployDir/"
- bbnote "Copying u-boot-fitImage file..."
- install -m 0644 ${B}/u-boot-fitImage-* "$deployDir/"
- bbnote "Copying u-boot-its file..."
- install -m 0644 ${B}/u-boot-its-* "$deployDir/"
- fi
-}
-
-# The function below performs the following in case of initramfs bundles:
-# - Removes do_assemble_fitimage. FIT generation is done through
-# do_assemble_fitimage_initramfs. do_assemble_fitimage is not needed
-# and should not be part of the tasks to be executed.
-# - Since do_kernel_generate_rsa_keys is inserted by default
-# between do_compile and do_assemble_fitimage, this is
-# not suitable in case of initramfs bundles. do_kernel_generate_rsa_keys
-# should be between do_bundle_initramfs and do_assemble_fitimage_initramfs.
-python () {
- if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
- bb.build.deltask('do_assemble_fitimage', d)
- bb.build.deltask('kernel_generate_rsa_keys', d)
- bb.build.addtask('kernel_generate_rsa_keys', 'do_assemble_fitimage_initramfs', 'do_bundle_initramfs', d)
}
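When a fitImage carries several device trees, the default configuration node can now be pinned rather than inferred. A minimal sketch, with illustrative DTB names:

    KERNEL_DEVICETREE = "boardA.dtb boardB.dtb"
    # mark the boardB configuration node as the FIT default
    FIT_CONF_DEFAULT_DTB = "boardB.dtb"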
diff --git a/meta/classes-recipe/kernel-module-split.bbclass b/meta/classes-recipe/kernel-module-split.bbclass
index 1b4c864a63..9487365eb7 100644
--- a/meta/classes-recipe/kernel-module-split.bbclass
+++ b/meta/classes-recipe/kernel-module-split.bbclass
@@ -18,7 +18,7 @@ pkg_postrm:modules () {
if [ -z "$D" ]; then
depmod -a ${KERNEL_VERSION}
else
- depmodwrapper -a -b $D ${KERNEL_VERSION}
+ depmodwrapper -a -b $D ${KERNEL_VERSION} ${KERNEL_PACKAGE_NAME}
fi
}
@@ -30,12 +30,11 @@ fi
PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
-do_install:append() {
- install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
-}
+modulesloaddir ??= "${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '${nonarch_libdir}', '${sysconfdir}', d)}/modules-load.d"
+modprobedir ??= "${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '${nonarch_base_libdir}', '${sysconfdir}', d)}/modprobe.d"
KERNEL_SPLIT_MODULES ?= "1"
-PACKAGESPLITFUNCS:prepend = "split_kernel_module_packages "
+PACKAGESPLITFUNCS =+ "split_kernel_module_packages"
KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
@@ -68,14 +67,13 @@ python split_kernel_module_packages () {
else:
msg = "Cannot decompress '%s'" % file
raise msg
- cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", tmpkofile, tmpfile)
+ cmd = "%s -j .modinfo -O binary %s %s" % (d.getVar("OBJCOPY"), tmpkofile, tmpfile)
else:
- cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
+ cmd = "%s -j .modinfo -O binary %s %s" % (d.getVar("OBJCOPY"), file, tmpfile)
subprocess.check_call(cmd, shell=True)
# errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö')
- f = open(tmpfile, errors='replace')
- l = f.read().split("\000")
- f.close()
+ with open(tmpfile, errors='replace') as f:
+ l = f.read().split("\000")
os.close(tf[0])
os.unlink(tmpfile)
if compressed:
@@ -93,7 +91,7 @@ python split_kernel_module_packages () {
dvar = d.getVar('PKGD')
- # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
+ # If autoloading is requested, output ${modulesloaddir}/<name>.conf and append
# appropriate modprobe commands to the postinst
autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split()
autoload = d.getVar('module_autoload_%s' % basename)
@@ -102,14 +100,18 @@ python split_kernel_module_packages () {
if autoload and basename not in autoloadlist:
bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
if basename in autoloadlist:
- name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
- f = open(name, 'w')
- if autoload:
- for m in autoload.split():
- f.write('%s\n' % m)
- else:
- f.write('%s\n' % basename)
- f.close()
+ conf = '%s/%s.conf' % (d.getVar('modulesloaddir'), basename)
+ name = '%s%s' % (dvar, conf)
+ os.makedirs(os.path.dirname(name), exist_ok=True)
+ with open(name, 'w') as f:
+ if autoload:
+ for m in autoload.split():
+ f.write('%s\n' % m)
+ else:
+ f.write('%s\n' % basename)
+ conf2append = ' %s' % conf
+ d.appendVar('FILES:%s' % pkg, conf2append)
+ d.appendVar('CONFFILES:%s' % pkg, conf2append)
postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
bb.fatal("pkg_postinst:%s not defined" % pkg)
@@ -120,21 +122,18 @@ python split_kernel_module_packages () {
modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
modconf = d.getVar('module_conf_%s' % basename)
if modconf and basename in modconflist:
- name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
- f = open(name, 'w')
- f.write("%s\n" % modconf)
- f.close()
+ conf = '%s/%s.conf' % (d.getVar('modprobedir'), basename)
+ name = '%s%s' % (dvar, conf)
+ os.makedirs(os.path.dirname(name), exist_ok=True)
+ with open(name, 'w') as f:
+ f.write("%s\n" % modconf)
+ conf2append = ' %s' % conf
+ d.appendVar('FILES:%s' % pkg, conf2append)
+ d.appendVar('CONFFILES:%s' % pkg, conf2append)
+
elif modconf:
bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
- files = d.getVar('FILES:%s' % pkg)
- files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
- d.setVar('FILES:%s' % pkg, files)
-
- conffiles = d.getVar('CONFFILES:%s' % pkg)
- conffiles = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (conffiles, basename, basename)
- d.setVar('CONFFILES:%s' % pkg, conffiles)
-
if "description" in vals:
old_desc = d.getVar('DESCRIPTION:' + pkg) or ""
d.setVar('DESCRIPTION:' + pkg, old_desc + "; " + vals["description"])
@@ -169,8 +168,8 @@ python split_kernel_module_packages () {
postrm = d.getVar('pkg_postrm:modules')
if splitmods != '1':
- etcdir = d.getVar('sysconfdir')
- d.appendVar('FILES:' + metapkg, '%s/modules-load.d/ %s/modprobe.d/ %s/modules/' % (etcdir, etcdir, d.getVar("nonarch_base_libdir")))
+ d.appendVar('FILES:' + metapkg, '%s %s %s/modules' %
+ (d.getVar('modulesloaddir'), d.getVar('modprobedir'), d.getVar("nonarch_base_libdir")))
d.appendVar('pkg_postinst:%s' % metapkg, postinst)
d.prependVar('pkg_postrm:%s' % metapkg, postrm);
return
@@ -184,14 +183,6 @@ python split_kernel_module_packages () {
modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
if modules:
d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules))
-
- # If modules-load.d and modprobe.d are empty at this point, remove them to
- # avoid warnings. removedirs only raises an OSError if an empty
- # directory cannot be removed.
- dvar = d.getVar('PKGD')
- for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
- if len(os.listdir(dir)) == 0:
- os.rmdir(dir)
}
do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()))}'
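
Note: with modulesloaddir and modprobedir now derived from DISTRO_FEATURES, the generated configuration lands in /usr/lib/modules-load.d and /lib/modprobe.d on systemd distros, and under ${sysconfdir} otherwise. A sketch of a kernel recipe using the autoload and probeconf hooks (module names are illustrative):

    KERNEL_MODULE_AUTOLOAD += "i2c-dev"
    KERNEL_MODULE_PROBECONF += "examplemod"
    module_conf_examplemod = "options examplemod param=1"

On a systemd-based distro this would write /usr/lib/modules-load.d/i2c-dev.conf and /lib/modprobe.d/examplemod.conf, and register both as FILES and CONFFILES of their packages.
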
diff --git a/meta/classes-recipe/kernel-uboot.bbclass b/meta/classes-recipe/kernel-uboot.bbclass
index 4aab02671e..30a85ccc28 100644
--- a/meta/classes-recipe/kernel-uboot.bbclass
+++ b/meta/classes-recipe/kernel-uboot.bbclass
@@ -34,7 +34,7 @@ uboot_prep_kimage() {
linux_comp="${FIT_KERNEL_COMP_ALG}"
fi
- [ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
+ [ -n "${vmlinux_path}" ] && ${KERNEL_OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
if [ "${linux_comp}" != "none" ] ; then
if [ "${linux_comp}" = "gzip" ] ; then
diff --git a/meta/classes-recipe/kernel-yocto.bbclass b/meta/classes-recipe/kernel-yocto.bbclass
index 8eda0dcaf3..6d5c3b6327 100644
--- a/meta/classes-recipe/kernel-yocto.bbclass
+++ b/meta/classes-recipe/kernel-yocto.bbclass
@@ -63,7 +63,7 @@ def find_sccs(d):
return sources_list
# check the SRC_URI for "kmeta" type'd git repositories. Return the name of
-# the repository as it will be found in WORKDIR
+# the repository as it will be found in UNPACKDIR
def find_kernel_feature_dirs(d):
feature_dirs=[]
fetch = bb.fetch2.Fetch([], d)
@@ -147,24 +147,24 @@ do_kernel_metadata() {
# from the source tree, into a common location and normalized "defconfig" name,
	# where the rest of the process will include and incorporate it into the build
#
- # If the fetcher has already placed a defconfig in WORKDIR (from the SRC_URI),
+ # If the fetcher has already placed a defconfig in UNPACKDIR (from the SRC_URI),
# we don't overwrite it, but instead warn the user that SRC_URI defconfigs take
	# precedence.
#
if [ -n "${KBUILD_DEFCONFIG}" ]; then
if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
- if [ -f "${WORKDIR}/defconfig" ]; then
+ if [ -f "${UNPACKDIR}/defconfig" ]; then
# If the two defconfig's are different, warn that we overwrote the
- # one already placed in WORKDIR
- cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
+ # one already placed in UNPACKDIR
+ cmp "${UNPACKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
if [ $? -ne 0 ]; then
- bbdebug 1 "detected SRC_URI or unpatched defconfig in WORKDIR. ${KBUILD_DEFCONFIG} copied over it"
+ bbdebug 1 "detected SRC_URI or patched defconfig in UNPACKDIR. ${KBUILD_DEFCONFIG} copied over it"
fi
- cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
+ cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${UNPACKDIR}/defconfig
else
- cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
+ cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${UNPACKDIR}/defconfig
fi
- in_tree_defconfig="${WORKDIR}/defconfig"
+ in_tree_defconfig="${UNPACKDIR}/defconfig"
else
bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)"
fi
@@ -176,12 +176,32 @@ do_kernel_metadata() {
# kernel source tree, where they'll be used later.
check_git_config
patches="${@" ".join(find_patches(d,'kernel-meta'))}"
- for p in $patches; do
+ if [ -n "$patches" ]; then
(
- cd ${WORKDIR}/kernel-meta
- git am -s $p
- )
- done
+ cd ${UNPACKDIR}/kernel-meta
+
+			# take the SRC_URI patches and create a series file;
+			# this is required to support better processing
+			# of issues with the patches
+ rm -f series
+ for p in $patches; do
+ cp $p .
+ echo "$(basename $p)" >> series
+ done
+
+			# process the series with kgit-s2q, which is what
+			# handles the rest of the kernel patches. This gives us
+			# more flexibility for handling failures or advanced
+			# merging functionality
+ message=$(kgit-s2q --gen -v --patches ${UNPACKDIR}/kernel-meta 2>&1)
+ if [ $? -ne 0 ]; then
+ # setup to try the patch again
+ kgit-s2q --prev
+ bberror "Problem applying patches to: ${UNPACKDIR}/kernel-meta"
+ bbfatal_log "\n($message)"
+ fi
+ )
+ fi
fi
sccs_from_src_uri="${@" ".join(find_sccs(d))}"
@@ -212,12 +232,10 @@ do_kernel_metadata() {
# SRC_URI. If they were supplied, we convert them into include directives
# for the update part of the process
for f in ${feat_dirs}; do
- if [ -d "${WORKDIR}/$f/meta" ]; then
- includes="$includes -I${WORKDIR}/$f/kernel-meta"
- elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
- includes="$includes -I${WORKDIR}/../oe-local-files/$f"
- elif [ -d "${WORKDIR}/$f" ]; then
- includes="$includes -I${WORKDIR}/$f"
+ if [ -d "${UNPACKDIR}/$f/kernel-meta" ]; then
+ includes="$includes -I${UNPACKDIR}/$f/kernel-meta"
+ elif [ -d "${UNPACKDIR}/$f" ]; then
+ includes="$includes -I${UNPACKDIR}/$f"
fi
done
for s in ${sccs} ${patches}; do
@@ -359,19 +377,19 @@ do_kernel_checkout() {
set +e
source_dir=`echo ${S} | sed 's%/$%%'`
- source_workdir="${WORKDIR}/git"
- if [ -d "${WORKDIR}/git/" ]; then
+ source_workdir="${UNPACKDIR}/git"
+ if [ -d "${UNPACKDIR}/git/" ]; then
# case: git repository
# if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
if [ "${source_dir}" != "${source_workdir}" ]; then
if [ -d "${source_workdir}/.git" ]; then
# regular git repository with .git
rm -rf ${S}
- mv ${WORKDIR}/git ${S}
+ mv ${UNPACKDIR}/git ${S}
else
# create source for bare cloned git repository
git clone ${WORKDIR}/git ${S}
- rm -rf ${WORKDIR}/git
+ rm -rf ${UNPACKDIR}/git
fi
fi
cd ${S}
@@ -408,13 +426,13 @@ do_kernel_checkout() {
git init
check_git_config
git add .
- git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
+ git commit -q -n -m "baseline commit: creating repo for ${PN}-${PV}"
git clean -d -f
fi
set -e
}
-do_kernel_checkout[dirs] = "${S} ${WORKDIR}"
+do_kernel_checkout[dirs] = "${S} ${UNPACKDIR}"
addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
addtask kernel_metadata after do_validate_branches do_unpack before do_patch
@@ -422,6 +440,11 @@ do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}"
do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
+# ${S} doesn't exist for us at unpack
+do_qa_unpack() {
+ return
+}
+
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
@@ -440,7 +463,7 @@ do_kernel_configme() {
config_flags=""
;;
*)
- if [ -f ${WORKDIR}/defconfig ]; then
+ if [ -f ${UNPACKDIR}/defconfig ]; then
config_flags="-n"
fi
;;
@@ -455,7 +478,7 @@ do_kernel_configme() {
bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
fi
- CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
+ CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" STRIP="${KERNEL_STRIP}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
if [ $? -ne 0 -o ! -f ${B}/.config ]; then
bberror "Could not generate a .config for ${KMACHINE}-${LINUX_KERNEL_TYPE}"
if [ ${KCONF_AUDIT_LEVEL} -gt 1 ]; then
@@ -489,6 +512,8 @@ python do_config_analysis() {
env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
env['LD'] = d.getVar('KERNEL_LD')
env['CC'] = d.getVar('KERNEL_CC')
+ env['OBJCOPY'] = d.getVar('KERNEL_OBJCOPY')
+ env['STRIP'] = d.getVar('KERNEL_STRIP')
env['ARCH'] = d.getVar('ARCH')
env['srctree'] = s
@@ -506,7 +531,7 @@ python do_config_analysis() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )
@@ -514,7 +539,7 @@ python do_config_analysis() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
outfile = d.getVar( 'CONFIG_AUDIT_FILE' )
@@ -550,6 +575,8 @@ python do_kernel_configcheck() {
env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
env['LD'] = d.getVar('KERNEL_LD')
env['CC'] = d.getVar('KERNEL_CC')
+ env['OBJCOPY'] = d.getVar('KERNEL_OBJCOPY')
+ env['STRIP'] = d.getVar('KERNEL_STRIP')
env['ARCH'] = d.getVar('ARCH')
env['srctree'] = s
@@ -575,7 +602,7 @@ python do_kernel_configcheck() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
if analysis:
outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
@@ -597,7 +624,7 @@ python do_kernel_configcheck() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
if analysis:
outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
@@ -616,7 +643,7 @@ python do_kernel_configcheck() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
if analysis:
outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
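
Note: since defconfigs fetched through SRC_URI are now unpacked into ${UNPACKDIR} rather than ${WORKDIR}, a recipe supplying its own configuration keeps working unchanged, e.g.:

    SRC_URI += "file://defconfig"

Alternatively, KBUILD_DEFCONFIG selects an in-tree configuration, which do_kernel_metadata copies to ${UNPACKDIR}/defconfig (the defconfig name is illustrative):

    KBUILD_DEFCONFIG = "multi_v7_defconfig"
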
diff --git a/meta/classes-recipe/kernel.bbclass b/meta/classes-recipe/kernel.bbclass
index 3463179395..d6eedf942c 100644
--- a/meta/classes-recipe/kernel.bbclass
+++ b/meta/classes-recipe/kernel.bbclass
@@ -33,7 +33,6 @@ INHIBIT_DEFAULT_DEPS = "1"
KERNEL_IMAGETYPE ?= "zImage"
INITRAMFS_IMAGE ?= ""
-INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
INITRAMFS_TASK ?= ""
INITRAMFS_IMAGE_BUNDLE ?= ""
INITRAMFS_DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR_IMAGE}"
@@ -112,7 +111,7 @@ python __anonymous () {
d.appendVar('RDEPENDS:%s-image' % kname, ' %s-modules (= ${EXTENDPKGV})' % kname)
d.appendVar('RDEPENDS:%s-image-%s' % (kname, typelower), ' %s-modules-${KERNEL_VERSION_PKG_NAME} (= ${EXTENDPKGV})' % kname)
d.setVar('PKG:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
- d.appendVar('RPROVIDES:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
+ d.appendVar('RPROVIDES:%s-modules' % kname, ' %s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
@@ -172,7 +171,7 @@ set -e
# image types.
KERNEL_CLASSES ?= " kernel-uimage "
-inherit ${KERNEL_CLASSES}
+inherit_defer ${KERNEL_CLASSES}
# Old style kernels may set ${S} = ${WORKDIR}/git for example
# We need to move these over to STAGING_KERNEL_DIR. We can't just
@@ -182,13 +181,14 @@ do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILD
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
python do_symlink_kernsrc () {
s = d.getVar("S")
- if s[-1] == '/':
- # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
- s=s[:-1]
kernsrc = d.getVar("STAGING_KERNEL_DIR")
if s != kernsrc:
bb.utils.mkdirhier(kernsrc)
bb.utils.remove(kernsrc, recurse=True)
+ if s[-1] == '/':
+ # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as
+ # directory name and fail
+ s = s[:-1]
if d.getVar("EXTERNALSRC"):
# With EXTERNALSRC S will not be wiped so we can symlink to it
os.symlink(s, kernsrc)
@@ -210,15 +210,14 @@ PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
-export KBUILD_BUILD_VERSION = "1"
-export KBUILD_BUILD_USER ?= "oe-user"
-export KBUILD_BUILD_HOST ?= "oe-host"
KERNEL_RELEASE ?= "${KERNEL_VERSION}"
# The directory where built kernel lies in the kernel tree
KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
KERNEL_IMAGEDEST ?= "boot"
+KERNEL_DTBDEST ?= "${KERNEL_IMAGEDEST}"
+KERNEL_DTBVENDORED ?= "0"
#
# configuration
@@ -237,8 +236,11 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""
-EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
-EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}" PAHOLE=false"
+EXTRA_OEMAKE += ' CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" STRIP="${KERNEL_STRIP}"'
+EXTRA_OEMAKE += ' HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}"'
+EXTRA_OEMAKE += ' HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}"'
+# HOSTPKG_CONFIG is used only by newer kernels (5.19+); for older kernels the native pkg-config variables are set when building the kernel and modules
+EXTRA_OEMAKE += ' HOSTPKG_CONFIG="pkg-config-native"'
KERNEL_ALT_IMAGETYPE ??= ""
@@ -335,6 +337,10 @@ kernel_do_transform_bundled_initramfs() {
}
do_transform_bundled_initramfs[dirs] = "${B}"
+python do_package:prepend () {
+ d.setVar('STRIP', d.getVar('KERNEL_STRIP').strip())
+}
+
python do_devshell:prepend () {
os.environ["LDFLAGS"] = ''
}
@@ -366,6 +372,10 @@ kernel_do_compile() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
# The $use_alternate_initrd is only set from
# do_bundle_initramfs() This variable is specifically for the
@@ -381,7 +391,7 @@ kernel_do_compile() {
use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
fi
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
+ oe_runmake ${PARALLEL_MAKE} ${typeformake} ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
}
@@ -397,6 +407,13 @@ addtask transform_kernel after do_compile before do_install
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+
+	# set up native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
+ export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
+ export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
+ export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
+ export PKG_CONFIG_SYSROOT_DIR=""
+
if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
@@ -411,11 +428,15 @@ do_compile_kernelmodules() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
- oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+ oe_runmake -C ${B} ${PARALLEL_MAKE} modules ${KERNEL_EXTRA_ARGS}
- # Module.symvers gets updated during the
+ # Module.symvers gets updated during the
# building of the kernel modules. We need to
# update this in the shared workdir since some
 	# external kernel modules have a dependency on
@@ -439,10 +460,10 @@ kernel_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
- # If the kernel/ directory is empty remove it to prevent QA issues
- rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
+ # Remove empty module directories to prevent QA issues
+ [ -d "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" ] && find "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" -type d -empty -delete
else
bbnote "no modules to install"
fi
@@ -471,9 +492,7 @@ kernel_do_install() {
install -m 0644 System.map ${D}/${KERNEL_IMAGEDEST}/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/${KERNEL_IMAGEDEST}/config-${KERNEL_VERSION}
install -m 0644 vmlinux ${D}/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION}
- [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/${KERNEL_IMAGEDEST}/Module.symvers-${KERNEL_VERSION}
- install -d ${D}${sysconfdir}/modules-load.d
- install -d ${D}${sysconfdir}/modprobe.d
+ ! [ -e Module.symvers ] || install -m 0644 Module.symvers ${D}/${KERNEL_IMAGEDEST}/Module.symvers-${KERNEL_VERSION}
}
# Must be run no earlier than after do_kernel_checkout or else the Makefile won't be in ${S}/Makefile
@@ -538,10 +557,11 @@ do_shared_workdir () {
#
echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
+ echo "${KERNEL_LOCALVERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-localversion
# Copy files required for module builds
cp System.map $kerneldir/System.map-${KERNEL_VERSION}
- [ -e Module.symvers ] && cp Module.symvers $kerneldir/
+ ! [ -e Module.symvers ] || cp Module.symvers $kerneldir/
cp .config $kerneldir/
mkdir -p $kerneldir/include/config
cp include/config/kernel.release $kerneldir/include/config/kernel.release
@@ -590,14 +610,28 @@ do_shared_workdir () {
cp tools/objtool/objtool ${kerneldir}/tools/objtool/
fi
fi
+
+ # When building with CONFIG_MODVERSIONS=y and CONFIG_RANDSTRUCT=y we need
+ # to copy the build assets generated for the randstruct seed to
+ # STAGING_KERNEL_BUILDDIR, otherwise the out-of-tree modules build will
+ # generate those assets which will result in a different
+ # RANDSTRUCT_HASHED_SEED
+ if [ -d scripts/basic ]; then
+ mkdir -p ${kerneldir}/scripts
+ cp -r scripts/basic ${kerneldir}/scripts
+ fi
+
+ if [ -d scripts/gcc-plugins ]; then
+ mkdir -p ${kerneldir}/scripts
+ cp -r scripts/gcc-plugins ${kerneldir}/scripts
+ fi
+
}
# We don't need to stage anything; in particular not the modules/firmware, since those would clash with linux-firmware
-sysroot_stage_all () {
- :
-}
+SYSROOT_DIRS = ""
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" LD="${KERNEL_LD}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" LD="${KERNEL_LD}" oldnoconfig"
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} olddefconfig || oe_runmake -C ${S} O=${B} oldnoconfig"
python check_oldest_kernel() {
oldest_kernel = d.getVar('OLDEST_KERNEL')
@@ -611,14 +645,33 @@ python check_oldest_kernel() {
}
check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
-do_configure[prefuncs] += "check_oldest_kernel"
+do_compile[postfuncs] += "check_oldest_kernel"
+
+KERNEL_LOCALVERSION ??= ""
+
+# Kernels 6.3+ require the variable LOCALVERSION to be set to avoid getting a "+" in
+# the local version. Setting it empty means nothing will be added, while any
+# other value will be appended to the kernel version. This replaces the
+# use of the .scmversion file for setting a localversion without using
+# the CONFIG_LOCALVERSION option.
+#
+# Note: This class saves the value of localversion to a file
+# so other recipes like make-mod-scripts can restore it via the
+# helper function get_kernellocalversion_file
+export LOCALVERSION="${KERNEL_LOCALVERSION}"
kernel_do_configure() {
# fixes extra + in /lib/modules/2.6.37+
# $ scripts/setlocalversion . => +
# $ make kernelversion => 2.6.37
# $ make kernelrelease => 2.6.37+
- touch ${B}/.scmversion ${S}/.scmversion
+ # See kernel-arch.bbclass for post v6.3 removal of the extra
+ # + in localversion. .scmversion is no longer used, and the
+ # variable LOCALVERSION must be used
+ if [ ! -e ${B}/.scmversion -a ! -e ${S}/.scmversion ]; then
+ echo ${KERNEL_LOCALVERSION} > ${B}/.scmversion
+ echo ${KERNEL_LOCALVERSION} > ${S}/.scmversion
+ fi
if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
mv "${S}/.config" "${B}/.config"
@@ -626,8 +679,8 @@ kernel_do_configure() {
# Copy defconfig to .config if .config does not exist. This allows
# recipes to manage the .config themselves in do_configure:prepend().
- if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
- cp "${WORKDIR}/defconfig" "${B}/.config"
+ if [ -f "${UNPACKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
+ cp "${UNPACKDIR}/defconfig" "${B}/.config"
fi
${KERNEL_CONFIG_COMMAND}
@@ -635,14 +688,15 @@ kernel_do_configure() {
do_savedefconfig() {
bbplain "Saving defconfig to:\n${B}/defconfig"
- oe_runmake -C ${B} LD='${KERNEL_LD}' savedefconfig
+ oe_runmake -C ${B} savedefconfig
}
do_savedefconfig[nostamp] = "1"
addtask savedefconfig after do_configure
-inherit cml1
+inherit cml1 pkgconfig
-KCONFIG_CONFIG_COMMAND:append = " PAHOLE=false LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
+# Need LD, HOSTLDFLAGS and more for config operations
+KCONFIG_CONFIG_COMMAND:append = " ${EXTRA_OEMAKE}"
EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
@@ -675,13 +729,13 @@ pkg_postinst:${KERNEL_PACKAGE_NAME}-base () {
mkdir -p $D/lib/modules/${KERNEL_VERSION}
fi
if [ -n "$D" ]; then
- depmodwrapper -a -b $D ${KERNEL_VERSION}
+ depmodwrapper -a -b $D ${KERNEL_VERSION} ${KERNEL_PACKAGE_NAME}
else
depmod -a ${KERNEL_VERSION}
fi
}
-PACKAGESPLITFUNCS:prepend = "split_kernel_packages "
+PACKAGESPLITFUNCS =+ "split_kernel_packages"
python split_kernel_packages () {
do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
@@ -712,7 +766,7 @@ addtask kernel_link_images after do_compile before do_strip
python do_strip() {
import shutil
- strip = d.getVar('STRIP')
+ strip = d.getVar('KERNEL_STRIP')
extra_sections = d.getVar('KERNEL_IMAGE_STRIP_EXTRA_SECTIONS')
kernel_image = d.getVar('B') + "/" + d.getVar('KERNEL_OUTPUT_DIR') + "/vmlinux"
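
Note: KERNEL_LOCALVERSION feeds the exported LOCALVERSION variable that 6.3+ kernels use in place of .scmversion. A distro or recipe might set, for example (the value is illustrative):

    KERNEL_LOCALVERSION = "-mydistro"

which would yield a kernel release such as 6.4.0-mydistro instead of 6.4.0+.
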
diff --git a/meta/classes-recipe/kernelsrc.bbclass b/meta/classes-recipe/kernelsrc.bbclass
index a32882a5d2..ecb02dc9ed 100644
--- a/meta/classes-recipe/kernelsrc.bbclass
+++ b/meta/classes-recipe/kernelsrc.bbclass
@@ -11,6 +11,7 @@ do_patch[depends] += "virtual/kernel:do_shared_workdir"
do_patch[noexec] = "1"
do_package[depends] += "virtual/kernel:do_populate_sysroot"
KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
+LOCAL_VERSION = "${@get_kernellocalversion_file("${STAGING_KERNEL_BUILDDIR}")}"
inherit linux-kernel-base
diff --git a/meta/classes-recipe/libc-package.bbclass b/meta/classes-recipe/libc-package.bbclass
index de3d4223a8..c06a2ce90a 100644
--- a/meta/classes-recipe/libc-package.bbclass
+++ b/meta/classes-recipe/libc-package.bbclass
@@ -51,6 +51,7 @@ PACKAGE_NO_GCONV ?= "0"
OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
+mkdir ${libdir}/locale
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
}
@@ -267,7 +268,8 @@ python package_do_split_gconvs () {
"riscv32": " --uint32-align=4 --little-endian ", \
"i586": " --uint32-align=4 --little-endian ", \
"i686": " --uint32-align=4 --little-endian ", \
- "x86_64": " --uint32-align=4 --little-endian " }
+ "x86_64": " --uint32-align=4 --little-endian ", \
+ "loongarch64": " --uint32-align=4 --little-endian " }
if target_arch in locale_arch_options:
localedef_opts = locale_arch_options[target_arch]
@@ -276,7 +278,7 @@ python package_do_split_gconvs () {
bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \
- --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
+ --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s --no-warnings=ascii" \
% (treedir, treedir, datadir, locale, encoding, outputpath, name)
cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
diff --git a/meta/classes-recipe/license_image.bbclass b/meta/classes-recipe/license_image.bbclass
index b60d6e44f4..19b3dc55ba 100644
--- a/meta/classes-recipe/license_image.bbclass
+++ b/meta/classes-recipe/license_image.bbclass
@@ -18,7 +18,7 @@ python() {
python write_package_manifest() {
# Get list of installed packages
- license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
+ license_image_dir = d.expand('${LICENSE_DIRECTORY}/${SSTATE_PKGARCH}/${IMAGE_NAME}')
bb.utils.mkdirhier(license_image_dir)
from oe.rootfs import image_list_installed_packages
from oe.utils import format_pkg_list
@@ -49,7 +49,7 @@ python license_create_manifest() {
pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
- d.getVar('IMAGE_NAME'), 'license.manifest')
+ d.getVar('SSTATE_PKGARCH'), d.getVar('IMAGE_NAME'), 'license.manifest')
write_license_files(d, rootfs_license_manifest, pkg_dic, rootfs=True)
}
@@ -59,6 +59,8 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+ pkgarchs = d.getVar("SSTATE_ARCHS").split()
+ pkgarchs.reverse()
exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
with open(license_manifest, "w") as license_file:
@@ -98,9 +100,13 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
for lic in pkg_dic[pkg]["LICENSES"]:
- lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
- pkg_dic[pkg]["PN"], "generic_%s" %
- re.sub(r'\+', '', lic))
+ for pkgarch in pkgarchs:
+ lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ pkgarch,
+ pkg_dic[pkg]["PN"], "generic_%s" %
+ re.sub(r'\+', '', lic))
+ if os.path.exists(lic_file):
+ break
             # explicitly avoid the CLOSED license because it isn't generic
if lic == "CLOSED":
continue
@@ -130,8 +136,13 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
for pkg in sorted(pkg_dic):
pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
bb.utils.mkdirhier(pkg_rootfs_license_dir)
- pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
- pkg_dic[pkg]["PN"])
+ for pkgarch in pkgarchs:
+ pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ pkgarch, pkg_dic[pkg]["PN"])
+ if os.path.exists(pkg_license_dir):
+ break
+        if not os.path.exists(pkg_license_dir):
+ bb.fatal("Couldn't find license information for dependency %s" % pkg)
pkg_manifest_licenses = [canonical_license(d, lic) \
for lic in pkg_dic[pkg]["LICENSES"]]
@@ -183,7 +194,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
os.lchown(p, 0, 0)
os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
-
+write_license_files[vardepsexclude] = "SSTATE_ARCHS"
def license_deployed_manifest(d):
"""
@@ -195,6 +206,8 @@ def license_deployed_manifest(d):
dep_dic = {}
man_dic = {}
lic_dir = d.getVar("LICENSE_DIRECTORY")
+ pkgarchs = d.getVar("SSTATE_ARCHS").split()
+ pkgarchs.reverse()
dep_dic = get_deployed_dependencies(d)
for dep in dep_dic.keys():
@@ -204,12 +217,19 @@ def license_deployed_manifest(d):
man_dic[dep]["PN"] = dep
man_dic[dep]["FILES"] = \
" ".join(get_deployed_files(dep_dic[dep]))
- with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
+
+ for pkgarch in pkgarchs:
+ licfile = os.path.join(lic_dir, pkgarch, dep, "recipeinfo")
+ if os.path.exists(licfile):
+ break
+ if not os.path.exists(licfile):
+ bb.fatal("Couldn't find license information for dependency %s" % dep)
+ with open(licfile, "r") as f:
for line in f.readlines():
key,val = line.split(": ", 1)
man_dic[dep][key] = val[:-1]
- lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'), d.getVar('SSTATE_PKGARCH'),
d.getVar('IMAGE_NAME'))
bb.utils.mkdirhier(lic_manifest_dir)
image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
@@ -217,7 +237,7 @@ def license_deployed_manifest(d):
link_name = d.getVar('IMAGE_LINK_NAME')
if link_name:
- lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'), d.getVar('SSTATE_PKGARCH'),
link_name)
# remove old symlink
if os.path.islink(lic_manifest_symlink_dir):
@@ -227,6 +247,8 @@ def license_deployed_manifest(d):
if lic_manifest_dir != lic_manifest_symlink_dir:
os.symlink(lic_manifest_dir, lic_manifest_symlink_dir)
+license_deployed_manifest[vardepsexclude] = "SSTATE_ARCHS"
+
def get_deployed_dependencies(d):
"""
Get all the deployed dependencies of an image
@@ -235,7 +257,7 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
taskdata = d.getVar("BB_TASKDEPDATA", False)
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
depends = list(set([dep[0] for dep
in list(taskdata.values())
if not dep[0].endswith("-native") and not dep[0] == pn]))
@@ -255,7 +277,7 @@ def get_deployed_dependencies(d):
break
return deploy
-get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
+get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA SSTATE_ARCHS"
def get_deployed_files(man_file):
"""
@@ -272,7 +294,7 @@ def get_deployed_files(man_file):
dep_files.append(os.path.basename(f))
return dep_files
-ROOTFS_POSTPROCESS_COMMAND:prepend = "write_package_manifest; license_create_manifest; "
+ROOTFS_POSTPROCESS_COMMAND:prepend = "write_package_manifest license_create_manifest "
do_rootfs[recrdeptask] += "do_populate_lic"
python do_populate_lic_deploy() {
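
Note: license data is now namespaced per package architecture and lookups walk SSTATE_ARCHS from most to least specific. The resulting layout under ${LICENSE_DIRECTORY} (typically tmp/deploy/licenses) looks roughly like this (arch, recipe and image names are illustrative):

    tmp/deploy/licenses/core2-64/busybox/generic_GPL-2.0-only
    tmp/deploy/licenses/qemux86_64/core-image-minimal-20230101/license.manifest
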
diff --git a/meta/classes-recipe/linux-kernel-base.bbclass b/meta/classes-recipe/linux-kernel-base.bbclass
index cb2212c948..e2187a73f0 100644
--- a/meta/classes-recipe/linux-kernel-base.bbclass
+++ b/meta/classes-recipe/linux-kernel-base.bbclass
@@ -39,9 +39,24 @@ def get_kernelversion_file(p):
except IOError:
return None
+def get_kernellocalversion_file(p):
+ fn = p + '/kernel-localversion'
+
+ try:
+ with open(fn, 'r') as f:
+ return f.readlines()[0].strip()
+ except IOError:
+ return ""
+
+ return ""
+
def linux_module_packages(s, d):
suffix = ""
return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
+export KBUILD_BUILD_VERSION = "1"
+export KBUILD_BUILD_USER ?= "oe-user"
+export KBUILD_BUILD_HOST ?= "oe-host"
+
# that's all
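
Note: get_kernellocalversion_file mirrors get_kernelversion_file above; consumers such as kernelsrc.bbclass and module-base.bbclass (see below) restore the saved localversion roughly like this:

    LOCAL_VERSION = "${@get_kernellocalversion_file("${STAGING_KERNEL_BUILDDIR}")}"
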
diff --git a/meta/classes-recipe/linuxloader.bbclass b/meta/classes-recipe/linuxloader.bbclass
index 1dfb95e31d..2ea1b62254 100644
--- a/meta/classes-recipe/linuxloader.bbclass
+++ b/meta/classes-recipe/linuxloader.bbclass
@@ -46,6 +46,8 @@ def get_glibc_loader(d):
dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1"
elif targetarch.startswith("mips"):
dynamic_loader = "${base_libdir}/ld.so.1"
+ elif targetarch.startswith("loongarch64"):
+ dynamic_loader = "${base_libdir}/ld-linux-loongarch-lp64d.so.1"
elif targetarch == "powerpc64le":
dynamic_loader = "${base_libdir}/ld64.so.2"
elif targetarch == "powerpc64":
diff --git a/meta/classes-recipe/live-vm-common.bbclass b/meta/classes-recipe/live-vm-common.bbclass
index b619f3a4be..d90cc67ebc 100644
--- a/meta/classes-recipe/live-vm-common.bbclass
+++ b/meta/classes-recipe/live-vm-common.bbclass
@@ -68,8 +68,8 @@ efi_hddimg_populate() {
efi_populate $1
}
-inherit ${EFI_CLASS}
-inherit ${PCBIOS_CLASS}
+inherit_defer ${EFI_CLASS}
+inherit_defer ${PCBIOS_CLASS}
populate_kernel() {
dest=$1
diff --git a/meta/classes-recipe/manpages.bbclass b/meta/classes-recipe/manpages.bbclass
index 693fb53671..e9ca2f895b 100644
--- a/meta/classes-recipe/manpages.bbclass
+++ b/meta/classes-recipe/manpages.bbclass
@@ -23,17 +23,7 @@ pkg_postinst:${MAN_PKG}:append () {
if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
if test -n "$D"; then
if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then
- sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
- chown -R root:root $D${mandir}
-
- mkdir -p $D${localstatedir}/cache/man
- cd $D${mandir}
- find . -name index.db | while read index; do
- mkdir -p $D${localstatedir}/cache/man/$(dirname ${index})
- mv ${index} $D${localstatedir}/cache/man/${index}
- chown man:man $D${localstatedir}/cache/man/${index}
- done
- cd -
+ $INTERCEPT_DIR/postinst_intercept update_mandb ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} bindir=${bindir} sysconfdir=${sysconfdir} mandir=${mandir}
else
$INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
fi
diff --git a/meta/classes-recipe/meson-routines.bbclass b/meta/classes-recipe/meson-routines.bbclass
index 6086fce9d9..a944a8fff1 100644
--- a/meta/classes-recipe/meson-routines.bbclass
+++ b/meta/classes-recipe/meson-routines.bbclass
@@ -23,6 +23,8 @@ def meson_cpu_family(var, d):
return 'arm'
elif arch == 'aarch64_be':
return 'aarch64'
+ elif arch == 'loongarch64':
+ return 'loongarch64'
elif arch == 'mipsel':
return 'mips'
elif arch == 'mips64el':
diff --git a/meta/classes-recipe/meson.bbclass b/meta/classes-recipe/meson.bbclass
index 765e81bc4f..03fa2c06eb 100644
--- a/meta/classes-recipe/meson.bbclass
+++ b/meta/classes-recipe/meson.bbclass
@@ -20,6 +20,9 @@ do_configure[cleandirs] = "${B}"
# Where the meson.build build configuration is
MESON_SOURCEPATH = "${S}"
+# The target to build in do_compile. If unset the default targets are built.
+MESON_TARGET ?= ""
+
def noprefix(var, d):
return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
@@ -58,7 +61,7 @@ def rust_tool(d, target_var):
return "rust = %s" % repr(cmd)
addtask write_config before do_configure
-do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS"
+do_write_config[vardeps] += "CC CXX AR NM STRIP READELF OBJCOPY CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS EXEWRAPPER_ENABLED"
do_write_config() {
# This needs to be Py to split the args into single-element lists
cat >${WORKDIR}/meson.cross <<EOF
@@ -71,12 +74,12 @@ nm = ${@meson_array('NM', d)}
strip = ${@meson_array('STRIP', d)}
readelf = ${@meson_array('READELF', d)}
objcopy = ${@meson_array('OBJCOPY', d)}
-pkgconfig = 'pkg-config'
-llvm-config = 'llvm-config${LLVMVERSION}'
+pkg-config = 'pkg-config'
+llvm-config = 'llvm-config'
cups-config = 'cups-config'
g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper'
g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper'
-${@rust_tool(d, "HOST_SYS")}
+${@rust_tool(d, "RUST_HOST_SYS")}
${@"exe_wrapper = '${WORKDIR}/meson-qemuwrapper'" if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ""}
[built-in options]
@@ -87,6 +90,7 @@ cpp_link_args = ${@meson_array('LDFLAGS', d)}
[properties]
needs_exe_wrapper = true
+sys_root = '${STAGING_DIR_HOST}'
[host_machine]
system = '${@meson_operating_system('HOST_OS', d)}'
@@ -111,8 +115,9 @@ nm = ${@meson_array('BUILD_NM', d)}
strip = ${@meson_array('BUILD_STRIP', d)}
readelf = ${@meson_array('BUILD_READELF', d)}
objcopy = ${@meson_array('BUILD_OBJCOPY', d)}
-pkgconfig = 'pkg-config-native'
-${@rust_tool(d, "BUILD_SYS")}
+llvm-config = '${STAGING_BINDIR_NATIVE}/llvm-config'
+pkg-config = 'pkg-config-native'
+${@rust_tool(d, "RUST_BUILD_SYS")}
[built-in options]
c_args = ${@meson_array('BUILD_CFLAGS', d)}
@@ -148,11 +153,8 @@ meson_do_configure() {
# https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
unset LD
- # Work around "Meson fails if /tmp is mounted with noexec #2972"
- mkdir -p "${B}/meson-private/tmp"
- export TMPDIR="${B}/meson-private/tmp"
bbnote Executing meson ${EXTRA_OEMESON}...
- if ! meson ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
+ if ! meson setup ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
bbfatal_log meson failed
fi
}
@@ -169,11 +171,11 @@ do_configure[postfuncs] += "meson_do_qa_configure"
do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
meson_do_compile() {
- ninja -v ${PARALLEL_MAKE}
+ meson compile -v ${PARALLEL_MAKE} ${MESON_TARGET}
}
meson_do_install() {
- DESTDIR='${D}' ninja -v ${PARALLEL_MAKEINST} install
+ meson install --destdir ${D} --no-rebuild
}
EXPORT_FUNCTIONS do_configure do_compile do_install
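
Note: MESON_TARGET limits do_compile to a single target instead of the defaults. A recipe sketch (the target name is illustrative):

    MESON_TARGET = "my-tool"

With this set, do_compile effectively runs `meson compile -v ${PARALLEL_MAKE} my-tool`.
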
diff --git a/meta/classes-recipe/module-base.bbclass b/meta/classes-recipe/module-base.bbclass
index 094b563b1a..2a225881ba 100644
--- a/meta/classes-recipe/module-base.bbclass
+++ b/meta/classes-recipe/module-base.bbclass
@@ -20,6 +20,7 @@ export CROSS_COMPILE = "${TARGET_PREFIX}"
export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
+export LOCALVERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-localversion')}"
KERNEL_OBJECT_SUFFIX = ".ko"
# kernel modules are generally machine specific
diff --git a/meta/classes-recipe/module.bbclass b/meta/classes-recipe/module.bbclass
index d52d5e3098..f2f0b25a2d 100644
--- a/meta/classes-recipe/module.bbclass
+++ b/meta/classes-recipe/module.bbclass
@@ -20,6 +20,10 @@ python __anonymous () {
d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
}
+python do_package:prepend () {
+ os.environ['STRIP'] = d.getVar('KERNEL_STRIP')
+}
+
python do_devshell:prepend () {
os.environ['CFLAGS'] = ''
os.environ['CPPFLAGS'] = ''
@@ -32,6 +36,8 @@ python do_devshell:prepend () {
os.environ['CC'] = d.getVar('KERNEL_CC')
os.environ['LD'] = d.getVar('KERNEL_LD')
os.environ['AR'] = d.getVar('KERNEL_AR')
+ os.environ['OBJCOPY'] = d.getVar('KERNEL_OBJCOPY')
+ os.environ['STRIP'] = d.getVar('KERNEL_STRIP')
os.environ['O'] = d.getVar('STAGING_KERNEL_BUILDDIR')
kbuild_extra_symbols = d.getVar('KBUILD_EXTRA_SYMBOLS')
if kbuild_extra_symbols:
@@ -45,7 +51,8 @@ module_do_compile() {
oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
KERNEL_VERSION=${KERNEL_VERSION} \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
- AR="${KERNEL_AR}" \
+ AR="${KERNEL_AR}" OBJCOPY="${KERNEL_OBJCOPY}" \
+ STRIP="${KERNEL_STRIP}" \
O=${STAGING_KERNEL_BUILDDIR} \
KBUILD_EXTRA_SYMBOLS="${KBUILD_EXTRA_SYMBOLS}" \
${MAKE_TARGETS}
@@ -55,7 +62,8 @@ module_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake DEPMOD=echo MODLIB="${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}" \
INSTALL_FW_PATH="${D}${nonarch_base_libdir}/firmware" \
- CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
+ CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" \
+ STRIP="${KERNEL_STRIP}" \
O=${STAGING_KERNEL_BUILDDIR} \
${MODULES_INSTALL_TARGET}
diff --git a/meta/classes-recipe/multilib_script.bbclass b/meta/classes-recipe/multilib_script.bbclass
index 7011526254..e6f0249529 100644
--- a/meta/classes-recipe/multilib_script.bbclass
+++ b/meta/classes-recipe/multilib_script.bbclass
@@ -31,10 +31,11 @@ python () {
for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split():
pkg, script = entry.split(":")
epkg = d.expand(pkg)
- scriptname = os.path.basename(script)
+ escript = d.expand(script)
+ scriptname = os.path.basename(escript)
d.appendVar("ALTERNATIVE:" + epkg, " " + scriptname + " ")
- d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
- d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
- d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
- d.appendVar("FILES:" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
+ d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, escript)
+ d.setVarFlag("ALTERNATIVE_TARGET", scriptname, escript + "-${MULTILIB_SUFFIX}")
+ d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + escript + " ${PKGD}" + escript + "-${MULTILIB_SUFFIX}")
+ d.appendVar("FILES:" + epkg, " " + escript + "-${MULTILIB_SUFFIX}")
}
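
Note: because both the package and script sides of each entry are now expanded, MULTILIB_SCRIPTS values may reference variables such as ${bindir}. A hypothetical entry:

    MULTILIB_SCRIPTS = "${PN}:${bindir}/example-config"

The script is renamed to example-config-${MULTILIB_SUFFIX} in the package and registered as an alternative, so each multilib variant can coexist.
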
diff --git a/meta/classes-recipe/native.bbclass b/meta/classes-recipe/native.bbclass
index 61ad053def..84a3ec65da 100644
--- a/meta/classes-recipe/native.bbclass
+++ b/meta/classes-recipe/native.bbclass
@@ -77,7 +77,7 @@ exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
bindir = "${STAGING_BINDIR_NATIVE}"
sbindir = "${STAGING_SBINDIR_NATIVE}"
-base_libdir = "${STAGING_LIBDIR_NATIVE}"
+base_libdir = "${STAGING_BASE_LIBDIR_NATIVE}"
libdir = "${STAGING_LIBDIR_NATIVE}"
includedir = "${STAGING_INCDIR_NATIVE}"
sysconfdir = "${STAGING_ETCDIR_NATIVE}"
@@ -139,7 +139,7 @@ python native_virtclass_handler () {
if "native" not in classextend:
return
- def map_dependencies(varname, d, suffix = "", selfref=True):
+ def map_dependencies(varname, d, suffix = "", selfref=True, regex=False):
if suffix:
varname = varname + ":" + suffix
deps = d.getVar(varname)
@@ -148,7 +148,9 @@ python native_virtclass_handler () {
deps = bb.utils.explode_deps(deps)
newdeps = []
for dep in deps:
- if dep == pn:
+ if regex and dep.startswith("^") and dep.endswith("$"):
+ newdeps.append(dep[:-1].replace(pn, bpn) + "-native$")
+ elif dep == pn:
if not selfref:
continue
newdeps.append(dep)
@@ -161,7 +163,7 @@ python native_virtclass_handler () {
newdeps.append(dep.replace(pn, bpn) + "-native")
else:
newdeps.append(dep)
- d.setVar(varname, " ".join(newdeps), parsing=True)
+ d.setVar(varname, " ".join(newdeps))
map_dependencies("DEPENDS", e.data, selfref=False)
for pkg in e.data.getVar("PACKAGES", False).split():
@@ -171,6 +173,7 @@ python native_virtclass_handler () {
map_dependencies("RPROVIDES", e.data, pkg)
map_dependencies("RREPLACES", e.data, pkg)
map_dependencies("PACKAGES", e.data)
+ map_dependencies("PACKAGES_DYNAMIC", e.data, regex=True)
provides = e.data.getVar("PROVIDES")
nprovides = []
diff --git a/meta/classes-recipe/nativesdk.bbclass b/meta/classes-recipe/nativesdk.bbclass
index 08288fdb73..de6debda93 100644
--- a/meta/classes-recipe/nativesdk.bbclass
+++ b/meta/classes-recipe/nativesdk.bbclass
@@ -15,7 +15,10 @@ NATIVESDKLIBC ?= "libc-glibc"
LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-nativesdk"
MACHINEOVERRIDES = ""
-MACHINE_FEATURES = ""
+
+MACHINE_FEATURES = "${SDK_MACHINE_FEATURES}"
+DISTRO_FEATURES_BACKFILL = ""
+MACHINE_FEATURES_BACKFILL = ""
MULTILIBS = ""
diff --git a/meta/classes-recipe/npm.bbclass b/meta/classes-recipe/npm.bbclass
index deea53c9ec..91da3295f2 100644
--- a/meta/classes-recipe/npm.bbclass
+++ b/meta/classes-recipe/npm.bbclass
@@ -28,20 +28,18 @@ NPM_INSTALL_DEV ?= "0"
NPM_NODEDIR ?= "${RECIPE_SYSROOT_NATIVE}${prefix_native}"
-def npm_target_arch_map(target_arch):
- """Maps arch names to npm arch names"""
+## must match the mapping in nodejs.bb (meta-openembedded)
+def map_nodejs_arch(a, d):
import re
- if re.match("p(pc|owerpc)(|64)", target_arch):
- return "ppc"
- elif re.match("i.86$", target_arch):
- return "ia32"
- elif re.match("x86_64$", target_arch):
- return "x64"
- elif re.match("arm64$", target_arch):
- return "arm"
- return target_arch
-
-NPM_ARCH ?= "${@npm_target_arch_map(d.getVar("TARGET_ARCH"))}"
+
+ if re.match('i.86$', a): return 'ia32'
+ elif re.match('x86_64$', a): return 'x64'
+ elif re.match('aarch64$', a): return 'arm64'
+ elif re.match('(powerpc64|powerpc64le|ppc64le)$', a): return 'ppc64'
+ elif re.match('powerpc$', a): return 'ppc'
+ return a
+
+NPM_ARCH ?= "${@map_nodejs_arch(d.getVar("TARGET_ARCH"), d)}"
NPM_PACKAGE = "${WORKDIR}/npm-package"
NPM_CACHE = "${WORKDIR}/npm-cache"
@@ -54,7 +52,7 @@ def npm_global_configs(d):
# Ensure no network access is done
configs.append(("offline", "true"))
configs.append(("proxy", "http://invalid"))
- configs.append(("funds", False))
+ configs.append(("fund", False))
configs.append(("audit", False))
# Configure the cache directory
configs.append(("cache", d.getVar("NPM_CACHE")))
@@ -84,7 +82,7 @@ def npm_pack(env, srcdir, workdir):
subprocess.run(['tar', 'czf', tarball,
'--exclude', './node-modules',
'--exclude-vcs',
- '--transform', 's,^\./,package/,',
+ '--transform', r's,^\./,package/,',
'--mtime', '1985-10-26T08:15:00.000Z',
'.'],
check = True, cwd = srcdir)
@@ -111,6 +109,7 @@ python npm_do_configure() {
import tempfile
from bb.fetch2.npm import NpmEnvironment
from bb.fetch2.npm import npm_unpack
+ from bb.fetch2.npm import npm_package
from bb.fetch2.npmsw import foreach_dependencies
from bb.progress import OutOfProgressHandler
from oe.npm_registry import NpmRegistry
@@ -131,22 +130,6 @@ python npm_do_configure() {
sha512 = bb.utils.sha512_file(tarball)
return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode()
- def _npmsw_dependency_dict(orig, deptree):
- """
- Return the sub dictionary in the 'orig' dictionary corresponding to the
- 'deptree' dependency tree. This function follows the shrinkwrap file
- format.
- """
- ptr = orig
- for dep in deptree:
- if "dependencies" not in ptr:
- ptr["dependencies"] = {}
- ptr = ptr["dependencies"]
- if dep not in ptr:
- ptr[dep] = {}
- ptr = ptr[dep]
- return ptr
-
# Manage the manifest file and shrinkwrap files
orig_manifest_file = d.expand("${S}/package.json")
orig_shrinkwrap_file = d.expand("${S}/npm-shrinkwrap.json")
@@ -170,31 +153,44 @@ python npm_do_configure() {
if has_shrinkwrap_file:
cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
- cached_shrinkwrap.pop("dependencies", None)
+ for package in orig_shrinkwrap["packages"]:
+ if package != "":
+ cached_shrinkwrap["packages"].pop(package, None)
+ cached_shrinkwrap["packages"][""].pop("dependencies", None)
+ cached_shrinkwrap["packages"][""].pop("devDependencies", None)
+ cached_shrinkwrap["packages"][""].pop("peerDependencies", None)
# Manage the dependencies
progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$")
progress_total = 1 # also count the main package
progress_done = 0
- def _count_dependency(name, params, deptree):
+ def _count_dependency(name, params, destsuffix):
nonlocal progress_total
progress_total += 1
- def _cache_dependency(name, params, deptree):
- destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
- destsuffix = os.path.join(*destsubdirs)
+ def _cache_dependency(name, params, destsuffix):
with tempfile.TemporaryDirectory() as tmpdir:
# Add the dependency to the npm cache
destdir = os.path.join(d.getVar("S"), destsuffix)
(tarball, pkg) = npm_pack(env, destdir, tmpdir)
_npm_cache_add(tarball, pkg)
# Add its signature to the cached shrinkwrap
- dep = _npmsw_dependency_dict(cached_shrinkwrap, deptree)
+ dep = params
dep["version"] = pkg['version']
dep["integrity"] = _npm_integrity(tarball)
if params.get("dev", False):
dep["dev"] = True
+ if "devDependencies" not in cached_shrinkwrap["packages"][""]:
+ cached_shrinkwrap["packages"][""]["devDependencies"] = {}
+ cached_shrinkwrap["packages"][""]["devDependencies"][name] = pkg['version']
+
+ else:
+ if "dependencies" not in cached_shrinkwrap["packages"][""]:
+ cached_shrinkwrap["packages"][""]["dependencies"] = {}
+ cached_shrinkwrap["packages"][""]["dependencies"][name] = pkg['version']
+
+ cached_shrinkwrap["packages"][destsuffix] = dep
# Display progress
nonlocal progress_done
progress_done += 1
@@ -205,6 +201,19 @@ python npm_do_configure() {
if has_shrinkwrap_file:
foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
+
+ # Manage Peer Dependencies
+ if has_shrinkwrap_file:
+ packages = orig_shrinkwrap.get("packages", {})
+ peer_deps = packages.get("", {}).get("peerDependencies", {})
+ package_runtime_dependencies = d.getVar("RDEPENDS:%s" % d.getVar("PN"))
+
+ for peer_dep in peer_deps:
+ peer_dep_yocto_name = npm_package(peer_dep)
+ if peer_dep_yocto_name not in package_runtime_dependencies:
+            bb.warn(peer_dep + " is a peer dependency that is not in the RDEPENDS variable. " +
+                "Please add this peer dependency to the RDEPENDS variable as %s and generate its recipe with devtool"
+                % peer_dep_yocto_name)
# Configure the main package
with tempfile.TemporaryDirectory() as tmpdir:
@@ -214,7 +223,7 @@ python npm_do_configure() {
# Configure the cached manifest file and cached shrinkwrap file
def _update_manifest(depkey):
for name in orig_manifest.get(depkey, {}):
- version = cached_shrinkwrap["dependencies"][name]["version"]
+ version = cached_shrinkwrap["packages"][""][depkey][name]
if depkey not in cached_manifest:
cached_manifest[depkey] = {}
cached_manifest[depkey][name] = version
@@ -281,6 +290,9 @@ python npm_do_compile() {
args.append(("target_arch", d.getVar("NPM_ARCH")))
args.append(("build-from-source", "true"))
+    # Don't install peer dependencies as they should be in the RDEPENDS variable
+ args.append(("legacy-peer-deps", "true"))
+
# Pack and install the main package
(tarball, _) = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
cmd = "npm install %s %s" % (shlex.quote(tarball), d.getVar("EXTRA_OENPM"))
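
Note: since peer dependencies are no longer installed (legacy-peer-deps), they must be satisfied at runtime through RDEPENDS. For instance, if package.json declared a peerDependency on "react", the recipe would add something like the following, where the package name follows the npm_package mapping and is illustrative:

    RDEPENDS:${PN} += "node-react"
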
diff --git a/meta/classes-recipe/overlayfs-etc.bbclass b/meta/classes-recipe/overlayfs-etc.bbclass
new file mode 100644
index 0000000000..d339fbbeee
--- /dev/null
+++ b/meta/classes-recipe/overlayfs-etc.bbclass
@@ -0,0 +1,88 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Class for setting up /etc in overlayfs
+#
+# In order to have the /etc directory in overlayfs, special handling at an early boot stage is required.
+# The idea is to supply a custom init script that mounts /etc before launching the actual init program,
+# because the latter already requires /etc to be mounted
+#
+# The configuration must be machine specific. You should at least set these three variables:
+# OVERLAYFS_ETC_MOUNT_POINT ?= "/data"
+# OVERLAYFS_ETC_FSTYPE ?= "ext4"
+# OVERLAYFS_ETC_DEVICE ?= "/dev/mmcblk0p2"
+#
+# To control the mount options, set:
+# OVERLAYFS_ETC_MOUNT_OPTIONS ?= "defaults"
+#
+# The class provides two options for /sbin/init generation
+# 1. The default option is to rename the original /sbin/init to /sbin/init.orig and place the generated
+#    init under the original name, i.e. /sbin/init. This has the advantage that you won't need to change
+#    any kernel parameters to make it work, but it poses the restriction that package-management can't
+#    be used, because updating the init manager would remove the generated script
+# 2. If you would like to keep the original init as is, you can set
+#    OVERLAYFS_ETC_USE_ORIG_INIT_NAME = "0"
+#    Then the generated init will be named /sbin/preinit and you will need to extend your kernel parameters
+# manually in your bootloader configuration.
+#
+# Regardless of which mode you choose, the update and migration strategy of configuration files
+# under the /etc overlay is out of the scope of this class
+
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "overlayfs-etc", "create_overlayfs_etc_preinit", "", d)}'
+IMAGE_FEATURES_CONFLICTS_overlayfs-etc = "${@ 'package-management' if bb.utils.to_boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME'), True) else ''}"
+
+OVERLAYFS_ETC_MOUNT_POINT ??= ""
+OVERLAYFS_ETC_FSTYPE ??= ""
+OVERLAYFS_ETC_DEVICE ??= ""
+OVERLAYFS_ETC_USE_ORIG_INIT_NAME ??= "1"
+OVERLAYFS_ETC_MOUNT_OPTIONS ??= "defaults"
+OVERLAYFS_ETC_INIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-etc-preinit.sh.in"
+OVERLAYFS_ETC_EXPOSE_LOWER ??= "0"
+OVERLAYFS_ETC_CREATE_MOUNT_DIRS ??= "1"
+
+python create_overlayfs_etc_preinit() {
+ overlayEtcMountPoint = d.getVar("OVERLAYFS_ETC_MOUNT_POINT")
+ overlayEtcFsType = d.getVar("OVERLAYFS_ETC_FSTYPE")
+ overlayEtcDevice = d.getVar("OVERLAYFS_ETC_DEVICE")
+
+ if not overlayEtcMountPoint:
+ bb.fatal("OVERLAYFS_ETC_MOUNT_POINT must be set in your MACHINE configuration")
+ if not overlayEtcDevice:
+ bb.fatal("OVERLAYFS_ETC_DEVICE must be set in your MACHINE configuration")
+ if not overlayEtcFsType:
+ bb.fatal("OVERLAYFS_ETC_FSTYPE should contain a valid file system type on {0}".format(overlayEtcDevice))
+
+ with open(d.getVar("OVERLAYFS_ETC_INIT_TEMPLATE"), "r") as f:
+ PreinitTemplate = f.read()
+
+ useOrigInit = oe.types.boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME'))
+ preinitPath = oe.path.join(d.getVar("IMAGE_ROOTFS"), d.getVar("base_sbindir"), "preinit")
+ initBaseName = oe.path.join(d.getVar("base_sbindir"), "init")
+ origInitNameSuffix = ".orig"
+ exposeLower = oe.types.boolean(d.getVar('OVERLAYFS_ETC_EXPOSE_LOWER'))
+ createMountDirs = oe.types.boolean(d.getVar('OVERLAYFS_ETC_CREATE_MOUNT_DIRS'))
+
+ args = {
+ 'OVERLAYFS_ETC_MOUNT_POINT': overlayEtcMountPoint,
+ 'OVERLAYFS_ETC_MOUNT_OPTIONS': d.getVar('OVERLAYFS_ETC_MOUNT_OPTIONS'),
+ 'OVERLAYFS_ETC_FSTYPE': overlayEtcFsType,
+ 'OVERLAYFS_ETC_DEVICE': overlayEtcDevice,
+ 'SBIN_INIT_NAME': initBaseName + origInitNameSuffix if useOrigInit else initBaseName,
+ 'OVERLAYFS_ETC_EXPOSE_LOWER': "true" if exposeLower else "false",
+ 'CREATE_MOUNT_DIRS': "true" if createMountDirs else "false"
+ }
+
+ if useOrigInit:
+ # rename original /sbin/init
+ origInit = oe.path.join(d.getVar("IMAGE_ROOTFS"), initBaseName)
+ bb.debug(1, "rootfs path %s, init path %s, test %s" % (d.getVar('IMAGE_ROOTFS'), origInit, d.getVar("IMAGE_ROOTFS")))
+ bb.utils.rename(origInit, origInit + origInitNameSuffix)
+ preinitPath = origInit
+
+ with open(preinitPath, 'w') as f:
+ f.write(PreinitTemplate.format(**args))
+ os.chmod(preinitPath, 0o755)
+}
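
To illustrate how the new class is consumed, a machine configuration might carry something like the following sketch (device, partition and mount point are assumptions for an imaginary board):

    # hypothetical fragment of a machine .conf
    OVERLAYFS_ETC_MOUNT_POINT = "/data"
    OVERLAYFS_ETC_FSTYPE = "ext4"
    OVERLAYFS_ETC_DEVICE = "/dev/mmcblk0p2"

    # and in the image recipe or local.conf:
    IMAGE_FEATURES += "overlayfs-etc"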
diff --git a/meta/classes-recipe/overlayfs.bbclass b/meta/classes-recipe/overlayfs.bbclass
new file mode 100644
index 0000000000..a82763ec10
--- /dev/null
+++ b/meta/classes-recipe/overlayfs.bbclass
@@ -0,0 +1,142 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Class for generation of overlayfs mount units
+#
+# It's often desired in embedded system design to have a read-only rootfs.
+# But a lot of different applications might want read-write access to
+# some parts of a filesystem. It can be especially useful when your update mechanism
+# overwrites the whole rootfs, but you want your application data to be preserved
+# between updates. This class provides a way to achieve that by means
+# of overlayfs, while at the same time keeping the base rootfs read-only.
+#
+# Usage example.
+#
+# Set a mount point for a partition overlayfs is going to use as upper layer
+# in your machine configuration. The underlying file system can be anything that
+# is supported by overlayfs. This has to be done in your machine configuration,
+# because the QA check cannot catch a missing mount unit if you redefine this variable in your recipe!
+#
+# OVERLAYFS_MOUNT_POINT[data] ?= "/data"
+#
+# By default the class assumes you have a corresponding fstab entry or systemd
+# mount unit (data.mount in this case) for this mount point installed on the
+# image, for instance via a wks script or the systemd-machine-units recipe.
+#
+# If the mount point is handled somewhere else, e.g. custom boot or preinit
+# scripts or in a initramfs, then this QA check can be skipped by adding
+# mount-configured to the related OVERLAYFS_QA_SKIP flag:
+#
+# OVERLAYFS_QA_SKIP[data] = "mount-configured"
+#
+# To use the overlayfs, you just have to specify the writable directories inside
+# the corresponding recipe:
+#
+# OVERLAYFS_WRITABLE_PATHS[data] = "/usr/share/my-custom-application"
+#
+# To support several mount points you can use a different variable flag. Assume we
+# want to have a writable location on the file system, but are not interested in whether
+# the data survives a reboot. Then we could have a mnt-overlay.mount unit for a tmpfs file system:
+#
+# OVERLAYFS_MOUNT_POINT[mnt-overlay] = "/mnt/overlay"
+# OVERLAYFS_WRITABLE_PATHS[mnt-overlay] = "/usr/share/another-application"
+#
+# If your recipe deploys a systemd service, then it should require and be
+# started after the ${PN}-overlays.service to make sure that all overlays are
+# mounted beforehand.
+#
+# Note: the class does not support the /etc directory itself, because systemd depends on it.
+# For the /etc directory use the overlayfs-etc class.
+
+REQUIRED_DISTRO_FEATURES += "systemd overlayfs"
+
+inherit systemd features_check
+
+OVERLAYFS_CREATE_DIRS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-create-dirs.service.in"
+OVERLAYFS_MOUNT_UNIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-unit.mount.in"
+OVERLAYFS_ALL_OVERLAYS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-all-overlays.service.in"
+
+python do_create_overlayfs_units() {
+ from oe.overlayfs import mountUnitName
+
+ with open(d.getVar("OVERLAYFS_CREATE_DIRS_TEMPLATE"), "r") as f:
+ CreateDirsUnitTemplate = f.read()
+ with open(d.getVar("OVERLAYFS_MOUNT_UNIT_TEMPLATE"), "r") as f:
+ MountUnitTemplate = f.read()
+ with open(d.getVar("OVERLAYFS_ALL_OVERLAYS_TEMPLATE"), "r") as f:
+ AllOverlaysTemplate = f.read()
+
+ def prepareUnits(data, lower):
+ from oe.overlayfs import helperUnitName
+
+ args = {
+ 'DATA_MOUNT_POINT': data,
+ 'DATA_MOUNT_UNIT': mountUnitName(data),
+ 'CREATE_DIRS_SERVICE': helperUnitName(lower),
+ 'LOWERDIR': lower,
+ }
+
+ bb.debug(1, "Generate systemd unit %s" % mountUnitName(lower))
+ with open(os.path.join(d.getVar('WORKDIR'), mountUnitName(lower)), 'w') as f:
+ f.write(MountUnitTemplate.format(**args))
+
+ bb.debug(1, "Generate helper systemd unit %s" % helperUnitName(lower))
+ with open(os.path.join(d.getVar('WORKDIR'), helperUnitName(lower)), 'w') as f:
+ f.write(CreateDirsUnitTemplate.format(**args))
+
+ def prepareGlobalUnit(dependentUnits):
+ from oe.overlayfs import allOverlaysUnitName
+ args = {
+ 'ALL_OVERLAYFS_UNITS': " ".join(dependentUnits),
+ 'PN': d.getVar('PN')
+ }
+
+ bb.debug(1, "Generate systemd unit with all overlays %s" % allOverlaysUnitName(d))
+ with open(os.path.join(d.getVar('WORKDIR'), allOverlaysUnitName(d)), 'w') as f:
+ f.write(AllOverlaysTemplate.format(**args))
+
+ mountUnitList = []
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
+ for mountPoint in overlayMountPoints:
+ bb.debug(1, "Process variable flag %s" % mountPoint)
+ lowerList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
+ if not lowerList:
+ bb.note("No mount points defined for %s flag, skipping" % (mountPoint))
+ continue
+ for lower in lowerList.split():
+ bb.debug(1, "Prepare mount unit for %s with data mount point %s" %
+ (lower, d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)))
+ prepareUnits(d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint), lower)
+ mountUnitList.append(mountUnitName(lower))
+
+ # set up one unit, which depends on all mount units, so users can set
+ # only one dependency in their units to make sure software starts
+ # when all overlays are mounted
+ prepareGlobalUnit(mountUnitList)
+}
+
+# we need to generate file names early during parsing stage
+python () {
+ from oe.overlayfs import strForBash, unitFileList
+
+ unitList = unitFileList(d)
+ for unit in unitList:
+ d.appendVar('SYSTEMD_SERVICE:' + d.getVar('PN'), ' ' + unit)
+ d.appendVar('FILES:' + d.getVar('PN'), ' ' +
+ d.getVar('systemd_system_unitdir') + '/' + strForBash(unit))
+
+ d.setVar('OVERLAYFS_UNIT_LIST', ' '.join([strForBash(s) for s in unitList]))
+}
+
+do_install:append() {
+ install -d ${D}${systemd_system_unitdir}
+ for unit in ${OVERLAYFS_UNIT_LIST}; do
+ install -m 0444 ${WORKDIR}/${unit} ${D}${systemd_system_unitdir}
+ done
+}
+
+do_create_overlayfs_units[vardeps] += "OVERLAYFS_WRITABLE_PATHS"
+addtask create_overlayfs_units before do_install
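
Pulling the pieces of this class together, a plausible setup spans the machine configuration and an application recipe (the recipe and path names are illustrative only):

    # machine configuration; /data must be mounted via fstab or a data.mount unit
    OVERLAYFS_MOUNT_POINT[data] = "/data"

    # hypothetical application recipe
    inherit overlayfs
    OVERLAYFS_WRITABLE_PATHS[data] = "/usr/share/my-application"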
diff --git a/meta/classes-recipe/packagegroup.bbclass b/meta/classes-recipe/packagegroup.bbclass
index 6f17fc73b0..cf6fc354a8 100644
--- a/meta/classes-recipe/packagegroup.bbclass
+++ b/meta/classes-recipe/packagegroup.bbclass
@@ -22,7 +22,7 @@ PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
LICENSE ?= "MIT"
-inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED') == 'all', 'allarch', '')}
+inherit_defer ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED') == 'all', 'allarch', '')}
# This automatically adds -dbg and -dev flavours of all PACKAGES
# to the list. Their dependencies (RRECOMMENDS) are handled as usual
@@ -54,6 +54,9 @@ deltask do_compile
deltask do_install
deltask do_populate_sysroot
+do_create_runtime_spdx[deptask] = "do_create_spdx"
+do_create_runtime_spdx[rdeptask] = ""
+
INHIBIT_DEFAULT_DEPS = "1"
python () {
diff --git a/meta/classes-recipe/perl-version.bbclass b/meta/classes-recipe/perl-version.bbclass
index 269ac9eb31..74e33175d9 100644
--- a/meta/classes-recipe/perl-version.bbclass
+++ b/meta/classes-recipe/perl-version.bbclass
@@ -26,9 +26,6 @@ def get_perl_version(d):
return m.group(1)
return None
-PERLVERSION := "${@get_perl_version(d)}"
-PERLVERSION[vardepvalue] = ""
-
# Determine the staged arch of perl from the perl configuration file
# Assign vardepvalue, because otherwise signature is changed before and after
@@ -49,9 +46,6 @@ def get_perl_arch(d):
return m.group(1)
return None
-PERLARCH := "${@get_perl_arch(d)}"
-PERLARCH[vardepvalue] = ""
-
# Determine the staged arch of perl-native from the perl configuration file
# Assign vardepvalue, because otherwise signature is changed before and after
# perl is built (from None to real version in config.sh).
diff --git a/meta/classes-recipe/populate_sdk_base.bbclass b/meta/classes-recipe/populate_sdk_base.bbclass
index 0be108ad98..81896d808f 100644
--- a/meta/classes-recipe/populate_sdk_base.bbclass
+++ b/meta/classes-recipe/populate_sdk_base.bbclass
@@ -15,7 +15,7 @@ COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
-COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
+COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest ${MLPREFIX}ptest-runner'
COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion'
def complementary_globs(featurevar, d):
@@ -37,7 +37,7 @@ SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
# List of locales to install, or "all" for all of them, or unset for none.
SDKIMAGE_LINGUAS ?= "all"
-inherit rootfs_${IMAGE_PKGTYPE}
+inherit_defer rootfs_${IMAGE_PKGTYPE}
SDK_DIR = "${WORKDIR}/sdk"
SDK_OUTPUT = "${SDK_DIR}/image"
@@ -74,6 +74,8 @@ TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
SDK_ARCHIVE_TYPE ?= "tar.xz"
SDK_XZ_COMPRESSION_LEVEL ?= "-9"
SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
+SDK_ZIP_OPTIONS ?= "-y"
+
# To support different sdk type according to SDK_ARCHIVE_TYPE, now support zip and tar.xz
python () {
@@ -81,7 +83,7 @@ python () {
d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
# SDK_ARCHIVE_CMD is used to generate the archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR}
# it is recommended to cd into the input dir first to avoid archiving build paths
- d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
else:
d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
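
With the new knob above, a build could switch archive formats and tune zip behaviour from local.conf; a sketch, where "-1" (fastest compression) is merely an assumed preference:

    SDK_ARCHIVE_TYPE = "zip"
    SDK_ZIP_OPTIONS = "-y -1"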
@@ -150,16 +152,17 @@ python write_host_sdk_manifest () {
output.write(format_pkg_list(pkgs, 'ver'))
}
-POPULATE_SDK_POST_TARGET_COMMAND:append = " write_sdk_test_data ; "
-POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest; sdk_prune_dirs; "
-POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest; "
+POPULATE_SDK_POST_TARGET_COMMAND:append = " write_sdk_test_data"
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest sdk_prune_dirs"
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest"
-SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
-SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
+SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC}' if '${SDK_PACKAGING_FUNC}' else ''}"
+SDK_POSTPROCESS_COMMAND = "create_sdk_files check_sdk_sysroots archive_sdk ${SDK_PACKAGING_COMMAND}"
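
Note that these variables now hold space-separated lists of functions instead of semicolon-terminated shell snippets, so extending them changes accordingly; for example (my_sdk_tweaks is an assumed custom function):

    SDK_POSTPROCESS_COMMAND:append = " my_sdk_tweaks"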
def populate_sdk_common(d):
from oe.sdk import populate_sdk
from oe.manifest import create_manifest, Manifest
+ import oe.packagedata
# Handle package exclusions
excl_pkgs = (d.getVar("PACKAGE_EXCLUDE") or "").split()
@@ -182,13 +185,13 @@ def populate_sdk_common(d):
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
pn = d.getVar('PN')
- runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
- runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
+ oe.packagedata.runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
+ oe.packagedata.runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
ld = bb.data.createCopy(d)
ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
- runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
- runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
+ oe.packagedata.runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
+ oe.packagedata.runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK"))
d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY"))
@@ -205,7 +208,7 @@ fakeroot python do_populate_sdk() {
}
SSTATETASKS += "do_populate_sdk"
SSTATE_SKIP_CREATION:task-populate-sdk = '1'
-do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
+do_populate_sdk[cleandirs] += "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
@@ -282,7 +285,7 @@ python check_sdk_sysroots() {
dir_walk(SCAN_ROOT)
}
-SDKTAROPTS = "--owner=root --group=root"
+SDKTAROPTS = "--owner=root --group=root --clamp-mtime --mtime=@${SOURCE_DATE_EPOCH}"
fakeroot archive_sdk() {
# Package it up
@@ -369,8 +372,7 @@ do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
python () {
variables = sdk_command_variables(d)
for var in variables:
- if d.getVar(var, False):
- d.setVarFlag(var, 'func', '1')
+ d.setVarFlag(var, 'vardeps', d.getVar(var))
}
do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
diff --git a/meta/classes-recipe/populate_sdk_ext.bbclass b/meta/classes-recipe/populate_sdk_ext.bbclass
index 56e24c4eed..09d5e2aeb6 100644
--- a/meta/classes-recipe/populate_sdk_ext.bbclass
+++ b/meta/classes-recipe/populate_sdk_ext.bbclass
@@ -120,7 +120,7 @@ python write_host_sdk_ext_manifest () {
f.write("%s %s %s\n" % (info[1], info[2], info[3]))
}
-SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = " write_target_sdk_ext_manifest write_host_sdk_ext_manifest"
SDK_TITLE:task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
@@ -186,12 +186,7 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
if os.path.exists(localconf + '.bak'):
os.replace(localconf + '.bak', localconf)
-python copy_buildsystem () {
- import re
- import shutil
- import glob
- import oe.copy_buildsystem
-
+def copy_bitbake_and_layers(d, baseoutpath, derivative):
oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT')
conf_bbpath = ''
@@ -200,13 +195,7 @@ python copy_buildsystem () {
# Copy in all metadata layers + bitbake (as repositories)
buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
- baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
- #check if custome templateconf path is set
- use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF')
-
- # Determine if we're building a derivative extensible SDK (from devtool build-sdk)
- derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
if derivative:
workspace_name = 'orig-workspace'
else:
@@ -220,20 +209,23 @@ python copy_buildsystem () {
if os.path.exists(os.path.join(baseoutpath, relpath)):
conf_initpath = relpath
- relpath = os.path.join('layers', path, 'scripts', 'devtool')
+ relpath = os.path.join('layers', path, 'scripts', 'esdk-tools', 'devtool')
if os.path.exists(os.path.join(baseoutpath, relpath)):
- scriptrelpath = os.path.dirname(relpath)
+ esdk_tools_path = os.path.dirname(relpath)
relpath = os.path.join('layers', path, 'meta')
if os.path.exists(os.path.join(baseoutpath, relpath, 'lib', 'oe')):
core_meta_subdir = relpath
d.setVar('oe_init_build_env_path', conf_initpath)
- d.setVar('scriptrelpath', scriptrelpath)
+ d.setVar('esdk_tools_path', esdk_tools_path)
+ return (conf_initpath, conf_bbpath, core_meta_subdir, sdkbblayers)
+
+def write_devtool_config(d, baseoutpath, conf_bbpath, conf_initpath, core_meta_subdir):
# Write out config file for devtool
import configparser
- config = configparser.SafeConfigParser()
+ config = configparser.ConfigParser()
config.add_section('General')
config.set('General', 'bitbake_subdir', conf_bbpath)
config.set('General', 'init_path', conf_initpath)
@@ -247,15 +239,17 @@ python copy_buildsystem () {
with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f:
config.write(f)
+def write_unlocked_sigs(d, baseoutpath):
unlockedsigs = os.path.join(baseoutpath, 'conf', 'unlocked-sigs.inc')
with open(unlockedsigs, 'w') as f:
pass
+def write_bblayers_conf(d, baseoutpath, sdkbblayers):
# Create a layer for new recipes / appends
bbpath = d.getVar('BBPATH')
env = os.environ.copy()
env['PYTHONDONTWRITEBYTECODE'] = '1'
- bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')], env=env)
+ bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--layerseries', d.getVar("LAYERSERIES_CORENAMES"), '--create-only', os.path.join(baseoutpath, 'workspace')], env=env)
# Create bblayers.conf
bb.utils.mkdirhier(baseoutpath + '/conf')
@@ -279,6 +273,11 @@ python copy_buildsystem () {
f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n')
f.write(' "\n')
+def copy_uninative(d, baseoutpath):
+ import shutil
+
+ uninative_checksum = None
+
# Copy uninative tarball
# For now this is where uninative.bbclass expects the tarball
if bb.data.inherits_class('uninative', d):
@@ -288,6 +287,12 @@ python copy_buildsystem () {
bb.utils.mkdirhier(uninative_outdir)
shutil.copy(uninative_file, uninative_outdir)
+ return uninative_checksum
+
+def write_local_conf(d, baseoutpath, derivative, core_meta_subdir, uninative_checksum):
+ # check if a custom templateconf path is set
+ use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF')
+
env_passthrough = (d.getVar('BB_ENV_PASSTHROUGH_ADDITIONS') or '').split()
env_passthrough_values = {}
@@ -369,7 +374,8 @@ python copy_buildsystem () {
f.write('BUILDCFG_HEADER = ""\n\n')
# Write METADATA_REVISION
- f.write('METADATA_REVISION = "%s"\n\n' % d.getVar('METADATA_REVISION'))
+ # Needs distro override so it can override the value set in the bbclass code (later than local.conf)
+ f.write('METADATA_REVISION:%s = "%s"\n\n' % (d.getVar('DISTRO'), d.getVar('METADATA_REVISION')))
f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
f.write('WITHIN_EXT_SDK = "1"\n\n')
@@ -438,7 +444,8 @@ python copy_buildsystem () {
else:
# Write a templateconf.cfg
with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
- f.write('meta/conf\n')
+ f.write('meta/conf/templates/default\n')
+ os.makedirs(os.path.join(baseoutpath, core_meta_subdir, 'conf/templates/default'), exist_ok=True)
# Ensure any variables set from the external environment (by way of
# BB_ENV_PASSTHROUGH_ADDITIONS) are set in the SDK's configuration
@@ -455,6 +462,9 @@ python copy_buildsystem () {
f.write(line)
f.write('\n')
+def prepare_locked_cache(d, baseoutpath, derivative, conf_initpath):
+ import shutil
+
# Filter the locked signatures file to just the sstate tasks we are interested in
excluded_targets = get_sdk_install_targets(d, images_only=True)
sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
@@ -486,7 +496,7 @@ python copy_buildsystem () {
bb.utils.remove(sstate_out, True)
# uninative.bbclass sets NATIVELSBSTRING to 'universal%s' % oe.utils.host_gcc_version(d)
- fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d)
+ fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d) if bb.data.inherits_class('uninative', d) else ""
sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
sdk_ext_type = d.getVar('SDK_EXT_TYPE')
@@ -497,7 +507,6 @@ python copy_buildsystem () {
else:
tasklistfn = None
-
cachedir = os.path.join(baseoutpath, 'cache')
bb.utils.mkdirhier(cachedir)
bb.parse.siggen.copy_unitaskhashes(cachedir)
@@ -559,6 +568,9 @@ python copy_buildsystem () {
f = os.path.join(root, name)
os.remove(f)
+def write_manifest(d, baseoutpath):
+ import glob
+
# Write manifest file
# Note: at the moment we cannot include the env setup script here to keep
# it updated, since it gets modified during SDK installation (see
@@ -582,6 +594,32 @@ python copy_buildsystem () {
continue
chksum = bb.utils.sha256_file(fn)
f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
+
+
+python copy_buildsystem () {
+ import oe.copy_buildsystem
+
+ baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
+
+ # Determine if we're building a derivative extensible SDK (from devtool build-sdk)
+ derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
+
+ conf_initpath, conf_bbpath, core_meta_subdir, sdkbblayers = copy_bitbake_and_layers(d, baseoutpath, derivative)
+
+ write_devtool_config(d, baseoutpath, conf_bbpath, conf_initpath, core_meta_subdir)
+
+ write_unlocked_sigs(d, baseoutpath)
+
+ write_bblayers_conf(d, baseoutpath, sdkbblayers)
+
+ uninative_checksum = copy_uninative(d, baseoutpath)
+
+ write_local_conf(d, baseoutpath, derivative, core_meta_subdir, uninative_checksum)
+
+ prepare_locked_cache(d, baseoutpath, derivative, conf_initpath)
+
+ write_manifest(d, baseoutpath)
+
}
def get_current_buildtools(d):
@@ -626,21 +664,6 @@ def get_sdk_required_utilities(buildtools_fn, d):
return ' '.join(sanity_required_utilities)
install_tools() {
- install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
- scripts="devtool recipetool oe-find-native-sysroot runqemu* wic"
- for script in $scripts; do
- for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
- targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
- test -e ${targetscriptfn} || ln -rs ${scriptfn} ${targetscriptfn}
- done
- done
- # We can't use the same method as above because files in the sysroot won't exist at this point
- # (they get populated from sstate on installation)
- unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
- if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
- binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
- ln -rs ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
- fi
touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
# find latest buildtools-tarball and install it
@@ -719,7 +742,7 @@ sdk_ext_postinst() {
# A bit of another hack, but we need this in the path only for devtool
# so put it at the end of $PATH.
- echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
+ echo "export PATH=\"$target_sdk_dir/${esdk_tools_path}:\$PATH\"" >> $env_setup_script
echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
@@ -732,7 +755,7 @@ sdk_ext_postinst() {
# current working directory when first ran, nor will it set $1 when
# sourcing a script. That is why this has to look so ugly.
LOGFILE="$target_sdk_dir/preparing_build_system.log"
- sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ sh -c ". buildtools/environment-setup* > $LOGFILE 2>&1 && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE 2>&1 && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
fi
if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then
rm $target_sdk_dir/ext-sdk-prepare.py
@@ -742,7 +765,7 @@ sdk_ext_postinst() {
SDK_POST_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_postinst}"
-SDK_POSTPROCESS_COMMAND:prepend:task-populate-sdk-ext = "copy_buildsystem; install_tools; "
+SDK_POSTPROCESS_COMMAND:prepend:task-populate-sdk-ext = "copy_buildsystem install_tools "
SDK_INSTALL_TARGETS = ""
fakeroot python do_populate_sdk_ext() {
diff --git a/meta/classes-recipe/ptest-cargo.bbclass b/meta/classes-recipe/ptest-cargo.bbclass
new file mode 100644
index 0000000000..c46df362bf
--- /dev/null
+++ b/meta/classes-recipe/ptest-cargo.bbclass
@@ -0,0 +1,138 @@
+inherit cargo ptest
+
+RUST_TEST_ARGS ??= ""
+RUST_TEST_ARGS[doc] = "Arguments to give to the test binaries (e.g. --shuffle)"
+
+# I didn't find a cleaner way to share data between compile and install tasks
+CARGO_TEST_BINARIES_FILES ?= "${B}/test_binaries_list"
+
+# Sadly, generated test binaries have no deterministic names (https://github.com/rust-lang/cargo/issues/1924)
+# This forces us to parse the cargo output in json format to find those test binaries.
+python do_compile_ptest_cargo() {
+ import subprocess
+ import json
+
+ cargo = bb.utils.which(d.getVar("PATH"), d.getVar("CARGO", True))
+ cargo_build_flags = d.getVar("CARGO_BUILD_FLAGS", True)
+ rust_flags = d.getVar("RUSTFLAGS", True)
+ manifest_path = d.getVar("CARGO_MANIFEST_PATH", True)
+ project_manifest_path = os.path.normpath(manifest_path)
+ manifest_dir = os.path.dirname(manifest_path)
+
+ env = os.environ.copy()
+ env['RUSTFLAGS'] = rust_flags
+ cmd = f"{cargo} build --tests --message-format json {cargo_build_flags}"
+ bb.note(f"Building tests with cargo ({cmd})")
+
+ try:
+ proc = subprocess.Popen(cmd, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
+ except OSError as e:
+ # Popen raises OSError (not CalledProcessError) if the command cannot be started
+ bb.fatal(f"Cannot build tests with cargo: {e}")
+
+ lines = []
+ for line in proc.stdout:
+ data = line.strip('\n')
+ lines.append(data)
+ bb.note(data)
+ proc.communicate()
+ if proc.returncode != 0:
+ bb.fatal(f"Unable to compile test with cargo, '{cmd}' failed")
+
+ # Definition of the format: https://doc.rust-lang.org/cargo/reference/external-tools.html#json-messages
+ test_bins = []
+ for line in lines:
+ try:
+ data = json.loads(line)
+ except json.JSONDecodeError:
+ # skip lines that are not a json
+ pass
+ else:
+ try:
+ # Filter the test packages coming from the current project:
+ # - test binaries from the root manifest
+ # - test binaries from sub manifest of the current project if any
+ current_manifest_path = os.path.normpath(data['manifest_path'])
+ common_path = os.path.commonpath([current_manifest_path, project_manifest_path])
+ if common_path in [manifest_dir, current_manifest_path]:
+ if (data['target']['test'] or data['target']['doctest']) and data['executable']:
+ test_bins.append(data['executable'])
+ except (KeyError, ValueError) as e:
+ # skip lines that do not meet the requirements
+ pass
+
+ # Every Rust project will generate at least one unit test binary
+ # It will just run a test suite with 0 tests if the project didn't define any
+ # So the list is not expected to be empty here
+ if not test_bins:
+ bb.fatal("Unable to find any test binaries")
+
+ cargo_test_binaries_file = d.getVar('CARGO_TEST_BINARIES_FILES', True)
+ bb.note(f"Found {len(test_bins)} tests, write their paths into {cargo_test_binaries_file}")
+ with open(cargo_test_binaries_file, "w") as f:
+ for test_bin in test_bins:
+ f.write(f"{test_bin}\n")
+
+}
+
+python do_install_ptest_cargo() {
+ import shutil
+
+ dest_dir = d.getVar("D", True)
+ pn = d.getVar("PN", True)
+ ptest_path = d.getVar("PTEST_PATH", True)
+ cargo_test_binaries_file = d.getVar('CARGO_TEST_BINARIES_FILES', True)
+ rust_test_args = d.getVar('RUST_TEST_ARGS') or ""
+
+ ptest_dir = os.path.join(dest_dir, ptest_path.lstrip('/'))
+ os.makedirs(ptest_dir, exist_ok=True)
+
+ test_bins = []
+ with open(cargo_test_binaries_file, "r") as f:
+ for line in f.readlines():
+ test_bins.append(line.strip('\n'))
+
+ test_paths = []
+ for test_bin in test_bins:
+ shutil.copy2(test_bin, ptest_dir)
+ test_paths.append(os.path.join(ptest_path, os.path.basename(test_bin)))
+
+ ptest_script = os.path.join(ptest_dir, "run-ptest")
+ if os.path.exists(ptest_script):
+ with open(ptest_script, "a") as f:
+ f.write(f"\necho \"\"\n")
+ f.write(f"echo \"## starting to run rust tests ##\"\n")
+ for test_path in test_paths:
+ f.write(f"{test_path} {rust_test_args}\n")
+ else:
+ with open(ptest_script, "a") as f:
+ f.write("#!/bin/sh\n")
+ for test_path in test_paths:
+ f.write(f"{test_path} {rust_test_args}\n")
+ os.chmod(ptest_script, 0o755)
+
+ # this is chown -R root:root ${D}${PTEST_PATH}
+ for root, dirs, files in os.walk(ptest_dir):
+ # avoid shadowing the datastore 'd' and the file handle 'f'
+ for dir_name in dirs:
+ shutil.chown(os.path.join(root, dir_name), "root", "root")
+ for file_name in files:
+ shutil.chown(os.path.join(root, file_name), "root", "root")
+}
+
+do_install_ptest_cargo[dirs] = "${B}"
+do_install_ptest_cargo[doc] = "Create or update the run-ptest script with the generated rust test binaries"
+do_compile_ptest_cargo[dirs] = "${B}"
+do_compile_ptest_cargo[doc] = "Generate rust test binaries through cargo"
+
+addtask compile_ptest_cargo after do_compile before do_compile_ptest_base
+addtask install_ptest_cargo after do_install_ptest_base before do_package
+
+python () {
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.setVarFlag('do_install_ptest_cargo', 'fakeroot', '1')
+ d.setVarFlag('do_install_ptest_cargo', 'umask', '022')
+
+ # Remove all '*ptest_cargo' tasks when ptest is not enabled
+ if not(d.getVar('PTEST_ENABLED') == "1"):
+ for i in ['do_compile_ptest_cargo', 'do_install_ptest_cargo']:
+ bb.build.deltask(i, d)
+}
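
A minimal consumer of this class could look like the sketch below; the recipe context and the test argument are assumptions:

    # hypothetical rust recipe fragment
    inherit ptest-cargo
    # forwarded verbatim to every installed test binary by run-ptest
    RUST_TEST_ARGS = "--test-threads=1"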
diff --git a/meta/classes-recipe/ptest-perl.bbclass b/meta/classes-recipe/ptest-perl.bbclass
index c283fdd1fc..a4a9d40d52 100644
--- a/meta/classes-recipe/ptest-perl.bbclass
+++ b/meta/classes-recipe/ptest-perl.bbclass
@@ -13,7 +13,7 @@ SRC_URI += "file://ptest-perl/run-ptest"
do_install_ptest_perl() {
install -d ${D}${PTEST_PATH}
if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then
- install -m 0755 ${WORKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH}
+ install -m 0755 ${UNPACKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH}
fi
cp -r ${B}/t ${D}${PTEST_PATH}
chown -R root:root ${D}${PTEST_PATH}
diff --git a/meta/classes-recipe/ptest.bbclass b/meta/classes-recipe/ptest.bbclass
index 0383206a6d..0941572f8f 100644
--- a/meta/classes-recipe/ptest.bbclass
+++ b/meta/classes-recipe/ptest.bbclass
@@ -53,12 +53,12 @@ do_install_ptest() {
}
do_install_ptest_base() {
- if [ -f ${WORKDIR}/run-ptest ]; then
- install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
- fi
- if grep -q install-ptest: Makefile; then
- oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
+ if [ -f ${UNPACKDIR}/run-ptest ]; then
+ install -D ${UNPACKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
fi
+
+ grep -q install-ptest: Makefile 2>/dev/null && oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
+
do_install_ptest
chown -R root:root ${D}${PTEST_PATH}
@@ -138,5 +138,5 @@ def package_qa_check_missing_ptest(pn, d, messages):
return
enabled_ptests = " ".join([d.getVar('PTESTS_FAST'), d.getVar('PTESTS_SLOW'), d.getVar('PTESTS_PROBLEMS')]).split()
- if (pn + "-ptest").replace(d.getVar('MLPREFIX'), '') not in enabled_ptests:
+ if pn.replace(d.getVar('MLPREFIX'), '') not in enabled_ptests:
oe.qa.handle_error("missing-ptest", "supports ptests but is not included in oe-core's ptest-packagelists.inc", d)
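
Under the updated check, the ptest packagelists are matched against the bare recipe name rather than the -ptest package, so an entry would presumably look like:

    # sketch of a ptest-packagelists.inc entry
    PTESTS_FAST += "zlib"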
diff --git a/meta/classes-recipe/pypi.bbclass b/meta/classes-recipe/pypi.bbclass
index aab04c638f..c6bbe8119a 100644
--- a/meta/classes-recipe/pypi.bbclass
+++ b/meta/classes-recipe/pypi.bbclass
@@ -12,14 +12,21 @@ def pypi_package(d):
return bpn[8:]
return bpn
+# The PyPi package name (defaults to PN without the python3- prefix)
PYPI_PACKAGE ?= "${@pypi_package(d)}"
+# The file extension of the source archive
PYPI_PACKAGE_EXT ?= "tar.gz"
-PYPI_ARCHIVE_NAME ?= "${PYPI_PACKAGE}-${PV}.${PYPI_PACKAGE_EXT}"
+# An optional prefix for the download file in the case of name collisions
+PYPI_ARCHIVE_NAME_PREFIX ?= ""
def pypi_src_uri(d):
+ """
+ Construct a source URL as per https://warehouse.pypa.io/api-reference/integration-guide.html#predictable-urls.
+ """
package = d.getVar('PYPI_PACKAGE')
- archive_name = d.getVar('PYPI_ARCHIVE_NAME')
- return 'https://files.pythonhosted.org/packages/source/%s/%s/%s' % (package[0], package, archive_name)
+ archive_name = d.expand('${PYPI_PACKAGE}-${PV}.${PYPI_PACKAGE_EXT}')
+ archive_downloadname = d.getVar('PYPI_ARCHIVE_NAME_PREFIX') + archive_name
+ return 'https://files.pythonhosted.org/packages/source/%s/%s/%s;downloadfilename=%s' % (package[0], package, archive_name, archive_downloadname)
PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
@@ -28,7 +35,9 @@ SECTION = "devel/python"
SRC_URI:prepend = "${PYPI_SRC_URI} "
S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
-UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
-UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
+# Replace any '_' characters in the pypi URI with '-'s to follow the PyPI website naming conventions
+UPSTREAM_CHECK_PYPI_PACKAGE ?= "${@d.getVar('PYPI_PACKAGE').replace('_', '-')}"
+UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${UPSTREAM_CHECK_PYPI_PACKAGE}/"
+UPSTREAM_CHECK_REGEX ?= "/${UPSTREAM_CHECK_PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
CVE_PRODUCT ?= "python:${PYPI_PACKAGE}"
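
Taken together, the new variables could be used in a recipe roughly like this (package names and the prefix are illustrative):

    # hypothetical recipe python3-foo_1.0.bb
    inherit pypi python_setuptools_build_meta
    # only needed when the default (PN without "python3-") is wrong
    PYPI_PACKAGE = "Foo"
    # disambiguate the download if another recipe also fetches Foo-1.0.tar.gz
    PYPI_ARCHIVE_NAME_PREFIX = "pypi-"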
diff --git a/meta/classes-recipe/python3-dir.bbclass b/meta/classes-recipe/python3-dir.bbclass
index 912c67253c..3d07de99b8 100644
--- a/meta/classes-recipe/python3-dir.bbclass
+++ b/meta/classes-recipe/python3-dir.bbclass
@@ -4,7 +4,7 @@
# SPDX-License-Identifier: MIT
#
-PYTHON_BASEVERSION = "3.10"
+PYTHON_BASEVERSION = "3.12"
PYTHON_ABI = ""
PYTHON_DIR = "python${PYTHON_BASEVERSION}"
PYTHON_PN = "python3"
diff --git a/meta/classes-recipe/python3targetconfig.bbclass b/meta/classes-recipe/python3targetconfig.bbclass
index 3f89e5e09e..08bc619398 100644
--- a/meta/classes-recipe/python3targetconfig.bbclass
+++ b/meta/classes-recipe/python3targetconfig.bbclass
@@ -10,26 +10,32 @@ EXTRA_PYTHON_DEPENDS ?= ""
EXTRA_PYTHON_DEPENDS:class-target = "python3"
DEPENDS:append = " ${EXTRA_PYTHON_DEPENDS}"
-do_configure:prepend:class-target() {
+setup_target_config() {
export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+ export PYTHONPATH=${STAGING_LIBDIR}/python-sysconfigdata:$PYTHONPATH
+ export PATH=${STAGING_EXECPREFIXDIR}/python-target-config/:$PATH
+}
+
+do_configure:prepend:class-target() {
+ setup_target_config
}
do_compile:prepend:class-target() {
- export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+ setup_target_config
}
do_install:prepend:class-target() {
- export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+ setup_target_config
}
do_configure:prepend:class-nativesdk() {
- export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+ setup_target_config
}
do_compile:prepend:class-nativesdk() {
- export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+ setup_target_config
}
do_install:prepend:class-nativesdk() {
- export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+ setup_target_config
}
diff --git a/meta/classes-recipe/python_hatchling.bbclass b/meta/classes-recipe/python_hatchling.bbclass
index b9e6582eb5..b5a3c3feea 100644
--- a/meta/classes-recipe/python_hatchling.bbclass
+++ b/meta/classes-recipe/python_hatchling.bbclass
@@ -7,3 +7,21 @@
inherit python_pep517 python3native python3-dir setuptools3-base
DEPENDS += "python3-hatchling-native"
+
+# delete nested, empty directories from the python site-packages path. Make
+# sure that we remove the native ones for target builds as well
+hatchling_rm_emptydirs:class-target () {
+ find ${STAGING_LIBDIR}/${PYTHON_DIR}/site-packages/* -depth -type d -empty -delete
+ find ${STAGING_LIBDIR_NATIVE}/${PYTHON_DIR}/site-packages/* -depth -type d -empty -delete
+}
+
+hatchling_rm_emptydirs:class-native () {
+ find ${STAGING_LIBDIR_NATIVE}/${PYTHON_DIR}/site-packages/* -depth -type d -empty -delete
+}
+
+# Define a default empty version of hatchling_rm_emptydirs to appease bitbake
+hatchling_rm_emptydirs () {
+ :
+}
+
+do_prepare_recipe_sysroot[postfuncs] += " hatchling_rm_emptydirs"
diff --git a/meta/classes-recipe/python_maturin.bbclass b/meta/classes-recipe/python_maturin.bbclass
new file mode 100644
index 0000000000..c39d6c6e37
--- /dev/null
+++ b/meta/classes-recipe/python_maturin.bbclass
@@ -0,0 +1,17 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+inherit python_pyo3 python_setuptools_build_meta
+
+DEPENDS += "python3-maturin-native"
+
+python_maturin_do_configure() {
+ python_pyo3_do_configure
+ cargo_common_do_configure
+ python_pep517_do_configure
+}
+
+EXPORT_FUNCTIONS do_configure
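
A recipe for a maturin-built extension module would presumably just combine this class with pypi, e.g. (recipe name assumed):

    # hypothetical recipe python3-bar_1.0.bb
    inherit pypi python_maturin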
diff --git a/meta/classes-recipe/python_mesonpy.bbclass b/meta/classes-recipe/python_mesonpy.bbclass
new file mode 100644
index 0000000000..131fa74bed
--- /dev/null
+++ b/meta/classes-recipe/python_mesonpy.bbclass
@@ -0,0 +1,52 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+inherit meson setuptools3-base python3targetconfig python_pep517
+
+# meson_do_qa_configure does the wrong thing here because
+# mesonpy runs "meson setup ..." in do_compile context.
+# Make it a dummy function.
+meson_do_qa_configure () {
+ :
+}
+
+# This prevents the meson error:
+# ERROR: Got argument buildtype as both -Dbuildtype and --buildtype. Pick one.
+MESONOPTS:remove = "--buildtype ${MESON_BUILDTYPE}"
+
+CONFIGURE_FILES = "pyproject.toml"
+
+DEPENDS += "python3-wheel-native python3-meson-python-native"
+
+def mesonpy_get_args(d):
+ vars = ['MESONOPTS', 'MESON_CROSS_FILE', 'EXTRA_OEMESON']
+ varlist = []
+ for var in vars:
+ value = d.getVar(var)
+ vallist = value.split()
+ for elem in vallist:
+ varlist.append("-Csetup-args=" + elem)
+ return ' '.join(varlist)
+
+PEP517_BUILD_OPTS = "-Cbuilddir='${B}' ${@mesonpy_get_args(d)}"
+
+# Python pyx -> c -> so build leaves absolute build paths in the code
+INSANE_SKIP:${PN} += "buildpaths"
+INSANE_SKIP:${PN}-src += "buildpaths"
+
+python_mesonpy_do_configure () {
+ python_pep517_do_configure
+}
+
+python_mesonpy_do_compile () {
+ python_pep517_do_compile
+}
+
+python_mesonpy_do_install () {
+ python_pep517_do_install
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
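
Usage should mirror the other PEP 517 classes; a sketch, where the meson option is a stand-in for whatever the project actually defines:

    # hypothetical recipe for a meson-python project
    inherit pypi python_mesonpy
    # forwarded to "meson setup" via -Csetup-args
    EXTRA_OEMESON += "-Dtests=false"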
diff --git a/meta/classes-recipe/python_pep517.bbclass b/meta/classes-recipe/python_pep517.bbclass
index 202dde0bc3..c30674c8ec 100644
--- a/meta/classes-recipe/python_pep517.bbclass
+++ b/meta/classes-recipe/python_pep517.bbclass
@@ -10,7 +10,7 @@
# This class will build a wheel in do_compile, and use pypa/installer to install
# it in do_install.
-DEPENDS:append = " python3-picobuild-native python3-installer-native"
+DEPENDS:append = " python3-build-native python3-installer-native"
# Where to execute the build process from
PEP517_SOURCE_PATH ?= "${S}"
@@ -18,7 +18,8 @@ PEP517_SOURCE_PATH ?= "${S}"
# The directory where wheels will be written
PEP517_WHEEL_PATH ?= "${WORKDIR}/dist"
-PEP517_PICOBUILD_OPTS ?= ""
+# Other options to pass to build
+PEP517_BUILD_OPTS ?= ""
# The interpreter to use for installed scripts
PEP517_INSTALL_PYTHON = "python3"
@@ -36,12 +37,12 @@ python_pep517_do_configure () {
# When we have Python 3.11 we can parse pyproject.toml to determine the build
# API entry point directly
python_pep517_do_compile () {
- nativepython3 -m picobuild --source ${PEP517_SOURCE_PATH} --dest ${PEP517_WHEEL_PATH} --wheel ${PEP517_PICOBUILD_OPTS}
+ nativepython3 -m build --no-isolation --wheel --outdir ${PEP517_WHEEL_PATH} ${PEP517_SOURCE_PATH} ${PEP517_BUILD_OPTS}
}
do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
python_pep517_do_install () {
- COUNT=$(find ${PEP517_WHEEL_PATH} -name '*.whl' | wc -l)
+ COUNT=$(find ${PEP517_WHEEL_PATH} -name '*.whl' -maxdepth 1 | wc -l)
if test $COUNT -eq 0; then
bbfatal No wheels found in ${PEP517_WHEEL_PATH}
elif test $COUNT -gt 1; then
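
Recipes that previously set PEP517_PICOBUILD_OPTS would move to the renamed variable; as an illustration, --skip-dependency-check is a real python3-build flag, though whether a given recipe needs it is situational:

    PEP517_BUILD_OPTS = "--skip-dependency-check"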
diff --git a/meta/classes-recipe/python_setuptools3_rust.bbclass b/meta/classes-recipe/python_setuptools3_rust.bbclass
index d6ce2edb96..d3d7590cbe 100644
--- a/meta/classes-recipe/python_setuptools3_rust.bbclass
+++ b/meta/classes-recipe/python_setuptools3_rust.bbclass
@@ -4,14 +4,14 @@
# SPDX-License-Identifier: MIT
#
-inherit python_pyo3 setuptools3
+inherit python_pyo3 python_setuptools_build_meta
DEPENDS += "python3-setuptools-rust-native"
python_setuptools3_rust_do_configure() {
python_pyo3_do_configure
cargo_common_do_configure
- setuptools3_do_configure
+ python_pep517_do_configure
}
EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes-recipe/qemu.bbclass b/meta/classes-recipe/qemu.bbclass
index 874b15127c..dbb5ee0b66 100644
--- a/meta/classes-recipe/qemu.bbclass
+++ b/meta/classes-recipe/qemu.bbclass
@@ -34,7 +34,7 @@ def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
if qemu_binary == "qemu-allarch":
qemu_binary = "qemuwrapper"
- qemu_options = data.getVar("QEMU_OPTIONS")
+ qemu_options = data.getVar("QEMU_OPTIONS") or ""
return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
+ " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
diff --git a/meta/classes-recipe/qemuboot.bbclass b/meta/classes-recipe/qemuboot.bbclass
index 018c000ca2..895fd38d68 100644
--- a/meta/classes-recipe/qemuboot.bbclass
+++ b/meta/classes-recipe/qemuboot.bbclass
@@ -13,6 +13,7 @@
# QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse"
#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
+# e.g., "bzImage-initramfs-qemux86-64.bin" if INITRAMFS_IMAGE_BUNDLE is set to 1.
#
# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
#
@@ -61,8 +62,8 @@
# QB_SLIRP_OPT: network option for SLIRP mode, e.g., -netdev user,id=net0"
#
# QB_CMDLINE_IP_SLIRP: If QB_NETWORK_DEVICE adds more than one network interface to qemu, usually the
-# ip= kernel comand line argument needs to be changed accordingly. Details are documented
-# in the kernel docuemntation https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt
+# ip= kernel command line argument needs to be changed accordingly. Details are documented
+# in the kernel documentation https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt
# Example to configure only the first interface: "ip=eth0:dhcp"
# QB_CMDLINE_IP_TAP: This parameter is similar to the QB_CMDLINE_IP_SLIRP parameter. Since the tap interface requires
# static IP configuration @CLIENT@ and @GATEWAY@ place holders are replaced by the IP and the gateway
@@ -85,6 +86,8 @@
# without the need to specify a dedicated qemu configuration
#
# QB_GRAPHICS: QEMU video card type (e.g. "-vga std")
+# QB_NFSROOTFS_EXTRA_OPT: extra options to be appended to the nfs rootfs options in kernel boot arg, e.g.,
+# "wsize=4096,rsize=4096"
#
# Usage:
# IMAGE_CLASSES += "qemuboot"
@@ -93,15 +96,28 @@
QB_MEM ?= "-m 256"
QB_SMP ?= ""
QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
-QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
+QB_DEFAULT_KERNEL ?= "${@bb.utils.contains("INITRAMFS_IMAGE_BUNDLE", "1", "${KERNEL_IMAGETYPE}-${INITRAMFS_LINK_NAME}.bin", "${KERNEL_IMAGETYPE}", d)}"
QB_DEFAULT_FSTYPE ?= "ext4"
QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
QB_OPT_APPEND ?= ""
QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
+
+# qemurunner needs ip information first, so append QB_NO_PNI
+#
+QB_NO_PNI ?= "${@bb.utils.contains('DISTRO_FEATURES', 'pni-names', '', 'net.ifnames=0', d)}"
QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
-QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8"
+QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8 ${QB_NO_PNI}"
+
QB_ROOTFS_EXTRA_OPT ?= ""
QB_GRAPHICS ?= ""
+QB_NFSROOTFS_EXTRA_OPT ?= ""
+
+# With 6.5+ (specifically, if DMA_BOUNCE_UNALIGNED_KMALLOC is set) the SW IO TLB
+# is used, and it defaults to 64MB. This is too much when there's only 256MB of
+# RAM, so request 0 slabs and let the kernel round up to the appropriate minimum
+# (1MB, typically). In virtual hardware there's very little need for these bounce
+# buffers, so the 64MB would be mostly wasted.
+QB_KERNEL_CMDLINE_APPEND:append = " swiotlb=0"
# This should be kept aligned with ROOT_VM
QB_DRIVE_TYPE ?= "/dev/sd"
@@ -139,7 +155,7 @@ python do_write_qemuboot_conf() {
# contains all tools required by runqemu
if k == 'STAGING_BINDIR_NATIVE':
val = os.path.join(d.getVar('BASE_WORKDIR'), d.getVar('BUILD_SYS'),
- 'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
+ 'qemu-helper-native/1.0/recipe-sysroot-native/usr/bin/')
else:
val = d.getVar(k)
if val is None:
@@ -169,3 +185,5 @@ python do_write_qemuboot_conf() {
os.remove(qemuboot_link)
os.symlink(os.path.basename(qemuboot), qemuboot_link)
}
+
+EXTRA_IMAGEDEPENDS += "qemu-system-native qemu-helper-native:do_addto_recipe_sysroot"
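
A machine configuration exercising the new variables might look like the following sketch (values are placeholders, not recommendations):

    QB_MEM = "-m 512"
    QB_NFSROOTFS_EXTRA_OPT = "wsize=4096,rsize=4096"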
diff --git a/meta/classes-recipe/rootfs-postcommands.bbclass b/meta/classes-recipe/rootfs-postcommands.bbclass
index bf1e992bb2..920da94ba2 100644
--- a/meta/classes-recipe/rootfs-postcommands.bbclass
+++ b/meta/classes-recipe/rootfs-postcommands.bbclass
@@ -5,25 +5,25 @@
#
# Zap the root password if debug-tweaks and empty-root-password features are not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ",d)}'
# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password ", "",d)}'
# Allow dropbear/openssh to accept root logins if debug-tweaks or allow-root-login is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-root-login' ], "ssh_allow_root_login; ", "",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-root-login' ], "ssh_allow_root_login ", "",d)}'
# Autologin the root user on the serial console, if empty-root-password and serial-autologin-root are active
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", [ 'empty-root-password', 'serial-autologin-root' ], "serial_autologin_root; ", "",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", [ 'empty-root-password', 'serial-autologin-root' ], "serial_autologin_root ", "",d)}'
# Enable postinst logging if debug-tweaks or post-install-logging is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging ", "",d)}'
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp "
-# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
+# Tweak files in /etc if read-only-rootfs is enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook ", "",d)}'
# We also need to do the same for the kernel boot parameters,
# otherwise kernel or initramfs end up mounting the rootfs read/write
@@ -34,20 +34,20 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only
APPEND:append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
# Generates test data file with data store variables expanded in json format
-ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
+ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data "
# Write manifest
-IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
-ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
+IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}.manifest"
+ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest"
# Set default postinst log file
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
# Set default target for systemd images
SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains_any("IMAGE_FEATURES", [ "x11-base", "weston" ], "graphical.target", "multi-user.target", d)}'
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; systemd_create_users;", "", d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target systemd_sysusers_check", "", d)}'
-ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
+ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile'
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "overlayfs", "overlayfs_qa_check; overlayfs_postprocess;", "", d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "overlayfs", "overlayfs_qa_check overlayfs_postprocess", "", d)}'
inherit image-artifact-names
@@ -55,43 +55,122 @@ inherit image-artifact-names
# deterministic. Package installs are not deterministic, causing the ordering
# of entries to change between builds. In case that this isn't desired,
# the command can be overridden.
+SORT_PASSWD_POSTPROCESS_COMMAND ??= "tidy_shadowutils_files"
+ROOTFS_POSTPROCESS_COMMAND += '${SORT_PASSWD_POSTPROCESS_COMMAND}'
+
#
# Note that useradd-staticids.bbclass has to be used to ensure that
# the numeric IDs of dynamically created entries remain stable.
#
-# We want this to run as late as possible, in particular after
-# systemd_sysusers_create and set_user_group. Using :append is not
-# enough for that, set_user_group is added that way and would end
-# up running after us.
-SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
-python () {
- d.appendVar('ROOTFS_POSTPROCESS_COMMAND', '${SORT_PASSWD_POSTPROCESS_COMMAND}')
- d.appendVar('ROOTFS_POSTPROCESS_COMMAND', 'rootfs_reproducible;')
-}
-
-systemd_create_users () {
- for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/*.conf; do
- [ -e $conffile ] || continue
- grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
- if [ "$type" = "u" ]; then
- useradd_params="--shell /sbin/nologin"
- [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
- [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
- useradd_params="$useradd_params --system $name"
- eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
- elif [ "$type" = "g" ]; then
- groupadd_params=""
- [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
- groupadd_params="$groupadd_params --system $name"
- eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
- elif [ "$type" = "m" ]; then
- group=$id
- eval groupadd --root ${IMAGE_ROOTFS} --system $group || true
- eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name --no-user-group || true
- eval usermod --root ${IMAGE_ROOTFS} -a -G $group $name
- fi
- done
- done
+ROOTFS_POSTPROCESS_COMMAND += 'rootfs_reproducible'
+
+# Resolve the ID as described in the sysusers.d(5) manual: ID can be a numeric
+# uid, a pair uid:gid or uid:groupname, '-' (meaning the value is assigned
+# automatically), or a path. In the latter case, the uid/gid matches the
+# user/group owner of that file.
+def resolve_sysusers_id(d, sid):
+ # If the id is a path, the uid/gid matches the target file's uid/gid in the
+ # rootfs.
+ if '/' in sid:
+ try:
+ osstat = os.stat(os.path.join(d.getVar('IMAGE_ROOTFS'), sid))
+ except FileNotFoundError:
+ bb.error('sysusers.d: file %s is required but it does not exist in the rootfs' % sid)
+ return ('-', '-')
+ return (osstat.st_uid, osstat.st_gid)
+ # Else it is a uid:gid or uid:groupname syntax
+ if ':' in sid:
+ return sid.split(':')
+ else:
+ return (sid, '-')
+
+# Check a user exists in the rootfs password file and return its properties
+def check_user_exists(d, uname=None, uid=None):
+ with open(os.path.join(d.getVar('IMAGE_ROOTFS'), 'etc/passwd'), 'r') as pwfile:
+ for line in pwfile:
+ (name, _, u_id, gid, comment, homedir, ushell) = line.strip().split(':')
+ if uname == name or uid == u_id:
+ return (name, u_id, gid, comment or '-', homedir or '/', ushell or '-')
+ return None
+
+# Check a group exists in the rootfs group file and return its properties
+def check_group_exists(d, gname=None, gid=None):
+ with open(os.path.join(d.getVar('IMAGE_ROOTFS'), 'etc/group'), 'r') as gfile:
+ for line in gfile:
+ (name, _, g_id, _) = line.strip().split(':')
+ if name == gname or g_id == gid:
+ return (name, g_id)
+ return None
+
+def compare_users(user, e_user):
+ # user and e_user must not have None values. Unset values must be '-'.
+ (name, uid, gid, comment, homedir, ushell) = user
+ (e_name, e_uid, e_gid, e_comment, e_homedir, e_ushell) = e_user
+ # Ignore 'uid', 'gid' or 'homedir' if they are not set
+ # Ignore 'shell' and 'ushell' if one is not set
+ return name == e_name \
+ and (uid == '-' or uid == e_uid) \
+ and (gid == '-' or gid == e_gid) \
+ and (homedir == '-' or e_homedir == '-' or homedir == e_homedir) \
+ and (ushell == '-' or e_ushell == '-' or ushell == e_ushell)
+
+# Open sysusers.d configuration files and parse each line to check the users and
+# groups are already defined in /etc/passwd and /etc/groups with similar
+# properties. Refer to the sysusers.d(5) manual for its syntax.
+python systemd_sysusers_check() {
+ import glob
+ import re
+
+ pattern_comment = r'(-|\"[^:\"]+\")'
+ pattern_word = r'[^\s]+'
+ pattern_line = r'(' + pattern_word + r')\s+(' + pattern_word + r')\s+(' + pattern_word + r')(\s+' \
+ + pattern_comment + r')?' + r'(\s+(' + pattern_word + r'))?' + r'(\s+(' + pattern_word + r'))?'
+
+ for conffile in glob.glob(os.path.join(d.getVar('IMAGE_ROOTFS'), 'usr/lib/sysusers.d/*.conf')):
+ with open(conffile, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if not len(line) or line[0] == '#': continue
+ ret = re.fullmatch(pattern_line, line.strip())
+ if not ret: continue
+ (stype, sname, sid, _, scomment, _, shomedir, _, sshell) = ret.groups()
+ if stype == 'u':
+ if sid:
+ (suid, sgid) = resolve_sysusers_id(d, sid)
+ # resolve_sysusers_id may return numeric ids (from os.stat), so
+ # normalise to strings before classifying them
+ (suid, sgid) = (str(suid), str(sgid))
+ if sgid.isalpha():
+ # Map a group name to its numeric gid for the comparison below
+ e_group = check_group_exists(d, gname=sgid)
+ sgid = e_group[1] if e_group else '-'
+ elif sgid.isdigit():
+ check_group_exists(d, gid=sgid)
+ else:
+ sgid = '-'
+ else:
+ suid = '-'
+ sgid = '-'
+ scomment = scomment.replace('"', '') if scomment else '-'
+ shomedir = shomedir or '-'
+ sshell = sshell or '-'
+ e_user = check_user_exists(d, uname=sname)
+ if not e_user:
+ bb.warn('User %s has never been defined' % sname)
+ elif not compare_users((sname, suid, sgid, scomment, shomedir, sshell), e_user):
+ bb.warn('User %s has been defined as (%s) but sysusers.d expects it as (%s)'
+ % (sname, ', '.join(e_user),
+ ', '.join((sname, suid, sgid, scomment, shomedir, sshell))))
+ elif stype == 'g':
+ gid = sid or '-'
+ if '/' in gid:
+ (_, gid) = resolve_sysusers_id(d, sid)
+ e_group = check_group_exists(d, gname=sname)
+ if not e_group:
+ bb.warn('Group %s has never been defined' % sname)
+ elif gid != '-':
+ (_, e_gid) = e_group
+ if gid != e_gid:
+ bb.warn('Group %s has been defined with id (%s) but sysusers.d expects gid (%s)'
+ % (sname, e_gid, gid))
+ elif stype == 'm':
+ check_user_exists(d, sname)
+ check_group_exists(d, sid)
}
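To make the capture layout of the regex above concrete, here is a small standalone check of the same pattern against a sample sysusers.d line (the line itself is an illustrative assumption):

    import re

    pattern_comment = r'(-|\"[^:\"]+\")'
    pattern_word = r'[^\s]+'
    pattern_line = r'(' + pattern_word + r')\s+(' + pattern_word + r')\s+(' + pattern_word + r')(\s+' \
        + pattern_comment + r')?' + r'(\s+(' + pattern_word + r'))?' + r'(\s+(' + pattern_word + r'))?'

    m = re.fullmatch(pattern_line, 'u messagebus 101:105 "D-Bus" /nonexistent -')
    # Nine groups: type, name, id, then (wrapper, value) pairs for the
    # optional comment, home directory and shell fields
    (stype, sname, sid, _, scomment, _, shomedir, _, sshell) = m.groups()
    assert (stype, sname, sid) == ('u', 'messagebus', '101:105')
    assert (scomment, shomedir, sshell) == ('"D-Bus"', '/nonexistent', '-')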
#
@@ -111,20 +190,26 @@ read_only_rootfs_hook () {
# If we're using openssh and the /etc/ssh directory has no pre-generated keys,
# we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
# and the keys under /var/run/ssh.
- if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
- if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
- echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
- else
- echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ # If overlayfs-etc is used, this is not done, as /etc is treated as writable.
+ # If stateless-rootfs is enabled, this is always done, as keys should not be saved then.
+ if ${@ 'true' if not bb.utils.contains('IMAGE_FEATURES', 'overlayfs-etc', True, False, d) or bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else 'false'}; then
+ if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
+ if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ else
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ fi
fi
- fi
- # Also tweak the key location for dropbear in the same way.
- if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
- if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
- echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ # Also tweak the key location for dropbear in the same way.
+ if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
+ if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
+ if ! grep -q "^DROPBEAR_RSAKEY_DIR=" ${IMAGE_ROOTFS}/etc/default/dropbear ; then
+ echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ fi
+ fi
fi
fi
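For clarity, the ${@...} gate above expands at parse time to a literal shell "true" or "false". A rough Python sketch of that evaluation, with a simplified stand-in for bb.utils.contains and assumed IMAGE_FEATURES values:

    def contains(value, item):
        # Simplified stand-in for bb.utils.contains()
        return item in value.split()

    features = 'read-only-rootfs overlayfs-etc'
    gate = 'true' if not contains(features, 'overlayfs-etc') \
        or contains(features, 'stateless-rootfs') else 'false'
    assert gate == 'false'  # with overlayfs-etc, /etc stays writable, so skip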
@@ -152,10 +237,10 @@ read_only_rootfs_hook () {
#
zap_empty_root_password () {
if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
- sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
+ sed --follow-symlinks -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
fi
if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
- sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
+ sed --follow-symlinks -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
fi
}
@@ -200,6 +285,7 @@ ssh_allow_root_login () {
if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
sed -i '/^DROPBEAR_EXTRA_ARGS=/ s/-w//' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
+ sed -i '/^# Disallow root/d' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
fi
fi
}
@@ -221,9 +307,20 @@ serial_autologin_root () {
fi
}
+python tidy_shadowutils_files () {
+ import rootfspostcommands
+ rootfspostcommands.tidy_shadowutils_files(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
+}
+
python sort_passwd () {
+ """
+ Deprecated in favour of tidy_shadowutils_files.
+ """
import rootfspostcommands
- rootfspostcommands.sort_passwd(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
+ bb.warn('[sort_passwd] You are using a deprecated function for '
+ 'SORT_PASSWD_POSTPROCESS_COMMAND. The default one is now called '
+ '"tidy_shadowutils_files".')
+ rootfspostcommands.tidy_shadowutils_files(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
}
#
@@ -269,12 +366,6 @@ remove_init_link () {
fi
}
-make_zimage_symlink_relative () {
- if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
- (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
- fi
-}
-
python write_image_manifest () {
from oe.rootfs import image_list_installed_packages
from oe.utils import format_pkg_list
diff --git a/meta/classes-recipe/rootfs_ipk.bbclass b/meta/classes-recipe/rootfs_ipk.bbclass
index a48ad07dfc..87fff53a58 100644
--- a/meta/classes-recipe/rootfs_ipk.bbclass
+++ b/meta/classes-recipe/rootfs_ipk.bbclass
@@ -29,7 +29,7 @@ OPKG_POSTPROCESS_COMMANDS = ""
OPKGLIBDIR ??= "${localstatedir}/lib"
-MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
+MULTILIBRE_ALLOW_REP += "${OPKGLIBDIR}/opkg /usr/lib/opkg"
python () {
diff --git a/meta/classes-recipe/rootfs_rpm.bbclass b/meta/classes-recipe/rootfs_rpm.bbclass
index 6eccd5a959..55f1cc92ca 100644
--- a/meta/classes-recipe/rootfs_rpm.bbclass
+++ b/meta/classes-recipe/rootfs_rpm.bbclass
@@ -20,11 +20,9 @@ IMAGE_ROOTFS_EXTRA_SPACE:append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf"
# Dnf is python based, so be sure python3-native is available to us.
EXTRANATIVEPATH += "python3-native"
-# opkg is needed for update-alternatives
RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
dnf-native:do_populate_sysroot \
- createrepo-c-native:do_populate_sysroot \
- opkg-native:do_populate_sysroot"
+ createrepo-c-native:do_populate_sysroot"
do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
diff --git a/meta/classes-recipe/rootfsdebugfiles.bbclass b/meta/classes-recipe/rootfsdebugfiles.bbclass
index cbcf876479..4c2fc1de25 100644
--- a/meta/classes-recipe/rootfsdebugfiles.bbclass
+++ b/meta/classes-recipe/rootfsdebugfiles.bbclass
@@ -34,7 +34,7 @@
ROOTFS_DEBUG_FILES ?= ""
ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files"
rootfs_debug_files () {
#!/bin/sh -e
echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
diff --git a/meta/classes-recipe/rust-bin.bbclass b/meta/classes-recipe/rust-bin.bbclass
deleted file mode 100644
index b8e7ef8191..0000000000
--- a/meta/classes-recipe/rust-bin.bbclass
+++ /dev/null
@@ -1,154 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-inherit rust
-
-RDEPENDS:${PN}:append:class-target = " ${RUSTLIB_DEP}"
-
-RUSTC_ARCHFLAGS += "-C opt-level=3 -g -L ${STAGING_DIR_HOST}/${rustlibdir} -C linker=${RUST_TARGET_CCLD}"
-EXTRA_OEMAKE += 'RUSTC_ARCHFLAGS="${RUSTC_ARCHFLAGS}"'
-
-# Some libraries alias with the standard library but libstd is configured to
-# make it difficult or imposisble to use its version. Unfortunately libstd
-# must be explicitly overridden using extern.
-OVERLAP_LIBS = "\
- libc \
- log \
- getopts \
- rand \
-"
-def get_overlap_deps(d):
- deps = d.getVar("DEPENDS").split()
- overlap_deps = []
- for o in d.getVar("OVERLAP_LIBS").split():
- l = len([o for dep in deps if (o + '-rs' in dep)])
- if l > 0:
- overlap_deps.append(o)
- return " ".join(overlap_deps)
-OVERLAP_DEPS = "${@get_overlap_deps(d)}"
-
-# Prevents multiple static copies of standard library modules
-# See https://github.com/rust-lang/rust/issues/19680
-RUSTC_PREFER_DYNAMIC = "-C prefer-dynamic"
-RUSTC_FLAGS += "${RUSTC_PREFER_DYNAMIC}"
-
-CRATE_NAME ?= "${@d.getVar('BPN').replace('-rs', '').replace('-', '_')}"
-BINNAME ?= "${BPN}"
-LIBNAME ?= "lib${CRATE_NAME}-rs"
-CRATE_TYPE ?= "dylib"
-BIN_SRC ?= "${S}/src/main.rs"
-LIB_SRC ?= "${S}/src/lib.rs"
-
-rustbindest ?= "${bindir}"
-rustlibdest ?= "${rustlibdir}"
-RUST_RPATH_ABS ?= "${rustlibdir}:${rustlib}"
-
-def relative_rpaths(paths, base):
- relpaths = set()
- for p in paths.split(':'):
- if p == base:
- relpaths.add('$ORIGIN')
- continue
- relpaths.add(os.path.join('$ORIGIN', os.path.relpath(p, base)))
- return '-rpath=' + ':'.join(relpaths) if len(relpaths) else ''
-
-RUST_LIB_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustlibdest', True))}"
-RUST_BIN_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustbindest', True))}"
-
-def libfilename(d):
- if d.getVar('CRATE_TYPE', True) == 'dylib':
- return d.getVar('LIBNAME', True) + '.so'
- else:
- return d.getVar('LIBNAME', True) + '.rlib'
-
-def link_args(d, bin):
- linkargs = []
- if bin:
- rpaths = d.getVar('RUST_BIN_RPATH_FLAGS', False)
- else:
- rpaths = d.getVar('RUST_LIB_RPATH_FLAGS', False)
- if d.getVar('CRATE_TYPE', True) == 'dylib':
- linkargs.append('-soname')
- linkargs.append(libfilename(d))
- if len(rpaths):
- linkargs.append(rpaths)
- if len(linkargs):
- return ' '.join(['-Wl,' + arg for arg in linkargs])
- else:
- return ''
-
-get_overlap_externs () {
- externs=
- for dep in ${OVERLAP_DEPS}; do
- extern=$(ls ${STAGING_DIR_HOST}/${rustlibdir}/lib$dep-rs.{so,rlib} 2>/dev/null \
- | awk '{print $1}');
- if [ -n "$extern" ]; then
- externs="$externs --extern $dep=$extern"
- else
- echo "$dep in depends but no such library found in ${rustlibdir}!" >&2
- exit 1
- fi
- done
- echo "$externs"
-}
-
-do_configure () {
-}
-
-oe_runrustc () {
- bbnote ${RUSTC} ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
- "${RUSTC}" ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
-}
-
-oe_compile_rust_lib () {
- rm -rf ${LIBNAME}.{rlib,so}
- local -a link_args
- if [ -n '${@link_args(d, False)}' ]; then
- link_args[0]='-C'
- link_args[1]='link-args=${@link_args(d, False)}'
- fi
- oe_runrustc $(get_overlap_externs) \
- "${link_args[@]}" \
- ${LIB_SRC} \
- -o ${@libfilename(d)} \
- --crate-name=${CRATE_NAME} --crate-type=${CRATE_TYPE} \
- "$@"
-}
-oe_compile_rust_lib[vardeps] += "get_overlap_externs"
-
-oe_compile_rust_bin () {
- rm -rf ${BINNAME}
- local -a link_args
- if [ -n '${@link_args(d, True)}' ]; then
- link_args[0]='-C'
- link_args[1]='link-args=${@link_args(d, True)}'
- fi
- oe_runrustc $(get_overlap_externs) \
- "${link_args[@]}" \
- ${BIN_SRC} -o ${BINNAME} "$@"
-}
-oe_compile_rust_bin[vardeps] += "get_overlap_externs"
-
-oe_install_rust_lib () {
- for lib in $(ls ${LIBNAME}.{so,rlib} 2>/dev/null); do
- echo Installing $lib
- install -D -m 755 $lib ${D}/${rustlibdest}/$lib
- done
-}
-
-oe_install_rust_bin () {
- echo Installing ${BINNAME}
- install -D -m 755 ${BINNAME} ${D}/${rustbindest}/${BINNAME}
-}
-
-do_rust_bin_fixups() {
- for f in `find ${PKGD} -name '*.so*'`; do
- echo "Strip rust note: $f"
- ${OBJCOPY} -R .note.rustc $f $f
- done
-}
-PACKAGE_PREPROCESS_FUNCS += "do_rust_bin_fixups"
-
diff --git a/meta/classes-recipe/rust-common.bbclass b/meta/classes-recipe/rust-common.bbclass
index 93bf6c8be6..6940093e59 100644
--- a/meta/classes-recipe/rust-common.bbclass
+++ b/meta/classes-recipe/rust-common.bbclass
@@ -14,10 +14,10 @@ FILES:${PN}-dev += "${rustlibdir}/*.rlib ${rustlibdir}/*.rmeta"
FILES:${PN}-dbg += "${rustlibdir}/.debug"
RUSTLIB = "-L ${STAGING_DIR_HOST}${rustlibdir}"
-RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR}"
+RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=${TARGET_DBGSRC_DIR}"
RUSTFLAGS += "${RUSTLIB} ${RUST_DEBUG_REMAP}"
-RUSTLIB_DEP ?= "libstd-rs"
-RUST_PANIC_STRATEGY ?= "unwind"
+RUSTLIB_DEP ??= "libstd-rs"
+RUST_PANIC_STRATEGY ??= "unwind"
def target_is_armv7(d):
'''Determine if target is armv7'''
@@ -53,12 +53,9 @@ def rust_base_triple(d, thing):
else:
arch = oe.rust.arch_to_rust_arch(d.getVar('{}_ARCH'.format(thing)))
- # When bootstrapping rust-native, BUILD must be the same as upstream snapshot tarballs
- bpn = d.getVar('BPN')
- if thing == "BUILD" and bpn in ["rust"]:
- return arch + "-unknown-linux-gnu"
-
- vendor = d.getVar('{}_VENDOR'.format(thing))
+ # Substituting "unknown" when vendor is empty will match rust's standard
+ # targets when building native recipes (including rust-native itself)
+ vendor = d.getVar('{}_VENDOR'.format(thing)) or "-unknown"
# Default to glibc
libc = "-gnu"
@@ -66,9 +63,17 @@ def rust_base_triple(d, thing):
# This catches ARM targets and appends the necessary hard float bits
if os == "linux-gnueabi" or os == "linux-musleabi":
libc = bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hf', '', d)
+ elif os == "linux-gnux32" or os == "linux-muslx32":
+ libc = ""
elif "musl" in os:
libc = "-musl"
os = "linux"
+ elif "elf" in os:
+ libc = "-elf"
+ os = "none"
+ elif "eabi" in os:
+ libc = "-eabi"
+ os = "none"
return arch + vendor + '-' + os + libc
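A worked example of the triple assembly above (all values are assumptions for illustration): the musl branch rewrites the os and libc pieces, and an unset vendor falls back to "-unknown":

    def base_triple_sketch(arch, vendor, os_):
        # Heavily simplified mirror of the fallback and libc handling above
        vendor = vendor or '-unknown'
        libc = '-gnu'
        if 'musl' in os_:
            libc, os_ = '-musl', 'linux'
        return arch + vendor + '-' + os_ + libc

    assert base_triple_sketch('x86_64', '', 'linux') == 'x86_64-unknown-linux-gnu'
    assert base_triple_sketch('aarch64', '-poky', 'linux-musl') == 'aarch64-poky-linux-musl'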
@@ -94,7 +99,7 @@ RUST_BUILD_ARCH = "${@oe.rust.arch_to_rust_arch(d.getVar('BUILD_ARCH'))}"
# Rust additionally will use two additional cases:
# - undecorated (e.g. CC) - equivalent to TARGET
# - triple suffix (e.g. CC:x86_64_unknown_linux_gnu) - both
-# see: https://github.com/alexcrichton/gcc-rs
+# see: https://github.com/rust-lang/cc-rs
# The way that Rust's internal triples and Yocto triples are mapped together
# it's likely best not to use the triple suffix due to potential confusion.
@@ -125,12 +130,22 @@ create_wrapper_rust () {
shift
extras="$1"
shift
+ crate_cc_extras="$1"
+ shift
cat <<- EOF > "${file}"
#!/usr/bin/env python3
import os, sys
orig_binary = "$@"
extras = "${extras}"
+
+ # Apply a required subset of CC crate compiler flags
+ # when we build a target recipe for a non-bare-metal target.
+ # https://github.com/rust-lang/cc-rs/blob/main/src/lib.rs#L1614
+ if "CRATE_CC_NO_DEFAULTS" in os.environ.keys() and \
+ "TARGET" in os.environ.keys() and not "-none-" in os.environ["TARGET"]:
+ orig_binary += "${crate_cc_extras}"
+
binary = orig_binary.split()[0]
args = orig_binary.split() + sys.argv[1:]
if extras:
@@ -145,6 +160,10 @@ WRAPPER_TARGET_CXX = "${CXX}"
WRAPPER_TARGET_CCLD = "${CCLD}"
WRAPPER_TARGET_LDFLAGS = "${LDFLAGS}"
WRAPPER_TARGET_EXTRALD = ""
+# see recipes-devtools/gcc/gcc/0018-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch
+# we need to link with ssp_nonshared on musl to avoid "undefined reference to `__stack_chk_fail_local'"
+# when building MACHINE=qemux86 for musl
+WRAPPER_TARGET_EXTRALD:libc-musl = "-lssp_nonshared"
WRAPPER_TARGET_AR = "${AR}"
# compiler is used by gcc-rs
@@ -154,22 +173,22 @@ do_rust_create_wrappers () {
mkdir -p "${WRAPPER_DIR}"
# Yocto Build / Rust Host C compiler
- create_wrapper_rust "${RUST_BUILD_CC}" "" "${BUILD_CC}"
+ create_wrapper_rust "${RUST_BUILD_CC}" "" "${CRATE_CC_FLAGS}" "${BUILD_CC}"
# Yocto Build / Rust Host C++ compiler
- create_wrapper_rust "${RUST_BUILD_CXX}" "" "${BUILD_CXX}"
+ create_wrapper_rust "${RUST_BUILD_CXX}" "" "${CRATE_CC_FLAGS}" "${BUILD_CXX}"
# Yocto Build / Rust Host linker
- create_wrapper_rust "${RUST_BUILD_CCLD}" "" "${BUILD_CCLD}" "${BUILD_LDFLAGS}"
+ create_wrapper_rust "${RUST_BUILD_CCLD}" "" "" "${BUILD_CCLD}" "${BUILD_LDFLAGS}"
# Yocto Build / Rust Host archiver
- create_wrapper_rust "${RUST_BUILD_AR}" "" "${BUILD_AR}"
+ create_wrapper_rust "${RUST_BUILD_AR}" "" "" "${BUILD_AR}"
# Yocto Target / Rust Target C compiler
- create_wrapper_rust "${RUST_TARGET_CC}" "${WRAPPER_TARGET_EXTRALD}" "${WRAPPER_TARGET_CC}" "${WRAPPER_TARGET_LDFLAGS}"
+ create_wrapper_rust "${RUST_TARGET_CC}" "${WRAPPER_TARGET_EXTRALD}" "${CRATE_CC_FLAGS}" "${WRAPPER_TARGET_CC}" "${WRAPPER_TARGET_LDFLAGS}"
# Yocto Target / Rust Target C++ compiler
- create_wrapper_rust "${RUST_TARGET_CXX}" "${WRAPPER_TARGET_EXTRALD}" "${WRAPPER_TARGET_CXX}" "${CXXFLAGS}"
+ create_wrapper_rust "${RUST_TARGET_CXX}" "${WRAPPER_TARGET_EXTRALD}" "${CRATE_CC_FLAGS}" "${WRAPPER_TARGET_CXX}" "${CXXFLAGS}"
# Yocto Target / Rust Target linker
- create_wrapper_rust "${RUST_TARGET_CCLD}" "${WRAPPER_TARGET_EXTRALD}" "${WRAPPER_TARGET_CCLD}" "${WRAPPER_TARGET_LDFLAGS}"
+ create_wrapper_rust "${RUST_TARGET_CCLD}" "${WRAPPER_TARGET_EXTRALD}" "" "${WRAPPER_TARGET_CCLD}" "${WRAPPER_TARGET_LDFLAGS}"
# Yocto Target / Rust Target archiver
- create_wrapper_rust "${RUST_TARGET_AR}" "" "${WRAPPER_TARGET_AR}"
+ create_wrapper_rust "${RUST_TARGET_AR}" "" "" "${WRAPPER_TARGET_AR}"
}
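The wrapper scripts generated above now take the crate-cc flags as a separate argument; a sketch of the decision the generated Python wrapper makes at run time (the environment values are assumptions):

    import os

    os.environ['CRATE_CC_NO_DEFAULTS'] = '1'
    os.environ['TARGET'] = 'aarch64-poky-linux-gnu'

    orig_binary = 'aarch64-poky-linux-gcc'
    crate_cc_extras = ' -ffunction-sections -fdata-sections -fPIC'
    # Bare-metal targets ("-none-") keep the unmodified command line
    if 'CRATE_CC_NO_DEFAULTS' in os.environ and \
            'TARGET' in os.environ and '-none-' not in os.environ['TARGET']:
        orig_binary += crate_cc_extras
    assert orig_binary.endswith('-fPIC')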
diff --git a/meta/classes-recipe/rust-target-config.bbclass b/meta/classes-recipe/rust-target-config.bbclass
index 3405086402..330ad8a3f5 100644
--- a/meta/classes-recipe/rust-target-config.bbclass
+++ b/meta/classes-recipe/rust-target-config.bbclass
@@ -21,16 +21,21 @@ def llvm_features_from_tune(d):
if 'vfpv4' in feat:
f.append("+vfp4")
- if 'vfpv3' in feat:
+ elif 'vfpv4d16' in feat:
+ f.append("+vfp4")
+ f.append("-d32")
+ elif 'vfpv3' in feat:
f.append("+vfp3")
- if 'vfpv3d16' in feat:
- f.append("+d16")
-
- if 'vfpv2' in feat or 'vfp' in feat:
+ elif 'vfpv3d16' in feat:
+ f.append("+vfp3")
+ f.append("-d32")
+ elif 'vfpv2' in feat or 'vfp' in feat:
f.append("+vfp2")
if 'neon' in feat:
f.append("+neon")
+ elif target_is_armv7(d):
+ f.append("-neon")
if 'mips32' in feat:
f.append("+mips32")
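The elif chain above makes the vfp variants mutually exclusive; here is a standalone sketch of just the vfp mapping, using an assumed TUNE_FEATURES set:

    def vfp_features(feat):
        # Mirrors the elif chain above for the vfp family only
        f = []
        if 'vfpv4' in feat:
            f.append('+vfp4')
        elif 'vfpv4d16' in feat:
            f += ['+vfp4', '-d32']
        elif 'vfpv3' in feat:
            f.append('+vfp3')
        elif 'vfpv3d16' in feat:
            f += ['+vfp3', '-d32']
        elif 'vfpv2' in feat or 'vfp' in feat:
            f.append('+vfp2')
        return f

    assert vfp_features(frozenset({'armv7a', 'vfpv3d16'})) == ['+vfp3', '-d32']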
@@ -114,7 +119,7 @@ def llvm_features_from_target_fpu(d):
# TARGET_FPU can be hard or soft. +soft-float tells llvm to use the soft float
# ABI. There is no option for hard.
- fpu = d.getVar('TARGET_FPU', True)
+ fpu = d.getVar('TARGET_FPU')
return ["+soft-float"] if fpu == "soft" else []
def llvm_features(d):
@@ -231,19 +236,27 @@ TARGET_POINTER_WIDTH[powerpc64le] = "64"
TARGET_C_INT_WIDTH[powerpc64le] = "64"
MAX_ATOMIC_WIDTH[powerpc64le] = "64"
-## riscv32-unknown-linux-{gnu, musl}
-DATA_LAYOUT[riscv32] = "e-m:e-p:32:32-i64:64-n32-S128"
-TARGET_ENDIAN[riscv32] = "little"
-TARGET_POINTER_WIDTH[riscv32] = "32"
-TARGET_C_INT_WIDTH[riscv32] = "32"
-MAX_ATOMIC_WIDTH[riscv32] = "32"
-
-## riscv64-unknown-linux-{gnu, musl}
-DATA_LAYOUT[riscv64] = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
-TARGET_ENDIAN[riscv64] = "little"
-TARGET_POINTER_WIDTH[riscv64] = "64"
-TARGET_C_INT_WIDTH[riscv64] = "64"
-MAX_ATOMIC_WIDTH[riscv64] = "64"
+## riscv32gc-unknown-linux-{gnu, musl}
+DATA_LAYOUT[riscv32gc] = "e-m:e-p:32:32-i64:64-n32-S128"
+TARGET_ENDIAN[riscv32gc] = "little"
+TARGET_POINTER_WIDTH[riscv32gc] = "32"
+TARGET_C_INT_WIDTH[riscv32gc] = "32"
+MAX_ATOMIC_WIDTH[riscv32gc] = "32"
+
+## riscv64gc-unknown-linux-{gnu, musl}
+DATA_LAYOUT[riscv64gc] = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
+TARGET_ENDIAN[riscv64gc] = "little"
+TARGET_POINTER_WIDTH[riscv64gc] = "64"
+TARGET_C_INT_WIDTH[riscv64gc] = "64"
+MAX_ATOMIC_WIDTH[riscv64gc] = "64"
+
+## loongarch64-unknown-linux-{gnu, musl}
+DATA_LAYOUT[loongarch64] = "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128"
+TARGET_ENDIAN[loongarch64] = "little"
+TARGET_POINTER_WIDTH[loongarch64] = "64"
+TARGET_C_INT_WIDTH[loongarch64] = "32"
+MAX_ATOMIC_WIDTH[loongarch64] = "64"
+FEATURES[loongarch64] = "+d"
# Convert a normal arch (HOST_ARCH, TARGET_ARCH, BUILD_ARCH, etc) to something
# rust's internals won't choke on.
@@ -258,9 +271,21 @@ def arch_to_rust_target_arch(arch):
return "arm"
elif arch == "powerpc64le":
return "powerpc64"
+ elif arch == "riscv32gc":
+ return "riscv32"
+ elif arch == "riscv64gc":
+ return "riscv64"
else:
return arch
+# Convert a rust target string to a llvm-compatible triplet
+def rust_sys_to_llvm_target(sys):
+ if sys.startswith('riscv32gc-'):
+ return sys.replace('riscv32gc-', 'riscv32-', 1)
+ if sys.startswith('riscv64gc-'):
+ return sys.replace('riscv64gc-', 'riscv64-', 1)
+ return sys
+
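Usage sketch for the helper above, given the function as just defined (the input triples are assumptions):

    assert rust_sys_to_llvm_target('riscv64gc-poky-linux') == 'riscv64-poky-linux'
    assert rust_sys_to_llvm_target('riscv32gc-poky-linux-musl') == 'riscv32-poky-linux-musl'
    assert rust_sys_to_llvm_target('x86_64-poky-linux') == 'x86_64-poky-linux'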
# generates our target CPU value
def llvm_cpu(d):
cpu = d.getVar('PACKAGE_ARCH')
@@ -272,13 +297,15 @@ def llvm_cpu(d):
trans['x86-64'] = "x86-64"
trans['i686'] = "i686"
trans['i586'] = "i586"
- trans['powerpc'] = "powerpc"
trans['mips64'] = "mips64"
trans['mips64el'] = "mips64"
+ trans['powerpc64le'] = "ppc64le"
+ trans['powerpc64'] = "ppc64"
trans['riscv64'] = "generic-rv64"
trans['riscv32'] = "generic-rv32"
+ trans['loongarch64'] = "la464"
- if target in ["mips", "mipsel"]:
+ if target in ["mips", "mipsel", "powerpc"]:
feat = frozenset(d.getVar('TUNE_FEATURES').split())
if "mips32r2" in feat:
trans['mipsel'] = "mips32r2"
@@ -286,6 +313,8 @@ def llvm_cpu(d):
elif "mips32" in feat:
trans['mipsel'] = "mips32"
trans['mips'] = "mips32"
+ elif "ppc7400" in feat:
+ trans['powerpc'] = "7400"
try:
return trans[cpu]
@@ -333,7 +362,7 @@ def rust_gen_target(d, thing, wd, arch):
# build tspec
tspec = {}
- tspec['llvm-target'] = rustsys
+ tspec['llvm-target'] = rust_sys_to_llvm_target(rustsys)
tspec['data-layout'] = d.getVarFlag('DATA_LAYOUT', arch_abi)
if tspec['data-layout'] is None:
bb.fatal("No rust target defined for %s" % arch_abi)
@@ -342,7 +371,10 @@ def rust_gen_target(d, thing, wd, arch):
tspec['target-c-int-width'] = d.getVarFlag('TARGET_C_INT_WIDTH', arch_abi)
tspec['target-endian'] = d.getVarFlag('TARGET_ENDIAN', arch_abi)
tspec['arch'] = arch_to_rust_target_arch(rust_arch)
- tspec['os'] = "linux"
+ if "baremetal" in d.getVar('TCLIBC'):
+ tspec['os'] = "none"
+ else:
+ tspec['os'] = "linux"
if "musl" in tspec['llvm-target']:
tspec['env'] = "musl"
else:
@@ -351,6 +383,8 @@ def rust_gen_target(d, thing, wd, arch):
tspec['llvm-abiname'] = "lp64d"
if "riscv32" in tspec['llvm-target']:
tspec['llvm-abiname'] = "ilp32d"
+ if "loongarch64" in tspec['llvm-target']:
+ tspec['llvm-abiname'] = "lp64d"
tspec['vendor'] = "unknown"
tspec['target-family'] = "unix"
tspec['linker'] = "{}{}gcc".format(d.getVar('CCACHE'), prefix)
@@ -362,7 +396,6 @@ def rust_gen_target(d, thing, wd, arch):
tspec['linker-is-gnu'] = True
tspec['linker-flavor'] = "gcc"
tspec['has-rpath'] = True
- tspec['has-elf-tls'] = True
tspec['position-independent-executables'] = True
tspec['panic-strategy'] = d.getVar("RUST_PANIC_STRATEGY")
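For reference, the spec assembled above serializes to JSON roughly like this for a riscv64 glibc target (all values here are illustrative assumptions, not the output of a real build):

    tspec = {
        'llvm-target': 'riscv64-poky-linux',  # via rust_sys_to_llvm_target()
        'data-layout': 'e-m:e-p:64:64-i64:64-i128:128-n64-S128',
        'target-pointer-width': '64',
        'target-c-int-width': '64',
        'target-endian': 'little',
        'arch': 'riscv64',
        'os': 'linux',            # 'none' when TCLIBC is baremetal
        'env': 'gnu',
        'llvm-abiname': 'lp64d',
        'vendor': 'unknown',
        'target-family': 'unix',
        'panic-strategy': 'unwind',
    }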
@@ -389,3 +422,19 @@ python do_rust_gen_targets () {
addtask rust_gen_targets after do_patch before do_compile
do_rust_gen_targets[dirs] += "${RUST_TARGETS_DIR}"
+# For building target C dependencies use only the compiler parameters defined
+# in OE and ignore the CC crate defaults, which conflict with the OE ones in
+# some cases.
+# https://github.com/rust-lang/cc-rs#external-configuration-via-environment-variables
+# Some CC crate compiler flags are still required.
+# We apply them conditionally in rust wrappers.
+
+CRATE_CC_FLAGS:class-native = ""
+CRATE_CC_FLAGS:class-nativesdk = ""
+CRATE_CC_FLAGS:class-target = " -ffunction-sections -fdata-sections -fPIC"
+
+do_compile:prepend:class-target() {
+ export CRATE_CC_NO_DEFAULTS=1
+}
+do_install:prepend:class-target() {
+ export CRATE_CC_NO_DEFAULTS=1
+}
diff --git a/meta/classes-recipe/scons.bbclass b/meta/classes-recipe/scons.bbclass
index 5f0d4a910b..d20a78dc6e 100644
--- a/meta/classes-recipe/scons.bbclass
+++ b/meta/classes-recipe/scons.bbclass
@@ -9,7 +9,9 @@ inherit python3native
DEPENDS += "python3-scons-native"
EXTRA_OESCONS ?= ""
-
+# The value below is derived from $(getconf ARG_MAX)
+SCONS_MAXLINELENGTH ?= "MAXLINELENGTH=2097152"
+EXTRA_OESCONS:append = " ${SCONS_MAXLINELENGTH}"
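The 2097152 default corresponds to a common Linux ARG_MAX; a quick way to check the host's actual limit from Python (the printed value is host-specific):

    import os
    print(os.sysconf('SC_ARG_MAX'))  # commonly 2097152 on Linux hosts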
do_configure() {
if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then
if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
@@ -31,4 +33,8 @@ scons_do_install() {
die "scons install execution failed."
}
+do_configure[vardepsexclude] = "SCONS_MAXLINELENGTH"
+do_compile[vardepsexclude] = "SCONS_MAXLINELENGTH"
+do_install[vardepsexclude] = "SCONS_MAXLINELENGTH"
+
EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes-recipe/setuptools3-base.bbclass b/meta/classes-recipe/setuptools3-base.bbclass
index 21b688ced0..27af6abc58 100644
--- a/meta/classes-recipe/setuptools3-base.bbclass
+++ b/meta/classes-recipe/setuptools3-base.bbclass
@@ -4,9 +4,9 @@
# SPDX-License-Identifier: MIT
#
-DEPENDS:append:class-target = " ${PYTHON_PN}-native ${PYTHON_PN}"
-DEPENDS:append:class-nativesdk = " ${PYTHON_PN}-native ${PYTHON_PN}"
-RDEPENDS:${PN}:append:class-target = " ${PYTHON_PN}-core"
+DEPENDS:append:class-target = " python3-native python3"
+DEPENDS:append:class-nativesdk = " python3-native python3"
+RDEPENDS:${PN}:append:class-target = " python3-core"
export STAGING_INCDIR
export STAGING_LIBDIR
@@ -23,15 +23,8 @@ export CCSHARED = "-fPIC -DPIC"
# the python executable
export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
-FILES:${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
+FILES:${PN} += "${PYTHON_SITEPACKAGES_DIR}"
+FILES:${PN}-staticdev += "${PYTHON_SITEPACKAGES_DIR}/*.a"
+FILES:${PN}-dev += "${PYTHON_SITEPACKAGES_DIR}/*.la"
-FILES:${PN}-staticdev += "\
- ${PYTHON_SITEPACKAGES_DIR}/*.a \
-"
-FILES:${PN}-dev += "\
- ${datadir}/pkgconfig \
- ${libdir}/pkgconfig \
- ${PYTHON_SITEPACKAGES_DIR}/*.la \
-"
inherit python3native python3targetconfig
-
diff --git a/meta/classes-recipe/setuptools3.bbclass b/meta/classes-recipe/setuptools3.bbclass
index 4c6e79ee9a..d71a089539 100644
--- a/meta/classes-recipe/setuptools3.bbclass
+++ b/meta/classes-recipe/setuptools3.bbclass
@@ -21,9 +21,9 @@ setuptools3_do_compile() {
NO_FETCH_BUILD=1 \
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ ${STAGING_BINDIR_NATIVE}/python3-native/python3 setup.py \
bdist_wheel --verbose --dist-dir ${PEP517_WHEEL_PATH} ${SETUPTOOLS_BUILD_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py bdist_wheel ${SETUPTOOLS_BUILD_ARGS}' execution failed."
+ bbfatal_log "'python3 setup.py bdist_wheel ${SETUPTOOLS_BUILD_ARGS}' execution failed."
}
setuptools3_do_compile[vardepsexclude] = "MACHINE"
do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
diff --git a/meta/classes-recipe/setuptools3_legacy.bbclass b/meta/classes-recipe/setuptools3_legacy.bbclass
index 21748f922a..264b1f5cfb 100644
--- a/meta/classes-recipe/setuptools3_legacy.bbclass
+++ b/meta/classes-recipe/setuptools3_legacy.bbclass
@@ -38,9 +38,9 @@ setuptools3_legacy_do_compile() {
NO_FETCH_BUILD=1 \
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ ${STAGING_BINDIR_NATIVE}/python3-native/python3 setup.py \
build --build-base=${B} ${SETUPTOOLS_BUILD_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py build ${SETUPTOOLS_BUILD_ARGS}' execution failed."
+ bbfatal_log "'python3 setup.py build ${SETUPTOOLS_BUILD_ARGS}' execution failed."
}
setuptools3_legacy_do_compile[vardepsexclude] = "MACHINE"
@@ -49,10 +49,10 @@ setuptools3_legacy_do_install() {
install -d ${D}${PYTHON_SITEPACKAGES_DIR}
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR}:$PYTHONPATH \
+ ${STAGING_BINDIR_NATIVE}/python3-native/python3 setup.py \
build --build-base=${B} install --skip-build ${SETUPTOOLS_INSTALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${SETUPTOOLS_INSTALL_ARGS}' execution failed."
+ bbfatal_log "'python3 setup.py install ${SETUPTOOLS_INSTALL_ARGS}' execution failed."
# support filenames with *spaces*
find ${D} -name "*.py" -exec grep -q ${D} {} \; \
diff --git a/meta/classes-recipe/siteinfo.bbclass b/meta/classes-recipe/siteinfo.bbclass
index d31c9b2571..25b53d929a 100644
--- a/meta/classes-recipe/siteinfo.bbclass
+++ b/meta/classes-recipe/siteinfo.bbclass
@@ -39,6 +39,8 @@ def siteinfo_data_for_machine(arch, os, d):
"i686": "endian-little bit-32 ix86-common",
"ia64": "endian-little bit-64",
"lm32": "endian-big bit-32",
+ "loongarch32": "endian-little bit-32 loongarch",
+ "loongarch64": "endian-little bit-64 loongarch",
"m68k": "endian-big bit-32",
"microblaze": "endian-big bit-32 microblaze-common",
"microblazeel": "endian-little bit-32 microblaze-common",
@@ -71,6 +73,8 @@ def siteinfo_data_for_machine(arch, os, d):
osinfo = {
"darwin": "common-darwin",
"darwin9": "common-darwin",
+ "darwin19": "common-darwin",
+ "darwin21": "common-darwin",
"linux": "common-linux common-glibc",
"linux-gnu": "common-linux common-glibc",
"linux-gnu_ilp32": "common-linux common-glibc",
@@ -97,6 +101,8 @@ def siteinfo_data_for_machine(arch, os, d):
"arm-linux-musleabi": "arm-linux",
"armeb-linux-gnueabi": "armeb-linux",
"armeb-linux-musleabi": "armeb-linux",
+ "loongarch32-linux": "loongarch32-linux",
+ "loongarch64-linux": "loongarch64-linux",
"microblazeel-linux" : "microblaze-linux",
"microblazeel-linux-musl" : "microblaze-linux",
"mips-linux-musl": "mips-linux",
@@ -126,6 +132,8 @@ def siteinfo_data_for_machine(arch, os, d):
"x86_64-cygwin": "bit-64",
"x86_64-darwin": "bit-64",
"x86_64-darwin9": "bit-64",
+ "x86_64-darwin19": "bit-64",
+ "x86_64-darwin21": "bit-64",
"x86_64-linux": "bit-64",
"x86_64-linux-musl": "x86_64-linux bit-64",
"x86_64-linux-muslx32": "bit-32 ix86-common x32-linux",
@@ -213,20 +221,6 @@ def siteinfo_get_files(d, sysrootcache=False):
# This would show up as breaking sstatetests.SStateTests.test_sstate_32_64_same_hash for example
searched = []
- if not sysrootcache:
- return sitefiles, searched
-
- # Now check for siteconfig cache files in sysroots
- path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
- if path_siteconfig and os.path.isdir(path_siteconfig):
- for i in os.listdir(path_siteconfig):
- if not i.endswith("_config"):
- continue
- filename = os.path.join(path_siteconfig, i)
- sitefiles.append(filename)
return sitefiles, searched
-#
-# Make some information available via variables
-#
-SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
+
diff --git a/meta/classes-recipe/systemd-boot-cfg.bbclass b/meta/classes-recipe/systemd-boot-cfg.bbclass
index 366dd23738..12da41ebad 100644
--- a/meta/classes-recipe/systemd-boot-cfg.bbclass
+++ b/meta/classes-recipe/systemd-boot-cfg.bbclass
@@ -35,7 +35,7 @@ python build_efi_cfg() {
bb.fatal('Unable to open %s' % cfile)
cfgfile.write('# Automatically created by OE\n')
- cfgfile.write('default %s\n' % (labels.split()[0]))
+ cfgfile.write('default %s.conf\n' % (labels.split()[0]))
timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT')
if timeout:
cfgfile.write('timeout %s\n' % timeout)
diff --git a/meta/classes-recipe/systemd.bbclass b/meta/classes-recipe/systemd.bbclass
index f6564c2b31..0f7e3b5a08 100644
--- a/meta/classes-recipe/systemd.bbclass
+++ b/meta/classes-recipe/systemd.bbclass
@@ -85,7 +85,7 @@ python systemd_populate_packages() {
def systemd_check_package(pkg_systemd):
packages = d.getVar('PACKAGES')
if not pkg_systemd in packages.split():
- bb.error('%s does not appear in package list, please add it' % pkg_systemd)
+ bb.error('%s is marked for packaging systemd scripts, but it does not appear in package list, please add it to PACKAGES or adjust SYSTEMD_PACKAGES accordingly' % pkg_systemd)
def systemd_generate_package_scripts(pkg):
@@ -152,6 +152,7 @@ python systemd_populate_packages() {
def systemd_check_services():
searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),]
searchpaths.append(d.getVar("systemd_system_unitdir"))
+ searchpaths.append(d.getVar("systemd_user_unitdir"))
systemd_packages = d.getVar('SYSTEMD_PACKAGES')
keys = 'Also'
@@ -169,7 +170,7 @@ python systemd_populate_packages() {
base = service[:at] + '@' + service[ext:]
for path in searchpaths:
- if os.path.exists(oe.path.join(d.getVar("D"), path, service)):
+ if os.path.lexists(oe.path.join(d.getVar("D"), path, service)):
path_found = path
break
elif base is not None:
@@ -205,7 +206,7 @@ python systemd_populate_packages() {
systemd_check_services()
}
-PACKAGESPLITFUNCS:prepend = "systemd_populate_packages "
+PACKAGESPLITFUNCS =+ "systemd_populate_packages"
python rm_systemd_unitdir (){
import shutil
diff --git a/meta/classes-recipe/testexport.bbclass b/meta/classes-recipe/testexport.bbclass
new file mode 100644
index 0000000000..572f5d9e76
--- /dev/null
+++ b/meta/classes-recipe/testexport.bbclass
@@ -0,0 +1,176 @@
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+# testexport.bbclass allows executing runtime tests outside the OE environment.
+# Most of the tests are commands run on the target image over ssh.
+# To use it, add testexport to the global inherit and call your target image with -c testexport.
+# You can try it out like this:
+# - First build an image, e.g. core-image-sato
+# - Add IMAGE_CLASSES += "testexport" in local.conf
+# - Then run bitbake core-image-sato -c testexport. That will generate the
+#   directory structure to execute the runtime tests using runexported.py.
+#
+# For more information on TEST_SUITES check testimage class.
+
+inherit testimage
+
+TEST_LOG_DIR ?= "${WORKDIR}/testexport"
+TEST_EXPORT_DIR ?= "${TMPDIR}/testexport/${PN}"
+TEST_EXPORT_PACKAGED_DIR ?= "packages/packaged"
+TEST_EXPORT_EXTRACTED_DIR ?= "packages/extracted"
+
+TEST_TARGET ?= "simpleremote"
+TEST_TARGET_IP ?= ""
+TEST_SERVER_IP ?= ""
+
+require conf/testexport.conf
+
+TEST_EXPORT_SDK_ENABLED ?= "0"
+
+TEST_EXPORT_DEPENDS = ""
+TEST_EXPORT_DEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
+TEST_EXPORT_DEPENDS += "${@bb.utils.contains('TEST_EXPORT_SDK_ENABLED', '1', 'testexport-tarball:do_populate_sdk', '', d)}"
+TEST_EXPORT_LOCK = "${TMPDIR}/testimage.lock"
+
+addtask testexport
+do_testexport[nostamp] = "1"
+do_testexport[depends] += "${TEST_EXPORT_DEPENDS} ${TESTIMAGEDEPENDS}"
+do_testexport[lockfiles] += "${TEST_EXPORT_LOCK}"
+
+python do_testexport() {
+ testexport_main(d)
+}
+
+def testexport_main(d):
+ import json
+ import logging
+
+ from oeqa.runtime.context import OERuntimeTestContext
+ from oeqa.runtime.context import OERuntimeTestContextExecutor
+
+ image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
+ d.getVar('IMAGE_LINK_NAME')))
+
+ tdname = "%s.testdata.json" % image_name
+ td = json.load(open(tdname, "r"))
+
+ logger = logging.getLogger("BitBake")
+
+ target = OERuntimeTestContextExecutor.getTarget(
+ d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"),
+ d.getVar("TEST_SERVER_IP"))
+
+ image_manifest = "%s.manifest" % image_name
+ image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
+
+ extract_dir = d.getVar("TEST_EXTRACTED_DIR")
+
+ tc = OERuntimeTestContext(td, logger, target, image_packages, extract_dir)
+
+ copy_needed_files(d, tc)
+
+def copy_needed_files(d, tc):
+ import shutil
+ import oe.path
+
+ from oeqa.utils.package_manager import _get_json_file
+ from oeqa.core.utils.test import getSuiteCasesFiles
+
+ export_path = d.getVar('TEST_EXPORT_DIR')
+ corebase_path = d.getVar('COREBASE')
+
+ # Clean everything before starting
+ oe.path.remove(export_path)
+ bb.utils.mkdirhier(os.path.join(export_path, 'lib', 'oeqa'))
+
+ # The source of files to copy are relative to 'COREBASE' directory
+ # The destination is relative to 'TEST_EXPORT_DIR'
+ # Because we are squashing the libraries, we need to remove
+ # the layer/script directory
+ files_to_copy = [ os.path.join('meta', 'lib', 'oeqa', 'core'),
+ os.path.join('meta', 'lib', 'oeqa', 'runtime'),
+ os.path.join('meta', 'lib', 'oeqa', 'files'),
+ os.path.join('meta', 'lib', 'oeqa', 'utils'),
+ os.path.join('scripts', 'oe-test'),
+ os.path.join('scripts', 'lib', 'argparse_oe.py'),
+ os.path.join('scripts', 'lib', 'scriptutils.py'), ]
+
+ for f in files_to_copy:
+ src = os.path.join(corebase_path, f)
+ dst = os.path.join(export_path, f.split('/', 1)[-1])
+ if os.path.isdir(src):
+ oe.path.copytree(src, dst)
+ else:
+ shutil.copy2(src, dst)
+
+ # Remove cases and just copy the ones specified
+ cases_path = os.path.join(export_path, 'lib', 'oeqa', 'runtime', 'cases')
+ oe.path.remove(cases_path)
+ bb.utils.mkdirhier(cases_path)
+ test_paths = get_runtime_paths(d)
+ test_modules = d.getVar('TEST_SUITES').split()
+ tc.loadTests(test_paths, modules=test_modules)
+ for f in getSuiteCasesFiles(tc.suites):
+ shutil.copy2(f, cases_path)
+ json_file = _get_json_file(f)
+ if json_file:
+ shutil.copy2(json_file, cases_path)
+
+ # Copy test data
+ image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
+ d.getVar('IMAGE_LINK_NAME')))
+ image_manifest = "%s.manifest" % image_name
+ tdname = "%s.testdata.json" % image_name
+ test_data_path = os.path.join(export_path, 'data')
+ bb.utils.mkdirhier(test_data_path)
+ shutil.copy2(image_manifest, os.path.join(test_data_path, 'manifest'))
+ shutil.copy2(tdname, os.path.join(test_data_path, 'testdata.json'))
+
+ for subdir, dirs, files in os.walk(export_path):
+ for dir in dirs:
+ if dir == '__pycache__':
+ shutil.rmtree(os.path.join(subdir, dir))
+
+ # Create tar file for common parts of testexport
+ testexport_create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
+
+ # Copy packages needed for runtime testing
+ package_extraction(d, tc.suites)
+ test_pkg_dir = d.getVar("TEST_NEEDED_PACKAGES_DIR")
+ if os.path.isdir(test_pkg_dir) and os.listdir(test_pkg_dir):
+ export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "packages")
+ oe.path.copytree(test_pkg_dir, export_pkg_dir)
+ # Create tar file for packages needed by the DUT
+ testexport_create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
+
+ # Copy SDK
+ if d.getVar("TEST_EXPORT_SDK_ENABLED") == "1":
+ sdk_deploy = d.getVar("SDK_DEPLOY")
+ tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME")
+ tarball_path = os.path.join(sdk_deploy, tarball_name)
+ export_sdk_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"),
+ d.getVar("TEST_EXPORT_SDK_DIR"))
+ bb.utils.mkdirhier(export_sdk_dir)
+ shutil.copy2(tarball_path, export_sdk_dir)
+
+ # Create tar file for the sdk
+ testexport_create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
+
+ bb.plain("Exported tests to: %s" % export_path)
+
+def testexport_create_tarball(d, tar_name, src_dir):
+
+ import tarfile
+
+ tar_path = os.path.join(d.getVar("TEST_EXPORT_DIR"), tar_name)
+ current_dir = os.getcwd()
+ src_dir = src_dir.rstrip('/')
+ dir_name = os.path.dirname(src_dir)
+ base_name = os.path.basename(src_dir)
+
+ os.chdir(dir_name)
+ tar = tarfile.open(tar_path, "w:gz")
+ tar.add(base_name)
+ tar.close()
+ os.chdir(current_dir)
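Note on the helper above: tarfile can produce the same archive layout without the chdir dance by using arcname; a minimal equivalent sketch (the paths are assumptions):

    import os
    import tarfile

    src_dir = '/tmp/testexport/core-image-sato/packages'
    tar_path = '/tmp/testexport/core-image-sato/testexport_packages_qemux86.tar.gz'
    with tarfile.open(tar_path, 'w:gz') as tar:
        # arcname keeps only the basename, like the os.chdir() approach above
        tar.add(src_dir, arcname=os.path.basename(src_dir))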
diff --git a/meta/classes-recipe/testimage.bbclass b/meta/classes-recipe/testimage.bbclass
index 8d2fab21df..ed0d87b7a7 100644
--- a/meta/classes-recipe/testimage.bbclass
+++ b/meta/classes-recipe/testimage.bbclass
@@ -18,6 +18,18 @@ inherit image-artifact-names
TESTIMAGE_AUTO ??= "0"
+# When any test fails, TESTIMAGE_FAILED_QA_ARTIFACTS will be parsed and, for
+# each entry in it, if the artifact the path points to exists on the target,
+# it will be retrieved onto the host
+
+TESTIMAGE_FAILED_QA_ARTIFACTS = "\
+ ${localstatedir}/log \
+ ${sysconfdir}/version \
+ ${sysconfdir}/os-release"
+
+# If some ptests are run and fail, retrieve corresponding directories
+TESTIMAGE_FAILED_QA_ARTIFACTS += "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '${libdir}/${MCNAME}/ptest', '', d)}"
+
# You can set (or append to) TEST_SUITES in local.conf to select the tests
# which you want to run for your target.
# The test names are the module names in meta/lib/oeqa/runtime/cases.
@@ -98,34 +110,7 @@ TESTIMAGELOCK:qemuall = ""
TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
-TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
-
-testimage_dump_target () {
- top -bn1
- ps
- free
- df
- # The next command will export the default gateway IP
- export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
- ping -c3 $DEFAULT_GATEWAY
- dmesg
- netstat -an
- ip address
- # Next command will dump logs from /var/log/
- find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
-}
-
-testimage_dump_host () {
- top -bn1
- iostat -x -z -N -d -p ALL 20 2
- ps -ef
- free
- df
- memstat
- dmesg
- ip -s link
- netstat -an
-}
+TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR_IMAGE IMAGE_LINK_NAME"
testimage_dump_monitor () {
query-status
@@ -164,13 +149,6 @@ def get_testimage_configuration(d, test_type, machine):
return configuration
get_testimage_configuration[vardepsexclude] = "DATETIME"
-def get_testimage_json_result_dir(d):
- json_result_dir = os.path.join(d.getVar("LOG_DIR"), 'oeqa')
- custom_json_result_dir = d.getVar("OEQA_JSON_RESULT_DIR")
- if custom_json_result_dir:
- json_result_dir = custom_json_result_dir
- return json_result_dir
-
def get_testimage_result_id(configuration):
return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['MACHINE'], configuration['STARTTIME'])
@@ -189,14 +167,9 @@ def get_testimage_boot_patterns(d):
search_login_succeeded,search_cmd_finished\n Make sure your TESTIMAGE_BOOT_PATTERNS=%s \
contains an accepted flag.' % d.getVar('TESTIMAGE_BOOT_PATTERNS'))
return
- # We know boot prompt is searched through in binary format, others might be expressions
- if flag == 'search_reached_prompt':
- boot_patterns[flag] = flagval.encode()
- else:
- boot_patterns[flag] = flagval.encode().decode('unicode-escape')
+ boot_patterns[flag] = flagval.encode().decode('unicode-escape')
return boot_patterns
-
def testimage_main(d):
import os
import json
@@ -210,6 +183,8 @@ def testimage_main(d):
from oeqa.core.target.qemu import supported_fstypes
from oeqa.core.utils.test import getSuiteCases
from oeqa.utils import make_logger_bitbake_compatible
+ from oeqa.utils import get_json_result_dir
+ from oeqa.utils.postactions import run_failed_tests_post_actions
def sigterm_exception(signum, stackframe):
"""
@@ -240,12 +215,13 @@ def testimage_main(d):
with open(tdname, "r") as f:
td = json.load(f)
except FileNotFoundError as err:
- bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
+ bb.fatal('File %s not found (%s).\nHave you built the image with IMAGE_CLASSES += "testimage" in the conf/local.conf?' % (tdname, err))
# Some variables need to be updated (mostly paths) with the
# ones of the current environment because some tests require them.
for var in d.getVar('TESTIMAGE_UPDATE_VARS').split():
td[var] = d.getVar(var)
+ td['ORIGPATH'] = d.getVar("BB_ORIGENV").getVar("PATH")
image_manifest = "%s.manifest" % image_name
image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
@@ -296,7 +272,7 @@ def testimage_main(d):
ovmf = d.getVar("QEMU_USE_OVMF")
slirp = False
- if d.getVar("QEMU_USE_SLIRP"):
+ if bb.utils.contains('TEST_RUNQEMUPARAMS', 'slirp', True, False, d):
slirp = True
# TODO: We use the current implementation of qemu runner because of
@@ -326,7 +302,6 @@ def testimage_main(d):
target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
target_kwargs['testimage_dump_monitor'] = d.getVar("testimage_dump_monitor") or ""
- target_kwargs['testimage_dump_target'] = d.getVar("testimage_dump_target") or ""
def export_ssh_agent(d):
import os
@@ -343,19 +318,24 @@ def testimage_main(d):
# runtime use network for download projects for build
export_proxies(d)
- # we need the host dumper in test context
- host_dumper = OERuntimeTestContextExecutor.getHostDumper(
- d.getVar("testimage_dump_host"),
- d.getVar("TESTIMAGE_DUMP_DIR"))
+ if slirp:
+ # Default to 127.0.0.1 and let the runner identify the port forwarding
+ # (as OEQemuTarget does), but allow overriding.
+ target_ip = d.getVar("TEST_TARGET_IP") or "127.0.0.1"
+ # Default to 10.0.2.2 as this is the IP that the guest has with the
+ # default qemu slirp networking configuration, but allow overriding.
+ server_ip = d.getVar("TEST_SERVER_IP") or "10.0.2.2"
+ else:
+ target_ip = d.getVar("TEST_TARGET_IP")
+ server_ip = d.getVar("TEST_SERVER_IP")
# the robot dance
target = OERuntimeTestContextExecutor.getTarget(
- d.getVar("TEST_TARGET"), logger, d.getVar("TEST_TARGET_IP"),
- d.getVar("TEST_SERVER_IP"), **target_kwargs)
+ d.getVar("TEST_TARGET"), logger, target_ip,
+ server_ip, **target_kwargs)
# test context
- tc = OERuntimeTestContext(td, logger, target, host_dumper,
- image_packages, extract_dir)
+ tc = OERuntimeTestContext(td, logger, target, image_packages, extract_dir)
# Load tests before starting the target
test_paths = get_runtime_paths(d)
@@ -387,6 +367,8 @@ def testimage_main(d):
pass
results = tc.runTests()
complete = True
+ if results.hasAnyFailingTest():
+ run_failed_tests_post_actions(d, tc)
except (KeyboardInterrupt, BlockingIOError) as err:
if isinstance(err, KeyboardInterrupt):
bb.error('testimage interrupted, shutting down...')
@@ -402,14 +384,14 @@ def testimage_main(d):
# Show results (if we have them)
if results:
configuration = get_testimage_configuration(d, 'runtime', machine)
- results.logDetails(get_testimage_json_result_dir(d),
+ results.logDetails(get_json_result_dir(d),
configuration,
get_testimage_result_id(configuration),
dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
results.logSummary(pn)
# Copy additional logs to tmp/log/oeqa so it's easier to find them
- targetdir = os.path.join(get_testimage_json_result_dir(d), d.getVar("PN"))
+ targetdir = os.path.join(get_json_result_dir(d), d.getVar("PN"))
os.makedirs(targetdir, exist_ok=True)
os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog)))
os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME'))))
@@ -472,10 +454,7 @@ def create_rpm_index(d):
package_list = glob.glob(idx_path + "*/*.rpm")
for pkg in package_list:
- if os.path.basename(pkg).startswith(("curl-ptest")):
- bb.utils.remove(pkg)
-
- if not os.path.basename(pkg).startswith(("rpm", "run-postinsts", "busybox", "bash", "update-alternatives", "libc6", "curl", "musl")):
+ if not os.path.basename(pkg).startswith(("dnf-test-", "busybox", "update-alternatives", "libc6", "musl")):
bb.utils.remove(pkg)
bb.utils.unlockfile(lf)
diff --git a/meta/classes-recipe/toolchain-scripts.bbclass b/meta/classes-recipe/toolchain-scripts.bbclass
index 3cc823fe63..b59a295abc 100644
--- a/meta/classes-recipe/toolchain-scripts.bbclass
+++ b/meta/classes-recipe/toolchain-scripts.bbclass
@@ -16,6 +16,13 @@ DEBUG_PREFIX_MAP = ""
EXPORT_SDK_PS1 = "${@ 'export PS1=\\"%s\\"' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}"
+def siteinfo_with_prefix(d, prefix):
+ # Return a prefixed value from siteinfo
+ for item in siteinfo_data_for_machine(d.getVar("TARGET_ARCH"), d.getVar("TARGET_OS"), d):
+ if item.startswith(prefix):
+ return item.replace(prefix, "")
+ raise KeyError
+
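Worked example for the helper above (the siteinfo entries are assumptions): entries like "bit-64" and "endian-little" yield "64" and "little" respectively:

    def with_prefix_sketch(items, prefix):
        # Same lookup as above, over a plain list instead of siteinfo data
        for item in items:
            if item.startswith(prefix):
                return item[len(prefix):]
        raise KeyError(prefix)

    assert with_prefix_sketch(['endian-little', 'bit-64'], 'bit-') == '64'
    assert with_prefix_sketch(['endian-little', 'bit-64'], 'endian-') == 'little'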
# This function creates an environment-setup-script for use in a deployable SDK
toolchain_create_sdk_env_script () {
# Create environment setup script. Remember that $SDKTARGETSYSROOT should
@@ -37,7 +44,7 @@ toolchain_create_sdk_env_script () {
echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
echo '# Only disable this check if you absolutely know what you are doing!' >> $script
- echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
+ echo 'if [ ! -z "${LD_LIBRARY_PATH:-}" ]; then' >> $script
echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
@@ -53,7 +60,7 @@ toolchain_create_sdk_env_script () {
for i in ${CANADIANEXTRAOS}; do
EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
done
- echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+ echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':"$PATH"' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
@@ -63,6 +70,8 @@ toolchain_create_sdk_env_script () {
echo 'export OECORE_BASELIB="${baselib}"' >> $script
echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
+ echo 'export OECORE_TARGET_BITS="${@siteinfo_with_prefix(d, 'bit-')}"' >>$script
+ echo 'export OECORE_TARGET_ENDIAN="${@siteinfo_with_prefix(d, 'endian-')}"' >>$script
echo 'unset command_not_found_handle' >> $script
@@ -192,7 +201,6 @@ EOF
#we get the cached site config in the runtime
TOOLCHAIN_CONFIGSITE_NOCACHE = "${@' '.join(siteinfo_get_files(d)[0])}"
-TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
@@ -214,14 +222,8 @@ toolchain_create_sdk_siteconfig () {
sitefile=`echo $sitefile | tr / _`
sitefile=`cat ${STAGING_DIR_TARGET}/sysroot-providers/$sitefile`
esac
-
- if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
- cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
- fi
done
}
-# The immediate expansion above can result in unwanted path dependencies here
-toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
python __anonymous () {
import oe.classextend
diff --git a/meta/classes-recipe/uboot-config.bbclass b/meta/classes-recipe/uboot-config.bbclass
index 7ab006a20d..e55fc38b7c 100644
--- a/meta/classes-recipe/uboot-config.bbclass
+++ b/meta/classes-recipe/uboot-config.bbclass
@@ -19,6 +19,9 @@ def removesuffix(s, suffix):
return s[:-len(suffix)]
return s
+UBOOT_ENTRYPOINT ?= "20008000"
+UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
+
# Some versions of u-boot use .bin and others use .img. By default use .bin
# but enable individual recipes to change this value.
UBOOT_SUFFIX ??= "bin"
@@ -62,10 +65,6 @@ UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}"
UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${PV}-${PR}.${UBOOT_ENV_SUFFIX}"
UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}"
-# Default name of u-boot initial env, but enable individual recipes to change
-# this value.
-UBOOT_INITIAL_ENV ?= "${PN}-initial-env"
-
# U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf
# to find EXTLINUX conf file.
UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux"
@@ -80,6 +79,9 @@ SPL_MKIMAGE_DTCOPTS ??= ""
UBOOT_MKIMAGE ?= "uboot-mkimage"
UBOOT_MKIMAGE_SIGN ?= "${UBOOT_MKIMAGE}"
+# Signature activation - this requires KERNEL_IMAGETYPE = "fitImage"
+UBOOT_SIGN_ENABLE ?= "0"
+
# Arguments passed to mkimage for signing
UBOOT_MKIMAGE_SIGN_ARGS ?= ""
SPL_MKIMAGE_SIGN_ARGS ?= ""
@@ -88,6 +90,9 @@ SPL_MKIMAGE_SIGN_ARGS ?= ""
UBOOT_DTB ?= ""
UBOOT_DTB_BINARY ??= ""
+# uboot-fit_check_sign command
+UBOOT_FIT_CHECK_SIGN ?= "uboot-fit_check_sign"
+
python () {
ubootmachine = d.getVar("UBOOT_MACHINE")
ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
@@ -134,4 +139,10 @@ python () {
if not found:
raise bb.parse.SkipRecipe("The selected UBOOT_CONFIG key %s has no match in %s." % (ubootconfig, ubootconfigflags.keys()))
+
+ if len(ubootconfig) == 1:
+ d.setVar('KCONFIG_CONFIG_ROOTDIR', os.path.join(d.getVar("B"), d.getVar("UBOOT_MACHINE").strip()))
+ else:
+ # Disable menuconfig for multiple configs
+ d.setVar('KCONFIG_CONFIG_ENABLE_MENUCONFIG', "false")
}
diff --git a/meta/classes-recipe/uboot-extlinux-config.bbclass b/meta/classes-recipe/uboot-extlinux-config.bbclass
index 86a7d30ca0..0413e760bd 100644
--- a/meta/classes-recipe/uboot-extlinux-config.bbclass
+++ b/meta/classes-recipe/uboot-extlinux-config.bbclass
@@ -6,6 +6,8 @@
#
# External variables:
#
+# UBOOT_EXTLINUX - Set to "1" to enable generation
+# of extlinux.conf using this class.
# UBOOT_EXTLINUX_CONSOLE - Set to "console=ttyX" to change kernel boot
# default console.
# UBOOT_EXTLINUX_LABELS - A list of targets for the automatic config.
@@ -20,7 +22,10 @@
# UBOOT_EXTLINUX_TIMEOUT - Timeout before DEFAULT selection is made.
# Measured in 1/10 of a second.
# UBOOT_EXTLINUX_DEFAULT_LABEL - Target to be selected by default after
-# the timeout period
+# the timeout period.
+# UBOOT_EXTLINUX_MENU_TITLE - Menu title. If empty, MENU TITLE entry
+# will not be added to the output file.
+# UBOOT_EXTLINUX_CONFIG - Output file.
#
# If there's only one label system will boot automatically and menu won't be
# created. If you want to use more than one labels, e.g linux and alternate,
@@ -33,11 +38,11 @@
# UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default"
# UBOOT_EXTLINUX_TIMEOUT ??= "30"
#
-# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
-# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
+# UBOOT_EXTLINUX_KERNEL_IMAGE:default ??= "../zImage"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION:default ??= "Linux Default"
#
-# UBOOT_EXTLINUX_KERNEL_IMAGE_fallback ??= "../zImage-fallback"
-# UBOOT_EXTLINUX_MENU_DESCRIPTION_fallback ??= "Linux Fallback"
+# UBOOT_EXTLINUX_KERNEL_IMAGE:fallback ??= "../zImage-fallback"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION:fallback ??= "Linux Fallback"
#
# Results:
#
@@ -65,6 +70,7 @@ UBOOT_EXTLINUX_FDTDIR ??= "../"
UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
UBOOT_EXTLINUX_MENU_DESCRIPTION:linux ??= "${DISTRO_NAME}"
+UBOOT_EXTLINUX_MENU_TITLE ??= "Select the boot mode"
UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
@@ -92,10 +98,11 @@ python do_create_extlinux_config() {
with open(cfile, 'w') as cfgfile:
cfgfile.write('# Generic Distro Configuration file generated by OpenEmbedded\n')
- if len(labels.split()) > 1:
- cfgfile.write('menu title Select the boot mode\n')
+ menu_title = localdata.getVar('UBOOT_EXTLINUX_MENU_TITLE')
+ if len(labels.split()) > 1 and menu_title:
+ cfgfile.write('MENU TITLE %s\n' % (menu_title))
- timeout = localdata.getVar('UBOOT_EXTLINUX_TIMEOUT')
+ timeout = localdata.getVar('UBOOT_EXTLINUX_TIMEOUT')
if timeout:
cfgfile.write('TIMEOUT %s\n' % (timeout))
@@ -152,7 +159,7 @@ python do_create_extlinux_config() {
bb.fatal('Unable to open %s' % (cfile))
}
UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
-do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
+do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s:%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
addtask create_extlinux_config before do_install do_deploy after do_compile
diff --git a/meta/classes-recipe/uboot-sign.bbclass b/meta/classes-recipe/uboot-sign.bbclass
index debbf23ec6..c8e097f2f2 100644
--- a/meta/classes-recipe/uboot-sign.bbclass
+++ b/meta/classes-recipe/uboot-sign.bbclass
@@ -5,7 +5,7 @@
#
# This file is part of U-Boot verified boot support and is intended to be
-# inherited from u-boot recipe and from kernel-fitimage.bbclass.
+# inherited from the u-boot recipe.
#
# The signature procedure requires the user to generate an RSA key and
# certificate in a directory and to define the following variable:
@@ -22,19 +22,6 @@
#
# The signature support is limited to the use of CONFIG_OF_SEPARATE in U-Boot.
#
-# The tasks sequence is set as below, using DEPLOY_IMAGE_DIR as common place to
-# treat the device tree blob:
-#
-# * u-boot:do_install:append
-# Install UBOOT_DTB_BINARY to datadir, so that kernel can use it for
-# signing, and kernel will deploy UBOOT_DTB_BINARY after signs it.
-#
-# * virtual/kernel:do_assemble_fitimage
-# Sign the image
-#
-# * u-boot:do_deploy[postfuncs]
-# Deploy files like UBOOT_DTB_IMAGE, UBOOT_DTB_SYMLINK and others.
-#
# For more details on signature process, please refer to U-Boot documentation.
# We need some variables from u-boot-config
@@ -43,13 +30,13 @@ inherit uboot-config
# Enable use of a U-Boot fitImage
UBOOT_FITIMAGE_ENABLE ?= "0"
-# Signature activation - these require their respective fitImages
-UBOOT_SIGN_ENABLE ?= "0"
+# Signature activation - this requires UBOOT_FITIMAGE_ENABLE = "1"
SPL_SIGN_ENABLE ?= "0"
# Default value for deployment filenames.
UBOOT_DTB_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.dtb"
UBOOT_DTB_BINARY ?= "u-boot.dtb"
+UBOOT_DTB_SIGNED ?= "${UBOOT_DTB_BINARY}-signed"
UBOOT_DTB_SYMLINK ?= "u-boot-${MACHINE}.dtb"
UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.bin"
UBOOT_NODTB_BINARY ?= "u-boot-nodtb.bin"
@@ -63,6 +50,7 @@ UBOOT_FITIMAGE_SYMLINK ?= "u-boot-fitImage-${MACHINE}"
SPL_DIR ?= "spl"
SPL_DTB_IMAGE ?= "u-boot-spl-${MACHINE}-${PV}-${PR}.dtb"
SPL_DTB_BINARY ?= "u-boot-spl.dtb"
+SPL_DTB_SIGNED ?= "${SPL_DTB_BINARY}-signed"
SPL_DTB_SYMLINK ?= "u-boot-spl-${MACHINE}.dtb"
SPL_NODTB_IMAGE ?= "u-boot-spl-nodtb-${MACHINE}-${PV}-${PR}.bin"
SPL_NODTB_BINARY ?= "u-boot-spl-nodtb.bin"
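
Pulling the variables above together, a minimal configuration sketch for enabling a signed U-Boot fitImage plus kernel-FIT verification might look as follows; the key directory and key names are placeholders, not defaults from this class:

    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYDIR = "${TOPDIR}/keys"   # hypothetical key location
    UBOOT_SIGN_KEYNAME = "dev"             # hypothetical key name
    UBOOT_FITIMAGE_ENABLE = "1"
    SPL_SIGN_ENABLE = "1"
    SPL_SIGN_KEYDIR = "${TOPDIR}/keys"
    SPL_SIGN_KEYNAME = "spl-dev"
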
@@ -71,90 +59,81 @@ SPL_NODTB_SYMLINK ?= "u-boot-spl-nodtb-${MACHINE}.bin"
# U-Boot fitImage description
UBOOT_FIT_DESC ?= "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
-# Kernel / U-Boot fitImage Hash Algo
-FIT_HASH_ALG ?= "sha256"
+# U-Boot fitImage Hash Algo
UBOOT_FIT_HASH_ALG ?= "sha256"
-# Kernel / U-Boot fitImage Signature Algo
-FIT_SIGN_ALG ?= "rsa2048"
+# U-Boot fitImage Signature Algo
UBOOT_FIT_SIGN_ALG ?= "rsa2048"
-# Kernel / U-Boot fitImage Padding Algo
-FIT_PAD_ALG ?= "pkcs-1.5"
-
-# Generate keys for signing Kernel / U-Boot fitImage
-FIT_GENERATE_KEYS ?= "0"
+# Generate keys for signing U-Boot fitImage
UBOOT_FIT_GENERATE_KEYS ?= "0"
# Size of private keys in number of bits
-FIT_SIGN_NUMBITS ?= "2048"
UBOOT_FIT_SIGN_NUMBITS ?= "2048"
# args to openssl genrsa (Default is just the public exponent)
-FIT_KEY_GENRSA_ARGS ?= "-F4"
UBOOT_FIT_KEY_GENRSA_ARGS ?= "-F4"
# args to openssl req (Default is -batch for non interactive mode and
# -new for new certificate)
-FIT_KEY_REQ_ARGS ?= "-batch -new"
UBOOT_FIT_KEY_REQ_ARGS ?= "-batch -new"
# Standard format for public key certificate
-FIT_KEY_SIGN_PKCS ?= "-x509"
UBOOT_FIT_KEY_SIGN_PKCS ?= "-x509"
-# Functions on this bbclass can apply to either U-boot or Kernel,
-# depending on the scenario
-UBOOT_PN = "${@d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'}"
-KERNEL_PN = "${@d.getVar('PREFERRED_PROVIDER_virtual/kernel')}"
+# Length of the load/entry addresses in number of <u32> cells,
+# e.g. 1 for a 32-bit address, 2 for a 64-bit address
+UBOOT_FIT_ADDRESS_CELLS ?= "1"
+
+# This is only necessary for determining the signing configuration
+KERNEL_PN = "${PREFERRED_PROVIDER_virtual/kernel}"
+
+UBOOT_FIT_UBOOT_LOADADDRESS ?= "${UBOOT_LOADADDRESS}"
+UBOOT_FIT_UBOOT_ENTRYPOINT ?= "${UBOOT_ENTRYPOINT}"
-# We need u-boot-tools-native if we're creating a U-Boot fitImage
python() {
- if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1':
- depends = d.getVar("DEPENDS")
- depends = "%s u-boot-tools-native dtc-native" % depends
- d.setVar("DEPENDS", depends)
+ # We need u-boot-tools-native if we're creating a U-Boot fitImage
+ sign = d.getVar('UBOOT_SIGN_ENABLE') == '1'
+ if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1' or sign:
+ d.appendVar('DEPENDS', " u-boot-tools-native dtc-native")
+ if sign:
+ d.appendVar('DEPENDS', " " + d.getVar('KERNEL_PN'))
}
-concat_dtb_helper() {
- if [ -e "${UBOOT_DTB_BINARY}" ]; then
- ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
- ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
- fi
+concat_dtb() {
+ type="$1"
+ binary="$2"
- if [ -f "${UBOOT_NODTB_BINARY}" ]; then
- install ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
- ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
- ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
+ if [ -e "${UBOOT_DTB_BINARY}" ]; then
+ # Re-sign the kernel in order to add the keys to our dtb
+ ${UBOOT_MKIMAGE_SIGN} \
+ ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
+ -F -k "${UBOOT_SIGN_KEYDIR}" \
+ -K "${UBOOT_DTB_BINARY}" \
+ -r ${B}/fitImage-linux \
+ ${UBOOT_MKIMAGE_SIGN_ARGS}
+ # Verify the kernel image and u-boot dtb
+ ${UBOOT_FIT_CHECK_SIGN} \
+ -k "${UBOOT_DTB_BINARY}" \
+ -f ${B}/fitImage-linux
+ cp ${UBOOT_DTB_BINARY} ${UBOOT_DTB_SIGNED}
fi
# If we're not using a signed u-boot fit, concatenate SPL w/o DTB & U-Boot DTB
- # with public key (otherwise it will be deployed by the equivalent
- # concat_spl_dtb_helper function - cf. kernel-fitimage.bbclass for more details)
+ # with public key (otherwise U-Boot will be packaged by uboot_fitimage_assemble)
if [ "${SPL_SIGN_ENABLE}" != "1" ] ; then
- deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
- [ -e "$deployed_uboot_dtb_binary" ]; then
- oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
- install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
- elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
- cd ${DEPLOYDIR}
- cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
-
- if [ -n "${UBOOT_CONFIG}" ]
- then
- i=0
- j=0
- for config in ${UBOOT_MACHINE}; do
- i=$(expr $i + 1);
- for type in ${UBOOT_CONFIG}; do
- j=$(expr $j + 1);
- if [ $j -eq $i ]
- then
- cp ${UBOOT_IMAGE} ${B}/${CONFIG_B_PATH}/u-boot-$type.${UBOOT_SUFFIX}
- fi
- done
- done
+ [ -e "${UBOOT_DTB_BINARY}" ]; then
+ oe_runmake EXT_DTB="${UBOOT_DTB_SIGNED}" ${UBOOT_MAKE_TARGET}
+ if [ -n "${binary}" ]; then
+ cp ${binary} ${UBOOT_BINARYNAME}-${type}.${UBOOT_SUFFIX}
+ fi
+ elif [ -e "${UBOOT_NODTB_BINARY}" -a -e "${UBOOT_DTB_BINARY}" ]; then
+ if [ -n "${binary}" ]; then
+ cat ${UBOOT_NODTB_BINARY} ${UBOOT_DTB_SIGNED} | tee ${binary} > \
+ ${UBOOT_BINARYNAME}-${type}.${UBOOT_SUFFIX}
+ else
+ cat ${UBOOT_NODTB_BINARY} ${UBOOT_DTB_SIGNED} > ${UBOOT_BINARY}
fi
else
bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
@@ -162,120 +141,67 @@ concat_dtb_helper() {
fi
}
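
Assuming the default tool names from uboot-config.bbclass (uboot-mkimage and uboot-fit_check_sign; an assumption, since this class only references the variables), the signing path of concat_dtb amounts to roughly:

    uboot-mkimage -F -k "${UBOOT_SIGN_KEYDIR}" -K u-boot.dtb -r fitImage-linux
    uboot-fit_check_sign -k u-boot.dtb -f fitImage-linux
    cp u-boot.dtb u-boot.dtb-signed
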
-concat_spl_dtb_helper() {
-
- # We only deploy symlinks to the u-boot-spl.dtb,as the KERNEL_PN will
- # be responsible for deploying the real file
- if [ -e "${SPL_DIR}/${SPL_DTB_BINARY}" ] ; then
- ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_SYMLINK}
- ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_BINARY}
- fi
+deploy_dtb() {
+ type="$1"
- # Concatenate the SPL nodtb binary and u-boot.dtb
- deployed_spl_dtb_binary='${DEPLOY_DIR_IMAGE}/${SPL_DTB_IMAGE}'
- if [ -e "${DEPLOYDIR}/${SPL_NODTB_IMAGE}" -a -e "$deployed_spl_dtb_binary" ] ; then
- cd ${DEPLOYDIR}
- cat ${SPL_NODTB_IMAGE} $deployed_spl_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${SPL_BINARY} > ${SPL_IMAGE}
+ if [ -n "${type}" ]; then
+ uboot_dtb_binary="u-boot-${type}-${PV}-${PR}.dtb"
+ uboot_nodtb_binary="u-boot-nodtb-${type}-${PV}-${PR}.bin"
else
- bbwarn "Failure while adding public key to spl binary. Verified U-Boot boot won't be available."
+ uboot_dtb_binary="${UBOOT_DTB_IMAGE}"
+ uboot_nodtb_binary="${UBOOT_NODTB_IMAGE}"
fi
-}
-
-concat_dtb() {
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
- mkdir -p ${DEPLOYDIR}
- if [ -n "${UBOOT_CONFIG}" ]; then
- for config in ${UBOOT_MACHINE}; do
- CONFIG_B_PATH="$config"
- cd ${B}/$config
- concat_dtb_helper
- done
- else
- CONFIG_B_PATH=""
- cd ${B}
- concat_dtb_helper
+ if [ -e "${UBOOT_DTB_SIGNED}" ]; then
+ install -Dm644 ${UBOOT_DTB_SIGNED} ${DEPLOYDIR}/${uboot_dtb_binary}
+ if [ -n "${type}" ]; then
+ ln -sf ${uboot_dtb_binary} ${DEPLOYDIR}/${UBOOT_DTB_IMAGE}
fi
fi
-}
-concat_spl_dtb() {
- if [ "${SPL_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${SPL_DTB_BINARY}" ]; then
- mkdir -p ${DEPLOYDIR}
- if [ -n "${UBOOT_CONFIG}" ]; then
- for config in ${UBOOT_MACHINE}; do
- CONFIG_B_PATH="$config"
- cd ${B}/$config
- concat_spl_dtb_helper
- done
- else
- CONFIG_B_PATH=""
- cd ${B}
- concat_spl_dtb_helper
+ if [ -f "${UBOOT_NODTB_BINARY}" ]; then
+ install -Dm644 ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${uboot_nodtb_binary}
+ if [ -n "${type}" ]; then
+ ln -sf ${uboot_nodtb_binary} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
fi
fi
}
-
-# Install UBOOT_DTB_BINARY to datadir, so that kernel can use it for
-# signing, and kernel will deploy UBOOT_DTB_BINARY after signs it.
-install_helper() {
- if [ -f "${UBOOT_DTB_BINARY}" ]; then
- # UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
- # need both of them.
- install -Dm 0644 ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
- ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
+concat_spl_dtb() {
+ if [ -e "${SPL_DIR}/${SPL_NODTB_BINARY}" -a -e "${SPL_DIR}/${SPL_DTB_BINARY}" ] ; then
+ cat ${SPL_DIR}/${SPL_NODTB_BINARY} ${SPL_DIR}/${SPL_DTB_SIGNED} > "${SPL_BINARY}"
else
- bbwarn "${UBOOT_DTB_BINARY} not found"
+ bbwarn "Failure while adding public key to spl binary. Verified U-Boot boot won't be available."
fi
}
-# Install SPL dtb and u-boot nodtb to datadir,
-install_spl_helper() {
- if [ -f "${SPL_DIR}/${SPL_DTB_BINARY}" ]; then
- install -Dm 0644 ${SPL_DIR}/${SPL_DTB_BINARY} ${D}${datadir}/${SPL_DTB_IMAGE}
- ln -sf ${SPL_DTB_IMAGE} ${D}${datadir}/${SPL_DTB_BINARY}
- else
- bbwarn "${SPL_DTB_BINARY} not found"
- fi
- if [ -f "${UBOOT_NODTB_BINARY}" ] ; then
- install -Dm 0644 ${UBOOT_NODTB_BINARY} ${D}${datadir}/${UBOOT_NODTB_IMAGE}
- ln -sf ${UBOOT_NODTB_IMAGE} ${D}${datadir}/${UBOOT_NODTB_BINARY}
+deploy_spl_dtb() {
+ type="$1"
+
+ if [ -n "${type}" ]; then
+ spl_dtb_binary="u-boot-spl-${type}-${PV}-${PR}.dtb"
+ spl_nodtb_binary="u-boot-spl-nodtb-${type}-${PV}-${PR}.bin"
else
- bbwarn "${UBOOT_NODTB_BINARY} not found"
+ spl_dtb_binary="${SPL_DTB_IMAGE}"
+ spl_nodtb_binary="${SPL_NODTB_IMAGE}"
fi
- # We need to install a 'stub' u-boot-fitimage + its to datadir,
- # so that the KERNEL_PN can use the correct filename when
- # assembling and deploying them
- touch ${D}/${datadir}/${UBOOT_FITIMAGE_IMAGE}
- touch ${D}/${datadir}/${UBOOT_ITS_IMAGE}
-}
+ if [ -e "${SPL_DIR}/${SPL_DTB_SIGNED}" ] ; then
+ install -Dm644 ${SPL_DIR}/${SPL_DTB_SIGNED} ${DEPLOYDIR}/${spl_dtb_binary}
+ if [ -n "${type}" ]; then
+ ln -sf ${spl_dtb_binary} ${DEPLOYDIR}/${SPL_DTB_IMAGE}
+ fi
+ fi
-do_install:append() {
- if [ "${PN}" = "${UBOOT_PN}" ]; then
- if [ -n "${UBOOT_CONFIG}" ]; then
- for config in ${UBOOT_MACHINE}; do
- cd ${B}/$config
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
- [ -n "${UBOOT_DTB_BINARY}" ]; then
- install_helper
- fi
- if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
- install_spl_helper
- fi
- done
- else
- cd ${B}
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
- [ -n "${UBOOT_DTB_BINARY}" ]; then
- install_helper
- fi
- if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
- install_spl_helper
- fi
+ if [ -f "${SPL_DIR}/${SPL_NODTB_BINARY}" ] ; then
+ install -Dm644 ${SPL_DIR}/${SPL_NODTB_BINARY} ${DEPLOYDIR}/${spl_nodtb_binary}
+ if [ -n "${type}" ]; then
+ ln -sf ${spl_nodtb_binary} ${DEPLOYDIR}/${SPL_NODTB_IMAGE}
fi
fi
+
+ # For backwards compatibility...
+ install -Dm644 ${SPL_BINARY} ${DEPLOYDIR}/${SPL_IMAGE}
}
do_uboot_generate_rsa_keys() {
@@ -298,7 +224,7 @@ do_uboot_generate_rsa_keys() {
"${UBOOT_FIT_SIGN_NUMBITS}"
echo "Generating certificate for signing U-Boot fitImage"
- openssl req ${FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
+ openssl req ${UBOOT_FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
-key "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
-out "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt
fi
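
With the UBOOT_FIT_* knobs left at the defaults shown earlier, the key-generation task reduces to roughly the following openssl invocations (the key name "dev" is a placeholder for ${SPL_SIGN_KEYNAME}):

    openssl genrsa -F4 -out dev.key 2048
    openssl req -batch -new -x509 -key dev.key -out dev.crt
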
@@ -311,66 +237,57 @@ addtask uboot_generate_rsa_keys before do_uboot_assemble_fitimage after do_compi
# Create an ITS file for the U-Boot FIT, for use when
# we want to sign it so that the SPL can verify it
uboot_fitimage_assemble() {
- uboot_its="$1"
- uboot_nodtb_bin="$2"
- uboot_dtb="$3"
- uboot_bin="$4"
- spl_dtb="$5"
- uboot_csum="${UBOOT_FIT_HASH_ALG}"
- uboot_sign_algo="${UBOOT_FIT_SIGN_ALG}"
- uboot_sign_keyname="${SPL_SIGN_KEYNAME}"
-
- rm -f $uboot_its $uboot_bin
+ rm -f ${UBOOT_ITS} ${UBOOT_FITIMAGE_BINARY}
# First we create the ITS script
- cat << EOF >> $uboot_its
+ cat << EOF >> ${UBOOT_ITS}
/dts-v1/;
/ {
description = "${UBOOT_FIT_DESC}";
- #address-cells = <1>;
+ #address-cells = <${UBOOT_FIT_ADDRESS_CELLS}>;
images {
uboot {
description = "U-Boot image";
- data = /incbin/("$uboot_nodtb_bin");
+ data = /incbin/("${UBOOT_NODTB_BINARY}");
type = "standalone";
os = "u-boot";
arch = "${UBOOT_ARCH}";
compression = "none";
- load = <${UBOOT_LOADADDRESS}>;
- entry = <${UBOOT_ENTRYPOINT}>;
+ load = <${UBOOT_FIT_UBOOT_LOADADDRESS}>;
+ entry = <${UBOOT_FIT_UBOOT_ENTRYPOINT}>;
EOF
if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
- cat << EOF >> $uboot_its
+ cat << EOF >> ${UBOOT_ITS}
signature {
- algo = "$uboot_csum,$uboot_sign_algo";
- key-name-hint = "$uboot_sign_keyname";
+ algo = "${UBOOT_FIT_HASH_ALG},${UBOOT_FIT_SIGN_ALG}";
+ key-name-hint = "${SPL_SIGN_KEYNAME}";
};
EOF
fi
- cat << EOF >> $uboot_its
+ cat << EOF >> ${UBOOT_ITS}
};
fdt {
description = "U-Boot FDT";
- data = /incbin/("$uboot_dtb");
+ data = /incbin/("${UBOOT_DTB_BINARY}");
type = "flat_dt";
arch = "${UBOOT_ARCH}";
compression = "none";
EOF
if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
- cat << EOF >> $uboot_its
+ cat << EOF >> ${UBOOT_ITS}
signature {
- algo = "$uboot_csum,$uboot_sign_algo";
- key-name-hint = "$uboot_sign_keyname";
+ algo = "${UBOOT_FIT_HASH_ALG},${UBOOT_FIT_SIGN_ALG}";
+ key-name-hint = "${SPL_SIGN_KEYNAME}";
};
EOF
fi
- cat << EOF >> $uboot_its
+ cat << EOF >> ${UBOOT_ITS}
};
};
@@ -390,8 +307,8 @@ EOF
#
${UBOOT_MKIMAGE} \
${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
- -f $uboot_its \
- $uboot_bin
+ -f ${UBOOT_ITS} \
+ ${UBOOT_FITIMAGE_BINARY}
if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
#
@@ -400,78 +317,146 @@ EOF
${UBOOT_MKIMAGE_SIGN} \
${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${SPL_SIGN_KEYDIR}" \
- -K "$spl_dtb" \
- -r $uboot_bin \
+ -K "${SPL_DIR}/${SPL_DTB_BINARY}" \
+ -r ${UBOOT_FITIMAGE_BINARY} \
${SPL_MKIMAGE_SIGN_ARGS}
+ #
+ # Verify the U-boot FIT image and SPL dtb
+ #
+ ${UBOOT_FIT_CHECK_SIGN} \
+ -k "${SPL_DIR}/${SPL_DTB_BINARY}" \
+ -f ${UBOOT_FITIMAGE_BINARY}
fi
+ if [ -e "${SPL_DIR}/${SPL_DTB_BINARY}" ]; then
+ cp ${SPL_DIR}/${SPL_DTB_BINARY} ${SPL_DIR}/${SPL_DTB_SIGNED}
+ fi
+}
+
+uboot_assemble_fitimage_helper() {
+ type="$1"
+ binary="$2"
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
+ concat_dtb $type $binary
+ fi
+
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
+ uboot_fitimage_assemble
+ fi
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ] ; then
+ concat_spl_dtb
+ fi
}
do_uboot_assemble_fitimage() {
- # This function runs in KERNEL_PN context. The reason for that is that we need to
- # support the scenario where UBOOT_SIGN_ENABLE is placing the Kernel fitImage's
- # pubkey in the u-boot.dtb file, so that we can use it when building the U-Boot
- # fitImage itself.
- if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
- [ -n "${SPL_DTB_BINARY}" -a "${PN}" = "${KERNEL_PN}" ] ; then
- if [ "${UBOOT_SIGN_ENABLE}" != "1" ]; then
- # If we're not signing the Kernel fitImage, that means
- # we need to copy the u-boot.dtb from staging ourselves
- cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
- fi
- # As we are in the kernel context, we need to copy u-boot-spl.dtb from staging first.
- # Unfortunately, need to glob on top of ${SPL_DTB_BINARY} since _IMAGE and _SYMLINK
- # will contain U-boot's PV
- # Similarly, we need to get the filename for the 'stub' u-boot-fitimage + its in
- # staging so that we can use it for creating the image with the correct filename
- # in the KERNEL_PN context.
- # As for the u-boot.dtb (with fitimage's pubkey), it should come from the dependent
- # do_assemble_fitimage task
- cp -P ${STAGING_DATADIR}/u-boot-spl*.dtb ${B}
- cp -P ${STAGING_DATADIR}/u-boot-nodtb*.bin ${B}
- rm -rf ${B}/u-boot-fitImage-* ${B}/u-boot-its-*
- kernel_uboot_fitimage_name=`basename ${STAGING_DATADIR}/u-boot-fitImage-*`
- kernel_uboot_its_name=`basename ${STAGING_DATADIR}/u-boot-its-*`
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
+ cp "${STAGING_DIR_HOST}/sysroot-only/fitImage" "${B}/fitImage-linux"
+ fi
+
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ unset i j k
+ for config in ${UBOOT_MACHINE}; do
+ i=$(expr $i + 1);
+ for type in ${UBOOT_CONFIG}; do
+ j=$(expr $j + 1);
+ if [ $j -eq $i ]; then
+ break;
+ fi
+ done
+
+ for binary in ${UBOOT_BINARIES}; do
+ k=$(expr $k + 1);
+ if [ $k -eq $i ]; then
+ break;
+ fi
+ done
+
+ cd ${B}/${config}
+ uboot_assemble_fitimage_helper ${type} ${binary}
+ done
+ else
cd ${B}
- uboot_fitimage_assemble $kernel_uboot_its_name ${UBOOT_NODTB_BINARY} \
- ${UBOOT_DTB_BINARY} $kernel_uboot_fitimage_name \
- ${SPL_DTB_BINARY}
+ uboot_assemble_fitimage_helper "" ${UBOOT_BINARY}
fi
}
-addtask uboot_assemble_fitimage before do_deploy after do_compile
+addtask uboot_assemble_fitimage before do_install do_deploy after do_compile
-do_deploy:prepend:pn-${UBOOT_PN}() {
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
- concat_dtb
+deploy_helper() {
+ type="$1"
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_SIGNED}" ] ; then
+ deploy_dtb $type
fi
- if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
- # Deploy the u-boot-nodtb binary and symlinks...
- if [ -f "${SPL_DIR}/${SPL_NODTB_BINARY}" ] ; then
- echo "Copying u-boot-nodtb binary..."
- install -m 0644 ${SPL_DIR}/${SPL_NODTB_BINARY} ${DEPLOYDIR}/${SPL_NODTB_IMAGE}
- ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_SYMLINK}
- ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_BINARY}
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
+ if [ -n "${type}" ]; then
+ uboot_its_image="u-boot-its-${type}-${PV}-${PR}"
+ uboot_fitimage_image="u-boot-fitImage-${type}-${PV}-${PR}"
+ else
+ uboot_its_image="${UBOOT_ITS_IMAGE}"
+ uboot_fitimage_image="${UBOOT_FITIMAGE_IMAGE}"
fi
+ install -Dm644 ${UBOOT_FITIMAGE_BINARY} ${DEPLOYDIR}/$uboot_fitimage_image
+ install -Dm644 ${UBOOT_ITS} ${DEPLOYDIR}/$uboot_its_image
- # We only deploy the symlinks to the uboot-fitImage and uboot-its
- # images, as the KERNEL_PN will take care of deploying the real file
- ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_BINARY}
- ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_SYMLINK}
+ if [ -n "${type}" ]; then
+ ln -sf $uboot_its_image ${DEPLOYDIR}/${UBOOT_ITS_IMAGE}
+ ln -sf $uboot_fitimage_image ${DEPLOYDIR}/${UBOOT_FITIMAGE_IMAGE}
+ fi
+ fi
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_SIGNED}" ] ; then
+ deploy_spl_dtb $type
+ fi
+}
+
+do_deploy:prepend() {
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ unset i j k
+ for config in ${UBOOT_MACHINE}; do
+ i=$(expr $i + 1);
+ for type in ${UBOOT_CONFIG}; do
+ j=$(expr $j + 1);
+ if [ $j -eq $i ]; then
+ cd ${B}/${config}
+ deploy_helper ${type}
+ fi
+ done
+ unset j
+ done
+ unset i
+ else
+ cd ${B}
+ deploy_helper ""
+ fi
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
+ ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
+ ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
+ fi
+
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS}
ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS_SYMLINK}
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_BINARY}
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_SYMLINK}
fi
if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ] ; then
- concat_spl_dtb
+ ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_SYMLINK}
+ ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_BINARY}
+ ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_SYMLINK}
+ ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_BINARY}
fi
-
-
}
-do_deploy:append:pn-${UBOOT_PN}() {
+do_deploy:append() {
	# If we're creating a u-boot fitImage, point the u-boot.bin
	# symlink at it, since it might get used by image recipes
if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
@@ -479,27 +464,3 @@ do_deploy:append:pn-${UBOOT_PN}() {
ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_SYMLINK}
fi
}
-
-python () {
- if ( (d.getVar('UBOOT_SIGN_ENABLE') == '1'
- or d.getVar('UBOOT_FITIMAGE_ENABLE') == '1')
- and d.getVar('PN') == d.getVar('UBOOT_PN')
- and d.getVar('UBOOT_DTB_BINARY')):
-
- # Make "bitbake u-boot -cdeploy" deploys the signed u-boot.dtb
- # and/or the U-Boot fitImage
- d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % d.getVar('KERNEL_PN'))
-
- if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1' and d.getVar('PN') == d.getVar('KERNEL_PN'):
- # As the U-Boot fitImage is created by the KERNEL_PN, we need
- # to make sure that the u-boot-spl.dtb and u-boot-spl-nodtb.bin
- # files are in the staging dir for it's use
- d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % d.getVar('UBOOT_PN'))
-
- # If the Kernel fitImage is being signed, we need to
- # create the U-Boot fitImage after it
- if d.getVar('UBOOT_SIGN_ENABLE') == '1':
- d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage' % d.getVar('KERNEL_PN'))
- d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage_initramfs' % d.getVar('KERNEL_PN'))
-
-}
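
The index-matching loops in do_uboot_assemble_fitimage and do_deploy:prepend pair entries positionally, so with hypothetical values such as

    UBOOT_CONFIG = "sd emmc"
    UBOOT_MACHINE = "board_sd_defconfig board_emmc_defconfig"

the first defconfig is assembled and deployed as type "sd" and the second as type "emmc", each from its own ${B}/${config} directory.
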
diff --git a/meta/classes-recipe/update-alternatives.bbclass b/meta/classes-recipe/update-alternatives.bbclass
index 970d9bcd45..b153e1b297 100644
--- a/meta/classes-recipe/update-alternatives.bbclass
+++ b/meta/classes-recipe/update-alternatives.bbclass
@@ -5,7 +5,7 @@
#
# This class is used to help the alternatives system which is useful when
-# multiple sources provide same command. You can use update-alternatives
+# multiple sources provide the same command. You can use the update-alternatives
# command directly in your recipe, but in most cases this class simplifies
# that job.
#
@@ -35,7 +35,7 @@
# A non-default link to create for a target
# ALTERNATIVE_TARGET[name] = "target"
#
-# This is the name of the binary as it's been install by do_install
+# This is the name of the binary as it's been installed by do_install
# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
#
# A package specific link for a target
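
Putting these variables together, a typical declaration for a shell provider could look like this (a sketch; the priority and paths are hypothetical):

    ALTERNATIVE:${PN} = "sh"
    ALTERNATIVE_LINK_NAME[sh] = "/bin/sh"
    ALTERNATIVE_TARGET[sh] = "/bin/bash"
    ALTERNATIVE_PRIORITY = "100"
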
@@ -68,7 +68,7 @@ ALTERNATIVE_PRIORITY = "10"
# We need special processing for vardeps because it can not work on
# modified flag values. So we aggregate the flags into a new variable
-# and include that vairable in the set.
+# and include that variable in the set.
UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
@@ -86,10 +86,10 @@ def gen_updatealternativesvardeps(d):
for p in pkgs:
for v in vars:
- for flag in sorted((d.getVarFlags("%s_%s" % (v,p)) or {}).keys()):
+ for flag in sorted((d.getVarFlags("%s:%s" % (v,p)) or {}).keys()):
if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
continue
- d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
+ d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s:%s' % (v,p), flag, False)))
def ua_extend_depends(d):
if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
@@ -265,7 +265,7 @@ def update_alternatives_alt_targets(d, pkg):
return updates
-PACKAGESPLITFUNCS:prepend = "populate_packages_updatealternatives "
+PACKAGESPLITFUNCS =+ "populate_packages_updatealternatives"
python populate_packages_updatealternatives () {
if not update_alternatives_enabled(d):
diff --git a/meta/classes-recipe/update-rc.d.bbclass b/meta/classes-recipe/update-rc.d.bbclass
index cb2aaba57c..a19e704741 100644
--- a/meta/classes-recipe/update-rc.d.bbclass
+++ b/meta/classes-recipe/update-rc.d.bbclass
@@ -68,8 +68,8 @@ python __anonymous() {
update_rc_after_parse(d)
}
-PACKAGESPLITFUNCS:prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
-PACKAGESPLITFUNCS:remove:class-nativesdk = "populate_packages_updatercd "
+PACKAGESPLITFUNCS =+ "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd', '', d)}"
+PACKAGESPLITFUNCS:remove:class-nativesdk = "populate_packages_updatercd"
populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
diff --git a/meta/classes-recipe/waf.bbclass b/meta/classes-recipe/waf.bbclass
index 5fa0cc4987..01707c8e2c 100644
--- a/meta/classes-recipe/waf.bbclass
+++ b/meta/classes-recipe/waf.bbclass
@@ -54,8 +54,21 @@ python waf_preconfigure() {
wafbin = os.path.join(subsrcdir, 'waf')
try:
result = subprocess.check_output([python, wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
- version = result.decode('utf-8').split()[1]
- if bb.utils.vercmp_string_op(version, "1.8.7", ">="):
+ # Output looks like:
+ # # output from lower modules (e.g. warnings, ...)
+ # waf X.Y.Z ...
+ # So, look for the line starting with "waf "
+ version = None
+ for line in result.decode('utf-8').split("\n"):
+ if line.startswith("waf "):
+ version = line.split()[1]
+ break
+
+ if not version or not bb.utils.is_semver(version):
+ bb.warn("Unable to parse \"waf --version\" output. Assuming waf version without bindir/libdir support.")
+            bb.warn("waf --version output = \n%s" % result.decode('utf-8'))
+ elif bb.utils.vercmp_string_op(version, "1.8.7", ">="):
+ bb.note("waf version is high enough to add --bindir and --libdir")
d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
except subprocess.CalledProcessError as e:
bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
diff --git a/meta/classes-recipe/xmlcatalog.bbclass b/meta/classes-recipe/xmlcatalog.bbclass
index 5826d0a8b5..d3ef7ff43c 100644
--- a/meta/classes-recipe/xmlcatalog.bbclass
+++ b/meta/classes-recipe/xmlcatalog.bbclass
@@ -4,13 +4,17 @@
# SPDX-License-Identifier: MIT
#
-DEPENDS = "libxml2-native"
+# Note that this class only handles XML catalogues in the native sysroot, and doesn't
+# yet support catalogue management in the target sysroot or on the target itself.
+# (https://bugzilla.yoctoproject.org/13271)
# A whitespace-separated list of XML catalogs to be registered, for example
# "${sysconfdir}/xml/docbook-xml.xml".
XMLCATALOGS ?= ""
-SYSROOT_PREPROCESS_FUNCS:append = " xmlcatalog_sstate_postinst"
+DEPENDS:append = " libxml2-native"
+
+SYSROOT_PREPROCESS_FUNCS:append:class-native = " xmlcatalog_sstate_postinst"
xmlcatalog_complete() {
ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"