Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/archiver.bbclass                |   4
-rw-r--r--  meta/classes/autotools.bbclass               |  22
-rw-r--r--  meta/classes/base.bbclass                    |  57
-rw-r--r--  meta/classes/binconfig-disabled.bbclass      |  13
-rw-r--r--  meta/classes/bluetooth.bbclass               |  14
-rw-r--r--  meta/classes/boot-directdisk.bbclass         |   3
-rw-r--r--  meta/classes/bootimg.bbclass                 |   3
-rw-r--r--  meta/classes/buildhistory.bbclass            |  32
-rw-r--r--  meta/classes/buildstats.bbclass              |   4
-rw-r--r--  meta/classes/chrpath.bbclass                 |   2
-rw-r--r--  meta/classes/cmake.bbclass                   |   4
-rw-r--r--  meta/classes/compress_doc.bbclass            |  12
-rw-r--r--  meta/classes/core-image.bbclass              |   3
-rw-r--r--  meta/classes/devshell.bbclass                |   4
-rw-r--r--  meta/classes/distrodata.bbclass              | 492
-rw-r--r--  meta/classes/externalsrc.bbclass             |  41
-rw-r--r--  meta/classes/fontcache.bbclass               |   6
-rw-r--r--  meta/classes/image-buildinfo.bbclass         |  69
-rw-r--r--  meta/classes/image.bbclass                   |  75
-rw-r--r--  meta/classes/image_types.bbclass             |  32
-rw-r--r--  meta/classes/insane.bbclass                  |  90
-rw-r--r--  meta/classes/kernel-arch.bbclass             |   1
-rw-r--r--  meta/classes/kernel-module-split.bbclass     |   9
-rw-r--r--  meta/classes/kernel-yocto.bbclass            | 112
-rw-r--r--  meta/classes/kernel.bbclass                  | 159
-rw-r--r--  meta/classes/kernelsrc.bbclass               |  10
-rw-r--r--  meta/classes/libc-package.bbclass            |   5
-rw-r--r--  meta/classes/license.bbclass                 |  79
-rw-r--r--  meta/classes/linux-kernel-base.bbclass       |  13
-rw-r--r--  meta/classes/module-base.bbclass             |  12
-rw-r--r--  meta/classes/module.bbclass                  |   6
-rw-r--r--  meta/classes/native.bbclass                  |  25
-rw-r--r--  meta/classes/nativesdk.bbclass               |   1
-rw-r--r--  meta/classes/oelint.bbclass                  |  19
-rw-r--r--  meta/classes/package.bbclass                 | 105
-rw-r--r--  meta/classes/package_deb.bbclass             |   2
-rw-r--r--  meta/classes/package_ipk.bbclass             |   2
-rw-r--r--  meta/classes/package_rpm.bbclass             |  12
-rw-r--r--  meta/classes/pixbufcache.bbclass             |   7
-rw-r--r--  meta/classes/populate_sdk_base.bbclass       |  17
-rw-r--r--  meta/classes/populate_sdk_ext.bbclass        | 220
-rw-r--r--  meta/classes/pythonnative.bbclass            |   2
-rw-r--r--  meta/classes/qemu.bbclass                    |  29
-rw-r--r--  meta/classes/qmake_base.bbclass              |   6
-rw-r--r--  meta/classes/qt4e.bbclass                    |   3
-rw-r--r--  meta/classes/qt4x11.bbclass                  |   8
-rw-r--r--  meta/classes/report-error.bbclass            |  14
-rw-r--r--  meta/classes/rootfs_ipk.bbclass              |   2
-rw-r--r--  meta/classes/sanity.bbclass                  |  21
-rw-r--r--  meta/classes/siteinfo.bbclass                |  17
-rw-r--r--  meta/classes/spdx.bbclass                    | 334
-rw-r--r--  meta/classes/sstate.bbclass                  |   2
-rw-r--r--  meta/classes/toaster.bbclass                 |  18
-rw-r--r--  meta/classes/toolchain-scripts.bbclass       |  17
-rw-r--r--  meta/classes/uboot-config.bbclass            |  40
-rw-r--r--  meta/classes/update-alternatives.bbclass     |   8
-rw-r--r--  meta/classes/update-rc.d.bbclass             |   4
-rw-r--r--  meta/classes/useradd.bbclass                 |  10
-rw-r--r--  meta/classes/useradd_base.bbclass            |  14
-rw-r--r--  meta/classes/vala.bbclass                    |   3
60 files changed, 1355 insertions(+), 995 deletions(-)
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 058ba63437..b598aa3ad6 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -146,9 +146,9 @@ python do_ar_original() {
fetch.unpack(tmpdir, (url,))
os.chdir(tmpdir)
- # We split on '+' to chuck any annoying AUTOINC+ in the revision.
+ # We eliminate any AUTOINC+ in the revision.
try:
- src_rev = bb.fetch2.get_srcrev(d).split('+')[-1][:10]
+ src_rev = bb.fetch2.get_srcrev(d).replace('AUTOINC+','')
except:
src_rev = 'NOREV'
tarname = os.path.join(ar_outdir, basename + '.' + src_rev + '.tar.gz')
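
The change above swaps the old split-based truncation for a straight removal of the "AUTOINC+" marker, so the archived tarball name keeps the full revision instead of only its first ten characters. A minimal Python sketch of the two behaviours (the revision string is an assumed example):

    # Old vs. new handling of an AUTOINC-prefixed SRCREV.
    srcrev = "AUTOINC+8c1a0f0a2c8b0d4a9e7f6c5b4a3d2e1f00112233"

    old = srcrev.split('+')[-1][:10]       # '8c1a0f0a2c' (truncated)
    new = srcrev.replace('AUTOINC+', '')   # full 40-character revision
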
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
index 6b99bddd5d..f213e893df 100644
--- a/meta/classes/autotools.bbclass
+++ b/meta/classes/autotools.bbclass
@@ -27,7 +27,7 @@ inherit siteinfo
# Space separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
-export CONFIG_SITE = "${@siteinfo_get_files(d)}"
+export CONFIG_SITE = "${@siteinfo_get_files(d, False)}"
acpaths = "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
@@ -86,7 +86,7 @@ oe_runconf () {
${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"
if [ "$?" != "0" ]; then
echo "Configure failed. The contents of all config.log files follows to aid debugging"
- find ${S} -name config.log -print -exec cat {} \;
+ find ${S} -ignore_readdir_race -name config.log -print -exec cat {} \;
bbfatal "oe_runconf failed"
fi
set -e
@@ -109,7 +109,12 @@ autotools_preconfigure() {
else
# At least remove the .la files since automake won't automatically
# regenerate them even if CFLAGS/LDFLAGS are different
- cd ${S}; find ${S} -name \*.la -delete
+ cd ${S}
+ if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
+ echo "Running \"${MAKE} clean\" in ${S}"
+ ${MAKE} clean
+ fi
+ find ${S} -ignore_readdir_race -name \*.la -delete
fi
fi
fi
@@ -182,6 +187,7 @@ python autotools_copy_aclocals () {
#bb.warn(str(configuredeps2))
cp = []
+ siteconf = []
for c in configuredeps:
if c.endswith("-native"):
manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
@@ -196,6 +202,8 @@ python autotools_copy_aclocals () {
for l in f:
if "/aclocal/" in l and l.strip().endswith(".m4"):
cp.append(l.strip())
+ elif "config_site.d/" in l:
+ cp.append(l.strip())
except:
bb.warn("%s not found" % manifest)
@@ -203,6 +211,8 @@ python autotools_copy_aclocals () {
t = os.path.join(aclocaldir, os.path.basename(c))
if not os.path.exists(t):
os.symlink(c, t)
+
+ d.setVar("CONFIG_SITE", siteinfo_get_files(d, False))
}
autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
@@ -215,7 +225,7 @@ autotools_do_configure() {
# for a package whose autotools are old, on an x86_64 machine, which the old
# config.sub does not support. Work around this by installing them manually
# regardless.
- ( for ac in `find ${S} -name configure.in -o -name configure.ac`; do
+ ( for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
rm -f `dirname $ac`/configure
done )
if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then
@@ -224,7 +234,7 @@ autotools_do_configure() {
ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
if [ x"${acpaths}" = xdefault ]; then
acpaths=
- for i in `find ${S} -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
+ for i in `find ${S} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
grep -v 'acinclude.m4' | grep -v 'aclocal-copy' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
acpaths="$acpaths -I $i"
done
@@ -265,7 +275,7 @@ autotools_do_configure() {
fi
fi
for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do
- for j in `find ${S} -name $i | grep -v aclocal-copy`; do
+ for j in `find ${S} -ignore_readdir_race -name $i | grep -v aclocal-copy`; do
rm $j
done
done
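
Several of the find invocations above gain -ignore_readdir_race so that a file deleted by a parallel task while the directory is being read no longer fails the build. A rough Python analogue of the .la cleanup with the same tolerance (a sketch only; the class itself shells out to find):

    import errno
    import os

    def delete_la_files(srcdir):
        # Delete libtool .la files, ignoring ones that vanish mid-walk.
        for root, dirs, files in os.walk(srcdir):
            for name in files:
                if name.endswith(".la"):
                    try:
                        os.unlink(os.path.join(root, name))
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
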
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index ff8c63394f..3d9235e1a6 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -94,9 +94,26 @@ def extra_path_elements(d):
PATH_prepend = "${@extra_path_elements(d)}"
+def get_lic_checksum_file_list(d):
+ filelist = []
+ lic_files = d.getVar("LIC_FILES_CHKSUM", True) or ''
+
+ urls = lic_files.split()
+ for url in urls:
+ # We only care about items that are absolute paths since
+ # any others should be covered by SRC_URI.
+ try:
+ path = bb.fetch.decodeurl(url)[2]
+ if path[0] == '/':
+ filelist.append(path + ":" + str(os.path.exists(path)))
+ except bb.fetch.MalformedUrl:
+ raise bb.build.FuncFailed(d.getVar('PN', True) + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
+ return " ".join(filelist)
+
addtask fetch
do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
+do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[vardeps] += "SRCREV"
python base_do_fetch() {
@@ -113,7 +130,6 @@ python base_do_fetch() {
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
-do_unpack[cleandirs] = "${S}/patches"
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI', True) or "").split()
if len(src_uri) == 0:
@@ -121,11 +137,21 @@ python base_do_unpack() {
rootdir = d.getVar('WORKDIR', True)
+ # Ensure that we cleanup ${S}/patches
+ # TODO: Investigate if we can remove
+ # the entire ${S} in this case.
+ s_dir = d.getVar('S', True)
+ p_dir = os.path.join(s_dir, 'patches')
+ bb.utils.remove(p_dir, True)
+
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.unpack(rootdir)
except bb.fetch2.BBFetchException as e:
raise bb.build.FuncFailed(e)
+
+ if not os.path.exists(s_dir):
+ bb.warn("%s ('S') doesn't exist, please set 'S' to a proper value" % s_dir)
}
def pkgarch_mapping(d):
@@ -216,15 +242,29 @@ python base_eventhandler() {
}
+CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
+CLEANBROKEN = "0"
+
addtask configure after do_patch
-do_configure[dirs] = "${S} ${B}"
+do_configure[dirs] = "${B}"
do_configure[deptask] = "do_populate_sysroot"
base_do_configure() {
- :
+ if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
+ if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
+ cd ${B}
+ if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
+ oe_runmake clean
+ fi
+ find ${B} -name \*.la -delete
+ fi
+ fi
+ if [ -n "${CONFIGURESTAMPFILE}" ]; then
+ echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
+ fi
}
addtask compile after do_configure
-do_compile[dirs] = "${S} ${B}"
+do_compile[dirs] = "${B}"
base_do_compile() {
if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
oe_runmake || die "make failed"
@@ -234,7 +274,7 @@ base_do_compile() {
}
addtask install after do_compile
-do_install[dirs] = "${D} ${S} ${B}"
+do_install[dirs] = "${D} ${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"
@@ -333,8 +373,6 @@ python () {
extrardeps = []
extraconf = []
for flag, flagval in sorted(pkgconfigflags.items()):
- if flag == "defaultval":
- continue
items = flagval.split(",")
num = len(items)
if num > 4:
@@ -362,7 +400,7 @@ python () {
# obsolete. Return a warning to the user.
princ = d.getVar('PRINC', True)
if princ and princ != "0":
- bb.warn("Use of PRINC %s was detected in the recipe %s (or one of its .bbappends)\nUse of PRINC is deprecated. The PR server should be used to automatically increment the PR. See: https://wiki.yoctoproject.org/wiki/PR_Service." % (princ, d.getVar("FILE", True)))
+ bb.error("Use of PRINC %s was detected in the recipe %s (or one of its .bbappends)\nUse of PRINC is deprecated. The PR server should be used to automatically increment the PR. See: https://wiki.yoctoproject.org/wiki/PR_Service." % (princ, d.getVar("FILE", True)))
pr = d.getVar('PR', True)
pr_prefix = re.search("\D+",pr)
prval = re.search("\d+",pr)
@@ -378,6 +416,7 @@ python () {
bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
if bb.data.inherits_class('license', d):
+ check_license_format(d)
unmatched_license_flag = check_license_flags(d)
if unmatched_license_flag:
bb.debug(1, "Skipping %s because it has a restricted license not"
@@ -431,7 +470,7 @@ python () {
check_license = False
if check_license and bad_licenses:
- bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
+ bad_licenses = expand_wildcard_licenses(d, bad_licenses)
whitelist = []
for lic in bad_licenses:
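
base_do_configure() now records the configure task hash in CONFIGURESTAMPFILE and, when the hash has changed since the last run, cleans the build tree (unless CLEANBROKEN is set) before reconfiguring. The stamp logic in isolation, as a Python sketch with an assumed stamp path and hash string:

    import os

    def stale_configuration(stampfile, taskhash):
        # True when a previous configure ran with a different task hash,
        # i.e. ${B} may contain stale configuration output.
        if not os.path.exists(stampfile):
            return False
        with open(stampfile) as f:
            return f.read().strip() != taskhash

    def record_configure(stampfile, taskhash):
        with open(stampfile, "w") as f:
            f.write(taskhash)
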
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
index 27f904eb42..595cd096f5 100644
--- a/meta/classes/binconfig-disabled.bbclass
+++ b/meta/classes/binconfig-disabled.bbclass
@@ -10,6 +10,19 @@ FILES_${PN}-dev += "${bindir}/*-config"
do_install_append () {
for x in ${BINCONFIG}; do
echo "#!/bin/sh" > ${D}$x
+ # Make the disabled script emit invalid parameters for those configure
+ # scripts which call it without checking the return code.
+ echo "echo '--should-not-have-used-$x'" >> ${D}$x
echo "exit 1" >> ${D}$x
done
}
+
+SYSROOT_PREPROCESS_FUNCS += "binconfig_disabled_sysroot_preprocess"
+
+binconfig_disabled_sysroot_preprocess () {
+ for x in ${BINCONFIG}; do
+ configname=`basename $x`
+ install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
+ install ${D}$x ${SYSROOT_DESTDIR}${bindir_crossscripts}
+ done
+}
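
With this change, the disabled wrapper installed for a hypothetical ${bindir}/foo-config would contain roughly:

    #!/bin/sh
    echo '--should-not-have-used-/usr/bin/foo-config'
    exit 1

so a configure script that ignores the exit status still receives an unmistakably invalid flag instead of silently empty output.
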
diff --git a/meta/classes/bluetooth.bbclass b/meta/classes/bluetooth.bbclass
new file mode 100644
index 0000000000..f88b4ae5b8
--- /dev/null
+++ b/meta/classes/bluetooth.bbclass
@@ -0,0 +1,14 @@
+# Avoid code duplication in bluetooth-dependent recipes.
+
+# Define a variable that expands to the recipe (package) providing core
+# bluetooth support on the platform:
+# "" if bluetooth is not in DISTRO_FEATURES
+# else "bluez5" if bluez5 is in DISTRO_FEATURES
+# else "bluez4"
+
+# Use this with:
+# inherit bluetooth
+# PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', '${BLUEZ}', '', d)}
+# PACKAGECONFIG[bluez4] = "--enable-bluez4,--disable-bluez4,bluez4"
+
+BLUEZ ?= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', bb.utils.contains('DISTRO_FEATURES', 'bluez5', 'bluez5', 'bluez4', d), '', d)}"
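
Restated in plain Python, the nested bb.utils.contains() calls select bluez5 only when both 'bluetooth' and 'bluez5' appear in DISTRO_FEATURES (a sketch of the expression's behaviour, not code from the class):

    def bluez_provider(distro_features):
        # Mirrors the BLUEZ ?= expression above.
        features = distro_features.split()
        if "bluetooth" not in features:
            return ""
        return "bluez5" if "bluez5" in features else "bluez4"

    assert bluez_provider("alsa bluetooth bluez5") == "bluez5"
    assert bluez_provider("alsa bluetooth") == "bluez4"
    assert bluez_provider("alsa") == ""
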
diff --git a/meta/classes/boot-directdisk.bbclass b/meta/classes/boot-directdisk.bbclass
index 09da032049..44f738b02e 100644
--- a/meta/classes/boot-directdisk.bbclass
+++ b/meta/classes/boot-directdisk.bbclass
@@ -20,6 +20,7 @@
# ${ROOTFS} - the rootfs image to incorporate
do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
+ virtual/kernel:do_deploy \
syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
parted-native:do_populate_sysroot \
@@ -69,7 +70,7 @@ boot_direct_populate() {
install -d $dest
# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
- install -m 0644 ${STAGING_KERNEL_DIR}/bzImage $dest/vmlinuz
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/bzImage $dest/vmlinuz
# initrd is made of concatenation of multiple filesystem images
if [ -n "${INITRD}" ]; then
diff --git a/meta/classes/bootimg.bbclass b/meta/classes/bootimg.bbclass
index 859d517dbd..b1c03ba068 100644
--- a/meta/classes/bootimg.bbclass
+++ b/meta/classes/bootimg.bbclass
@@ -28,6 +28,7 @@
do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
mtools-native:do_populate_sysroot \
cdrtools-native:do_populate_sysroot \
+ virtual/kernel:do_deploy \
${@oe.utils.ifelse(d.getVar('COMPRESSISO'),'zisofs-tools-native:do_populate_sysroot','')}"
PACKAGES = " "
@@ -66,7 +67,7 @@ populate() {
install -d ${DEST}
# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
- install -m 0644 ${STAGING_KERNEL_DIR}/bzImage ${DEST}/vmlinuz
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/bzImage ${DEST}/vmlinuz
# initrd is made of concatenation of multiple filesystem images
if [ -n "${INITRD}" ]; then
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index 8b5d5c214c..211dcf18b5 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -155,7 +155,7 @@ python buildhistory_emit_pkghistory() {
with open(os.path.join(pkgdata_dir, pn)) as f:
for line in f.readlines():
if line.startswith('PACKAGES: '):
- packages = squashspaces(line.split(': ', 1)[1])
+ packages = oe.utils.squashspaces(line.split(': ', 1)[1])
break
except IOError as e:
if e.errno == errno.ENOENT:
@@ -181,7 +181,7 @@ python buildhistory_emit_pkghistory() {
rcpinfo.pe = pe
rcpinfo.pv = pv
rcpinfo.pr = pr
- rcpinfo.depends = sortlist(squashspaces(d.getVar('DEPENDS', True) or ""))
+ rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS', True) or ""))
rcpinfo.packages = packages
write_recipehistory(rcpinfo, d)
@@ -222,13 +222,13 @@ python buildhistory_emit_pkghistory() {
pkginfo.pkge = pkge
pkginfo.pkgv = pkgv
pkginfo.pkgr = pkgr
- pkginfo.rprovides = sortpkglist(squashspaces(pkgdata.get('RPROVIDES', "")))
- pkginfo.rdepends = sortpkglist(squashspaces(pkgdata.get('RDEPENDS', "")))
- pkginfo.rrecommends = sortpkglist(squashspaces(pkgdata.get('RRECOMMENDS', "")))
- pkginfo.rsuggests = sortpkglist(squashspaces(pkgdata.get('RSUGGESTS', "")))
- pkginfo.rreplaces = sortpkglist(squashspaces(pkgdata.get('RREPLACES', "")))
- pkginfo.rconflicts = sortpkglist(squashspaces(pkgdata.get('RCONFLICTS', "")))
- pkginfo.files = squashspaces(pkgdata.get('FILES', ""))
+ pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(pkgdata.get('RPROVIDES', "")))
+ pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RDEPENDS', "")))
+ pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RRECOMMENDS', "")))
+ pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(pkgdata.get('RSUGGESTS', "")))
+ pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(pkgdata.get('RREPLACES', "")))
+ pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(pkgdata.get('RCONFLICTS', "")))
+ pkginfo.files = oe.utils.squashspaces(pkgdata.get('FILES', ""))
for filevar in pkginfo.filevars:
pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
@@ -374,7 +374,7 @@ buildhistory_get_installed() {
printf "" > $1/installed-package-sizes.tmp
cat $pkgcache | while read pkg pkgfile pkgarch
do
- size=`oe-pkgdata-util read-value ${PKGDATA_DIR} "PKGSIZE" ${pkg}_${pkgarch}`
+ size=`oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" ${pkg}_${pkgarch}`
if [ "$size" != "" ] ; then
echo "$size $pkg" >> $1/installed-package-sizes.tmp
fi
@@ -484,8 +484,9 @@ END
echo "SDKSIZE = $sdksize" >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
}
-# By prepending we get in before the removal of packaging files
-ROOTFS_POSTPROCESS_COMMAND =+ " buildhistory_list_installed_image ;\
+# By using ROOTFS_POSTUNINSTALL_COMMAND we get in after uninstallation of
+# unneeded packages but before the removal of packaging files
+ROOTFS_POSTUNINSTALL_COMMAND += " buildhistory_list_installed_image ;\
buildhistory_get_image_installed ; "
IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
@@ -525,11 +526,6 @@ def buildhistory_get_metadata_revs(d):
for i in layers]
return '\n'.join(medadata_revs)
-
-def squashspaces(string):
- import re
- return re.sub("\s+", " ", string).strip()
-
def outputvars(vars, listvars, d):
vars = vars.split()
listvars = listvars.split()
@@ -538,7 +534,7 @@ def outputvars(vars, listvars, d):
value = d.getVar(var, True) or ""
if var in listvars:
# Squash out spaces
- value = squashspaces(value)
+ value = oe.utils.squashspaces(value)
ret += "%s = %s\n" % (var, value)
return ret.rstrip('\n')
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
index 89ae72c679..48442641d4 100644
--- a/meta/classes/buildstats.bbclass
+++ b/meta/classes/buildstats.bbclass
@@ -52,8 +52,8 @@ def set_device(e):
# If we end up hitting one of these fs, we'll just skip diskstats collection.
############################################################################
device=os.stat(tmpdir)
- majordev=os.major(device.st_dev)
- minordev=os.minor(device.st_dev)
+ majordev=os.major(long(device.st_dev))
+ minordev=os.minor(long(device.st_dev))
############################################################################
# Bug 1700:
# Because tmpfs/encryptfs/ramfs etc inserts no entry in /proc/diskstats
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
index 77b19372ba..7a5d9602f5 100644
--- a/meta/classes/chrpath.bbclass
+++ b/meta/classes/chrpath.bbclass
@@ -10,6 +10,8 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
if p.returncode != 0:
return
+ # Handle RUNPATH as well as RPATH
+ err = err.replace("RUNPATH=","RPATH=")
# Throw away everything other than the rpath list
curr_rpath = err.partition("RPATH=")[2]
#bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
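
Binaries linked with --enable-new-dtags carry a RUNPATH entry rather than RPATH, so normalising the tool output lets the existing parsing cover both. The parsing step on an assumed example line:

    # Extract the rpath list, treating RUNPATH and RPATH identically.
    err = "/work/foo: RUNPATH=/usr/lib:/lib"   # assumed example output
    err = err.replace("RUNPATH=", "RPATH=")
    curr_rpath = err.partition("RPATH=")[2]
    print(curr_rpath.split(":"))               # ['/usr/lib', '/lib']
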
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index 995ddf1ea2..3549c38f15 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -13,6 +13,7 @@ inherit autotools
# C/C++ Compiler (without cpu arch/tune arguments)
OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
+OECMAKE_AR ?= "${AR}"
# Compiler flags
OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
@@ -35,6 +36,7 @@ set( CMAKE_SYSTEM_PROCESSOR ${TARGET_ARCH} )
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
+set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
@@ -79,6 +81,8 @@ cmake_do_configure() {
rm -rf ${B}
mkdir -p ${B}
cd ${B}
+ else
+ find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
fi
# Just like autotools cmake can use a site file to cache result that need generated binaries to run
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
index 6edbaf531f..9b58d82ce5 100644
--- a/meta/classes/compress_doc.bbclass
+++ b/meta/classes/compress_doc.bbclass
@@ -136,11 +136,14 @@ def _is_info(file):
return False
def _is_man(file):
+ import re
+
# It refers MANSECT-var in man(1.6g)'s man.config
- flags = '.1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o'.split(':')
- for flag in flags:
- if os.path.basename(file).endswith(flag):
- return True
+ # ".1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o"
+ # Must not start with '.', and must contain one of the colon-separated elements above
+ p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')
+ if p.search(file):
+ return True
return False
@@ -254,3 +257,4 @@ python compress_doc_updatealternatives () {
if new_names:
d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names))
}
+
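
The fixed suffix list is replaced by a single pattern, so compressed pages with a trailing .gz still match on the embedded section suffix. A quick check of the new pattern against a few sample names:

    import re

    p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')

    for name in ("man1/ls.1", "man3/printf.3.gz", "README", ".1"):
        print(name, bool(p.search(name)))
    # man1/ls.1 True, man3/printf.3.gz True, README False, .1 False
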
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
index 62363fb334..a78f93405b 100644
--- a/meta/classes/core-image.bbclass
+++ b/meta/classes/core-image.bbclass
@@ -73,8 +73,5 @@ inherit image
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
-# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "", "zap_empty_root_password ; ",d)}'
-
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index 41164a3f33..4451436473 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -17,7 +17,9 @@ python do_devshell () {
addtask devshell after do_patch
-do_devshell[dirs] = "${S}"
+# The directory that the terminal starts in
+DEVSHELL_STARTDIR ?= "${S}"
+do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
do_devshell[nostamp] = "1"
# devshell and fakeroot/pseudo need careful handling since only the final
diff --git a/meta/classes/distrodata.bbclass b/meta/classes/distrodata.bbclass
index a890de7911..83aa381fe7 100644
--- a/meta/classes/distrodata.bbclass
+++ b/meta/classes/distrodata.bbclass
@@ -268,240 +268,6 @@ python do_checkpkg() {
import tempfile
import subprocess
- """
- sanity check to ensure same name and type. Match as many patterns as possible
- such as:
- gnome-common-2.20.0.tar.gz (most common format)
- gtk+-2.90.1.tar.gz
- xf86-input-synaptics-12.6.9.tar.gz
- dri2proto-2.3.tar.gz
- blktool_4.orig.tar.gz
- libid3tag-0.15.1b.tar.gz
- unzip552.tar.gz
- icu4c-3_6-src.tgz
- genext2fs_1.3.orig.tar.gz
- gst-fluendo-mp3
- """
- prefix1 = "[a-zA-Z][a-zA-Z0-9]*([\-_][a-zA-Z]\w+)*\+?[\-_]" # match most patterns which uses "-" as separator to version digits
- prefix2 = "[a-zA-Z]+" # a loose pattern such as for unzip552.tar.gz
- prefix3 = "[0-9]+[\-]?[a-zA-Z]+" # a loose pattern such as for 80325-quicky-0.4.tar.gz
- prefix = "(%s|%s|%s)" % (prefix1, prefix2, prefix3)
- ver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"#"((\d+[\.\-_[a-z]])+)"
- # src.rpm extension was added only for rpm package. Can be removed if the rpm
- # packaged will always be considered as having to be manually upgraded
- suffix = "(tar\.gz|tgz|tar\.bz2|tar\.lz4|zip|xz|rpm|bz2|lz4|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
-
- suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2", "tar.xz", "tar.lz4", "bz2", "lz4", "orig.tar.gz", "src.tar.gz", "src.rpm", "src.tgz", "svnr\d+.tar.bz2", "stable.tar.gz", "src.rpm")
- sinterstr = "(?P<name>%s?)v?(?P<ver>%s)(\-source)?" % (prefix, ver_regex)
- sdirstr = "(?P<name>%s)\.?v?(?P<ver>%s)(\-source)?[\.\-](?P<type>%s$)" % (prefix, ver_regex, suffix)
-
- def parse_inter(s):
- m = re.search(sinterstr, s)
- if not m:
- return None
- else:
- return (m.group('name'), m.group('ver'), "")
-
- def parse_dir(s):
- m = re.search(sdirstr, s)
- if not m:
- return None
- else:
- return (m.group('name'), m.group('ver'), m.group('type'))
-
- def modelate_version(version):
- if version[0] in ['.', '-']:
- if version[1].isdigit():
- version = version[1] + version[0] + version[2:len(version)]
- else:
- version = version[1:len(version)]
-
- version = re.sub('\-', '.', version)
- version = re.sub('_', '.', version)
- version = re.sub('(rc)+', '.-1.', version)
- version = re.sub('(alpha)+', '.-3.', version)
- version = re.sub('(beta)+', '.-2.', version)
- if version[0] == 'v':
- version = version[1:len(version)]
- return version
-
- """
- Check whether 'new' is newer than 'old' version. We use existing vercmp() for the
- purpose. PE is cleared in comparison as it's not for build, and PV is cleared too
- for simplicity as it's somehow difficult to get from various upstream format
- """
- def __vercmp(old, new):
- (on, ov, ot) = old
- (en, ev, et) = new
- if on != en or (et and et not in suffixtuple):
- return False
- ov = modelate_version(ov)
- ev = modelate_version(ev)
-
- result = bb.utils.vercmp(("0", ov, ""), ("0", ev, ""))
- if result < 0:
- return True
- else:
- return False
-
- """
- wrapper for fetch upstream directory info
- 'url' - upstream link customized by regular expression
- 'd' - database
- 'tmpf' - tmpfile for fetcher output
- We don't want to exit whole build due to one recipe error. So handle all exceptions
- gracefully w/o leaking to outer.
- """
- def internal_fetch_wget(url, ud, d, tmpf):
- status = "ErrFetchUnknown"
-
- agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
- fetchcmd = "/usr/bin/env wget -t 1 --passive-ftp -O %s --user-agent=\"%s\" '%s'" % (tmpf.name, agent, url)
- try:
- fetcher = bb.fetch2.wget.Wget(d)
- fetcher._runwget(ud, d, fetchcmd, True)
- status = "SUCC"
- except bb.fetch2.BBFetchException, e:
- status = "ErrFetch"
-
- return status
-
- """
- Check on middle version directory such as "2.4/" in "http://xxx/2.4/pkg-2.4.1.tar.gz",
- 'url' - upstream link customized by regular expression
- 'd' - database
- 'curver' - current version
- Return new version if success, or else error in "Errxxxx" style
- """
- def check_new_dir(url, curver, ud, d):
- pn = d.getVar('PN', True)
- f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-1-" % pn)
- status = internal_fetch_wget(url, ud, d, f)
- fhtml = f.read()
- if status == "SUCC" and len(fhtml):
- newver = parse_inter(curver)
-
- """
- match "*4.1/">*4.1/ where '*' matches chars
- N.B. add package name, only match for digits
- """
- regex = d.getVar('REGEX', True)
- if regex == '':
- regex = "^%s" %prefix
- m = re.search("^%s" % regex, curver)
- if m:
- s = "%s[^\d\"]*?(\d+[\.\-_])+\d+/?" % m.group()
- else:
- s = "(\d+[\.\-_])+\d+/?"
-
- searchstr = "[hH][rR][eE][fF]=\"%s\">" % s
-
- reg = re.compile(searchstr)
- valid = 0
- for line in fhtml.split("\n"):
- if line.find(curver) >= 0:
- valid = 1
- m = reg.search(line)
- if m:
- ver = m.group().split("\"")[1]
- ver = ver.strip("/")
- ver = parse_inter(ver)
- if ver and __vercmp(newver, ver) == True:
- newver = ver
-
- """Expect a match for curver in directory list, or else it indicates unknown format"""
- if not valid:
- status = "ErrParseInterDir"
- else:
- """rejoin the path name"""
- status = newver[0] + newver[1]
- elif not len(fhtml):
- status = "ErrHostNoDir"
-
- f.close()
- if status != "ErrHostNoDir" and re.match("Err", status):
- logpath = d.getVar('LOG_DIR', True)
- subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
- os.unlink(f.name)
- return status
-
- """
- Check on the last directory to search '2.4.1' in "http://xxx/2.4/pkg-2.4.1.tar.gz",
- 'url' - upstream link customized by regular expression
- 'd' - database
- 'curname' - current package name
- Return new version if success, or else error in "Errxxxx" style
- """
- def check_new_version(url, curname, ud, d):
- """possible to have no version in pkg name, such as spectrum-fw"""
- if not re.search("\d+", curname):
- return pcurver
- pn = d.getVar('PN', True)
- newver_regex = d.getVar('REGEX', True)
- f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-2-" % pn)
- status = internal_fetch_wget(url, ud, d, f)
- fhtml = f.read()
-
- if status == "SUCC" and len(fhtml):
- newver = parse_dir(curname)
-
- if not newver_regex:
- """this is the default matching pattern, if recipe does not """
- """provide a regex expression """
- """match "{PN}-5.21.1.tar.gz">{PN}-5.21.1.tar.gz """
- pn1 = re.search("^%s" % prefix, curname).group()
- s = "[^\"]*%s[^\d\"]*?(\d+[\.\-_])+[^\"]*" % pn1
- searchstr = "[hH][rR][eE][fF]=\"%s\".*[>\"]" % s
- reg = searchstr
- else:
- reg = newver_regex
- valid = 0
- count = 0
- for line in fhtml.split("\n"):
- if pn == 'kconfig-frontends':
- m = re.findall(reg, line)
- if m:
- valid = 1
- for match in m:
- (on, ov, oe) = newver
- ver = (on, match[0], oe)
- if ver and __vercmp(newver, ver) == True:
- newver = ver
- continue
- count += 1
- m = re.search(reg, line)
- if m:
- valid = 1
- if not newver_regex:
- ver = m.group().split("\"")[1].split("/")[-1]
- if ver == "download":
- ver = m.group().split("\"")[1].split("/")[-2]
- ver = parse_dir(ver)
- else:
- """ we cheat a little here, but we assume that the
- regular expression in the recipe will extract exacly
- the version """
- (on, ov, oe) = newver
- ver = (on, m.group('pver'), oe)
- if ver and __vercmp(newver, ver) == True:
- newver = ver
- """Expect a match for curver in directory list, or else it indicates unknown format"""
- if not valid:
- status = "ErrParseDir"
- else:
- """newver still contains a full package name string"""
- status = re.sub('_', '.', newver[1])
- elif not len(fhtml):
- status = "ErrHostNoDir"
-
- f.close()
- """if host hasn't directory information, no need to save tmp file"""
- if status != "ErrHostNoDir" and re.match("Err", status):
- logpath = d.getVar('LOG_DIR', True)
- subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
- os.unlink(f.name)
- return status
-
"""first check whether a uri is provided"""
src_uri = d.getVar('SRC_URI', True)
if not src_uri:
@@ -543,9 +309,6 @@ python do_checkpkg() {
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
- chk_uri = d.getVar('REGEX_URI', True)
- if not chk_uri:
- chk_uri = src_uri
pdesc = localdata.getVar('DESCRIPTION', True)
pgrp = localdata.getVar('SECTION', True)
if localdata.getVar('PRSPV', True):
@@ -562,232 +325,63 @@ python do_checkpkg() {
psrcuri = localdata.getVar('SRC_URI', True)
maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
+ """ Get upstream version version """
+ pupver = None
+ pstatus = "ErrUnknown"
found = 0
+
for uri in src_uri.split():
- m = re.compile('(?P<type>[^:]*)').match(uri)
- if not m:
- raise MalformedUrl(uri)
- elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'):
- found = 1
- pproto = m.group('type')
- break
+ m = re.compile('(?P<type>[^:]*)').match(uri)
+ if not m:
+ raise MalformedUrl(uri)
+ elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'):
+ found = 1
+ psrcuri = uri
+ pproto = m.group('type')
+ break
if not found:
pproto = "file"
- pupver = "N/A"
- pstatus = "ErrUnknown"
-
- (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(uri)
- if type in ['http', 'https', 'ftp']:
- if d.getVar('PRSPV', True):
- pcurver = d.getVar('PRSPV', True)
- else:
- pcurver = d.getVar('PV', True)
- else:
- if d.getVar('PRSPV', True):
- pcurver = d.getVar('PRSPV', True)
- else:
- pcurver = d.getVar("SRCREV", True)
-
-
- if type in ['http', 'https', 'ftp']:
- ud = bb.fetch2.FetchData(uri, d)
- newver = pcurver
- altpath = path
- dirver = "-"
- curname = "-"
-
- """
- match version number amid the path, such as "5.7" in:
- http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
- N.B. how about sth. like "../5.7/5.8/..."? Not find such example so far :-P
- """
- m = re.search(r"[^/]*(\d+\.)+\d+([\-_]r\d+)*/", path)
- if m:
- altpath = path.split(m.group())[0]
- dirver = m.group().strip("/")
-
- """use new path and remove param. for wget only param is md5sum"""
- alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
- my_uri = d.getVar('REGEX_URI', True)
- if my_uri:
- if d.getVar('PRSPV', True):
- newver = d.getVar('PRSPV', True)
- else:
- newver = d.getVar('PV', True)
- else:
- newver = check_new_dir(alturi, dirver, ud, d)
- altpath = path
- if not re.match("Err", newver) and dirver != newver:
- altpath = altpath.replace(dirver, newver, True)
- # For folder in folder cases - try to enter the folder again and then try parsing
- """Now try to acquire all remote files in current directory"""
- if not re.match("Err", newver):
- curname = altpath.split("/")[-1]
-
- """get remote name by skipping pacakge name"""
- m = re.search(r"/.*/", altpath)
- if not m:
- altpath = "/"
- else:
- altpath = m.group()
-
- chk_uri = d.getVar('REGEX_URI', True)
- if not chk_uri:
- alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
- else:
- alturi = chk_uri
- newver = check_new_version(alturi, curname, ud, d)
- while(newver == "ErrHostNoDir"):
- if alturi == "/download":
- break
- else:
- alturi = "/".join(alturi.split("/")[0:-2]) + "/download"
- newver = check_new_version(alturi, curname, ud, d)
- if not re.match("Err", newver):
- pupver = newver
- if pupver != pcurver:
- pstatus = "UPDATE"
- else:
- pstatus = "MATCH"
-
- if re.match("Err", newver):
- pstatus = newver + ":" + altpath + ":" + dirver + ":" + curname
- elif type == 'git':
- if user:
- gituser = user + '@'
- else:
- gituser = ""
-
- if 'protocol' in parm:
- gitproto = parm['protocol']
- else:
- gitproto = "git"
- # Get all tags and HEAD
- if d.getVar('GIT_REGEX', True):
- gitcmd = "git ls-remote %s://%s%s%s %s 2>&1" % (gitproto, gituser, host, path, d.getVar('GIT_REGEX', True))
- else:
- gitcmd = "git ls-remote %s://%s%s%s *tag* 2>&1" % (gitproto, gituser, host, path)
- gitcmd2 = "git ls-remote %s://%s%s%s HEAD 2>&1" % (gitproto, gituser, host, path)
-
- tmp = os.popen(gitcmd).read()
- if 'unable to connect' in tmp:
- tmp = None
- tmp2 = os.popen(gitcmd2).read()
- if 'unable to connect' in tmp2:
- tmp2 = None
- #This is for those repos have tag like: refs/tags/1.2.2
- phash = pversion.rsplit("+")[-1]
- if tmp:
- tmpline = tmp.split("\n")
- verflag = 0
- pupver = pversion
- for line in tmpline:
- if len(line)==0:
- break;
- puptag = line.split("/")[-1]
- upstr_regex = d.getVar('REGEX', True)
- if upstr_regex:
- puptag = re.search(upstr_regex, puptag)
- else:
- puptag = re.search("(?P<pver>([0-9][\.|_]?)+)", puptag)
- if puptag == None:
- continue
- puptag = puptag.group('pver')
- puptag = re.sub("_",".",puptag)
- plocaltag = pupver.split("+git")[0]
- if "git" in plocaltag:
- plocaltag = plocaltag.split("-")[0]
- result = bb.utils.vercmp(("0", puptag, ""), ("0", plocaltag, ""))
-
- if result > 0:
- verflag = 1
- pupver = puptag
- elif verflag == 0 :
- pupver = plocaltag
- #This is for those no tag repo
- elif tmp2:
+ if pproto in ['http', 'https', 'ftp', 'git']:
+ try:
+ ud = bb.fetch2.FetchData(psrcuri, d)
+ pupver = ud.method.latest_versionstring(ud, d)
+ if pproto == 'git':
+ if pupver == "":
pupver = pversion.rsplit("+")[0]
- phash = pupver
- else:
- pstatus = "ErrGitAccess"
- if not ('ErrGitAccess' in pstatus):
-
- latest_head = tmp2.rsplit("\t")[0][:7]
- tmp3 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pversion)
- tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pupver)
- if not tmp4:
- tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)', pupver)
-
- if tmp3:
- # Get status of the package - MATCH/UPDATE
- result = bb.utils.vercmp(("0", tmp3.group('git_ver'), ""), ("0",tmp3.group('git_ver') , ""))
- # Get the latest tag
- pstatus = 'MATCH'
- if result < 0:
- latest_pv = tmp3.group('git_ver')
- else:
- latest_pv = pupver
- if not(tmp3.group('head_md5')[:7] in latest_head) or not(latest_head in tmp3.group('head_md5')[:7]):
- pstatus = 'UPDATE'
-
- git_prefix = tmp3.group('git_prefix')
- pupver = latest_pv + tmp3.group('git_prefix') + latest_head
- else:
- if not tmp3:
- bb.plain("#DEBUG# Package %s: current version (%s) doesn't match the usual pattern" %(pname, pversion))
- elif type == 'svn':
- ud = bb.fetch2.FetchData(uri, d)
-
- svnFetcher = bb.fetch2.svn.Svn(d)
- svnFetcher.urldata_init(ud, d)
- try:
- pupver = svnFetcher.latest_revision(ud, d, ud.names[0])
- except bb.fetch2.FetchError:
- pstatus = "ErrSvnAccess"
-
- if pupver:
- if pupver in pversion:
- pstatus = "MATCH"
- else:
- pstatus = "UPDATE"
- else:
- pstatus = "ErrSvnAccess"
-
- if 'rev' in ud.parm:
- pcurver = ud.parm['rev']
-
- if pstatus != "ErrSvnAccess":
- tag = pversion.rsplit("+svn")[0]
- svn_prefix = re.search('(\+svn[r|\-]?)', pversion)
- if tag and svn_prefix:
- pupver = tag + svn_prefix.group() + pupver
-
- elif type == 'cvs':
- pupver = "HEAD"
- pstatus = "UPDATE"
- elif type == 'file':
- """local file is always up-to-date"""
- pupver = pcurver
- pstatus = "MATCH"
+ if re.search(pversion, "gitrAUTOINC"):
+ pupver += "+gitrAUTOINC+"
+ else:
+ pupver += "+gitAUTOINC+"
+ latest_revision = ud.method.latest_revision(ud, d, ud.names[0])
+ pupver += latest_revision[:10]
+ except Exception as inst:
+ bb.warn("%s: unexpected error: %s" % (pname, repr(inst)))
+ pstatus = "ErrAccess"
+ elif pproto == "file":
+ """Local files are always updated"""
+ pupver = pversion
else:
- pstatus = "ErrUnsupportedProto"
+ pstatus = "ErrUnsupportedProto"
+ bb.note("do_checkpkg, protocol %s isn't implemented" % pproto)
- if re.match("Err", pstatus):
- pstatus += ":%s%s" % (host, path)
+ if not pupver:
+ pupver = "N/A"
+ elif pupver == pversion:
+ pstatus = "MATCH"
+ else:
+ pstatus = "UPDATE"
"""Read from manual distro tracking fields as alternative"""
pmver = d.getVar("RECIPE_UPSTREAM_VERSION", True)
if not pmver:
- pmver = "N/A"
- pmstatus = "ErrNoRecipeData"
+ pmver = "N/A"
+ pmstatus = "ErrNoRecipeData"
+ elif pmver == pupver:
+ pmstatus = "MATCH"
else:
- if pmver == pcurver:
- pmstatus = "MATCH"
- else:
- pmstatus = "UPDATE"
+ pmstatus = "UPDATE"
- psrcuri = psrcuri.split()[0]
pdepends = "".join(pdepends.split("\t"))
pdesc = "".join(pdesc.split("\t"))
no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True)
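
The large block removed above hand-rolled wget fetches and HTML scraping to guess upstream versions; the replacement delegates to the fetcher's own latest_versionstring() support. A condensed sketch of the new flow (only meaningful inside a BitBake task where the datastore 'd' exists; the method signature matches this era of bb.fetch2):

    import bb.fetch2

    def upstream_version(uri, d):
        # Ask the fetcher backing 'uri' for the newest upstream version.
        ud = bb.fetch2.FetchData(uri, d)
        return ud.method.latest_versionstring(ud, d) or "N/A"
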
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index 2ac62747a2..75bdb7a14d 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -35,7 +35,17 @@ python () {
d.setVar('B', externalsrcbuild)
else:
d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
- d.setVar('SRC_URI', '')
+
+ srcuri = (d.getVar('SRC_URI', True) or '').split()
+ local_srcuri = []
+ for uri in srcuri:
+ if uri.startswith('file://'):
+ local_srcuri.append(uri)
+ d.setVar('SRC_URI', ' '.join(local_srcuri))
+
+ if '{SRCPV}' in d.getVar('PV', False):
+ # Dummy value because the default function can't be called with blank SRC_URI
+ d.setVar('SRCPV', '999')
tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
@@ -47,7 +57,36 @@ python () {
# Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
d.appendVarFlag(task, "lockfiles", "${S}/singletask.lock")
+ # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
+ cleandirs = d.getVarFlag(task, 'cleandirs', False)
+ if cleandirs:
+ cleandirs = cleandirs.split()
+ setvalue = False
+ if '${S}' in cleandirs:
+ cleandirs.remove('${S}')
+ setvalue = True
+ if externalsrcbuild == externalsrc and '${B}' in cleandirs:
+ cleandirs.remove('${B}')
+ setvalue = True
+ if setvalue:
+ d.setVarFlag(task, 'cleandirs', ' '.join(cleandirs))
+
+ fetch_tasks = ['do_fetch', 'do_unpack']
+ # If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
+ d.appendVarFlag('do_configure', 'deps', ['do_unpack'])
+
for task in d.getVar("SRCTREECOVEREDTASKS", True).split():
+ if local_srcuri and task in fetch_tasks:
+ continue
bb.build.deltask(task, d)
+
+ d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
+
+ # Ensure compilation happens every time
+ d.setVarFlag('do_compile', 'nostamp', '1')
}
+python externalsrc_compile_prefunc() {
+ # Make it obvious that this is happening, since forgetting about it could lead to much confusion
+ bb.warn('Compiling %s from external source %s' % (d.getVar('PN', True), d.getVar('EXTERNALSRC', True)))
+}
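
Rather than blanking SRC_URI entirely, the class now keeps the local file:// entries so patches and extra files still apply against the external tree. The filtering step in isolation (example SRC_URI assumed):

    srcuri = "git://example.com/foo.git file://fix.patch file://defconfig"
    local_srcuri = [u for u in srcuri.split() if u.startswith("file://")]
    print(" ".join(local_srcuri))   # 'file://fix.patch file://defconfig'
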
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
index 2bf1e4bd1b..d122387ffd 100644
--- a/meta/classes/fontcache.bbclass
+++ b/meta/classes/fontcache.bbclass
@@ -8,11 +8,11 @@ inherit qemu
FONT_PACKAGES ??= "${PN}"
FONT_EXTRA_RDEPENDS ?= "fontconfig-utils"
-
+FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
fontcache_common() {
if [ "x$D" != "x" ] ; then
$INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} bindir=${bindir} \
- libdir=${libdir} base_libdir=${base_libdir}
+ libdir=${libdir} base_libdir=${base_libdir} fontconfigcachedir=${FONTCONFIG_CACHE_DIR}
else
fc-cache
fi
@@ -42,4 +42,4 @@ python add_fontcache_postinsts() {
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
-PACKAGEFUNCS += "add_fontcache_postinsts"
+PACKAGEFUNCS =+ "add_fontcache_postinsts"
diff --git a/meta/classes/image-buildinfo.bbclass b/meta/classes/image-buildinfo.bbclass
new file mode 100644
index 0000000000..aa17cc8f9e
--- /dev/null
+++ b/meta/classes/image-buildinfo.bbclass
@@ -0,0 +1,69 @@
+#
+# Writes build information to target filesystem on /etc/build
+#
+# Copyright (C) 2014 Intel Corporation
+# Author: Alejandro Enedino Hernandez Samaniego <alejandro.hernandez@intel.com>
+#
+# Licensed under the MIT license, see COPYING.MIT for details
+#
+# Usage: add INHERIT += "image-buildinfo" to your conf file
+#
+
+# Desired variables to display
+IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
+
+# From buildhistory.bbclass
+def image_buildinfo_outputvars(vars, listvars, d):
+ vars = vars.split()
+ listvars = listvars.split()
+ ret = ""
+ for var in vars:
+ value = d.getVar(var, True) or ""
+ if (d.getVarFlag(var, 'type') == "list"):
+ value = oe.utils.squashspaces(value)
+ ret += "%s = %s\n" % (var, value)
+ return ret.rstrip('\n')
+
+# Gets git branch's status (clean or dirty)
+def get_layer_git_status(path):
+ f = os.popen("cd %s; git diff --stat 2>&1 | tail -n 1" % path)
+ data = f.read()
+ if f.close() is None:
+ if len(data) != 0:
+ return "-- modified"
+ return ""
+
+# Returns layer revisions along with their respective status
+def get_layer_revs(d):
+ layers = (d.getVar("BBLAYERS", True) or "").split()
+ medadata_revs = ["%-17s = %s:%s %s" % (os.path.basename(i), \
+ base_get_metadata_git_branch(i, None).strip(), \
+ base_get_metadata_git_revision(i, None), \
+ get_layer_git_status(i)) \
+ for i in layers]
+ return '\n'.join(medadata_revs)
+
+def buildinfo_target(d):
+ # Get context
+ if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ return ""
+ # Single and list variables to be read
+ vars = (d.getVar("IMAGE_BUILDINFO_VARS", True) or "")
+ listvars = (d.getVar("IMAGE_BUILDINFO_LVARS", True) or "")
+ return image_buildinfo_outputvars(vars, listvars, d)
+
+# Write build information to target filesystem
+buildinfo () {
+cat > ${IMAGE_ROOTFS}${sysconfdir}/build << END
+-----------------------
+Build Configuration: |
+-----------------------
+${@buildinfo_target(d)}
+-----------------------
+Layer Revisions: |
+-----------------------
+${@get_layer_revs(d)}
+END
+}
+
+IMAGE_PREPROCESS_COMMAND += "buildinfo;"
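
With the default IMAGE_BUILDINFO_VARS, the /etc/build written by buildinfo() would look roughly like this (all values illustrative):

    -----------------------
    Build Configuration: |
    -----------------------
    DISTRO = poky
    DISTRO_VERSION = 1.8
    -----------------------
    Layer Revisions: |
    -----------------------
    meta              = master:a1b2c3d4e5
    meta-custom       = master:f6e5d4c3b2 -- modified
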
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 1c0fda7d60..01f8b3fc19 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -1,6 +1,6 @@
inherit rootfs_${IMAGE_PKGTYPE}
-inherit populate_sdk_base
+inherit populate_sdk_ext
TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
@@ -22,7 +22,7 @@ inherit ${TESTIMAGECLASS}
# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs empty-root-password allow-empty-password post-install-logging"
# rootfs bootstrap install
ROOTFS_BOOTSTRAP_INSTALL = "${@bb.utils.contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
@@ -52,7 +52,10 @@ def check_image_features(d):
features = set(oe.data.typed_value('IMAGE_FEATURES', d))
for feature in features:
if feature not in valid_features:
- bb.fatal("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
+ if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d):
+ raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
+ else:
+ raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
IMAGE_INSTALL ?= ""
IMAGE_INSTALL[type] = "list"
@@ -63,6 +66,7 @@ PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
EXCLUDE_FROM_WORLD = "1"
USE_DEVFS ?= "1"
+USE_DEPMOD ?= "1"
PID = "${@os.getpid()}"
@@ -72,8 +76,17 @@ LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
LDCONFIGDEPEND_libc-uclibc = ""
LDCONFIGDEPEND_libc-musl = ""
-do_rootfs[depends] += "makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND}"
-do_rootfs[depends] += "virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot"
+# This is needed to have depmod data in PKGDATA_DIR,
+# but if you're building small initramfs image
+# e.g. to include it in your kernel, you probably
+# don't want this dependency, which is causing dependency loop
+KERNELDEPMODDEPEND ?= "virtual/kernel:do_packagedata"
+
+do_rootfs[depends] += " \
+ makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \
+ virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \
+ ${KERNELDEPMODDEPEND} \
+"
do_rootfs[recrdeptask] += "do_packagedata"
def command_variables(d):
@@ -94,8 +107,8 @@ def rootfs_variables(d):
'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','RM_OLD_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS','SDK_OS',
'SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT','SDKTARGETSYSROOT','MULTILIBRE_ALLOW_REP',
'MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
- 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','BUILDNAME','USE_DEVFS',
- 'STAGING_KERNEL_DIR','COMPRESSIONTYPES']
+ 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
+ 'COMPRESSIONTYPES']
variables.extend(command_variables(d))
variables.extend(variable_depends(d))
return " ".join(variables)
@@ -156,20 +169,27 @@ IMAGE_CLASSES += "image_types"
inherit ${IMAGE_CLASSES}
IMAGE_POSTPROCESS_COMMAND ?= ""
-MACHINE_POSTPROCESS_COMMAND ?= ""
+
+# Zap the root password if debug-tweaks feature is not enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+
# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "ssh_allow_empty_password; ", "",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
+
# Enable postinst logging if debug-tweaks is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "postinst_enable_logging; ", "",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
+
# Write manifest
IMAGE_MANIFEST = "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest"
-ROOTFS_POSTPROCESS_COMMAND =+ "write_image_manifest ; "
+ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
# Set default postinst log file
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
# Set default target for systemd images
SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; ", "", d)}'
+ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
+
# some default locales
IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
@@ -188,18 +208,17 @@ do_rootfs[cleandirs] += "${S}"
do_rootfs[umask] = "022"
# A hook function to support read-only-rootfs IMAGE_FEATURES
-# Currently, it only supports sysvinit system.
read_only_rootfs_hook () {
# Tweak the mount option and fs_passno for rootfs in fstab
sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
- # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
+ # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
fi
- # Run populate-volatile.sh at rootfs time to set up basic files
- # and directories to support read-only rootfs.
+ # Run populate-volatile.sh at rootfs time to set up basic files
+ # and directories to support read-only rootfs.
if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
fi
@@ -316,7 +335,8 @@ MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
zap_empty_root_password () {
if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
- elif [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
+ fi
+ if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
fi
}
@@ -367,6 +387,22 @@ set_systemd_default_target () {
fi
}
+# If /var/volatile is not empty, we have seen problems where programs such as the
+# journal make assumptions based on the contents of /var/volatile. The journal
+# would then write to /var/volatile before it was mounted, thus hiding the
+# items previously written.
+#
+# This change is to attempt to fix those types of issues in a way that doesn't
+# affect users that may not be using /var/volatile.
+empty_var_volatile () {
+ if [ -e ${IMAGE_ROOTFS}/etc/fstab ]; then
+ match=`awk '$1 !~ "#" && $2 ~ /\/var\/volatile/{print $2}' ${IMAGE_ROOTFS}/etc/fstab 2> /dev/null`
+ if [ -n "$match" ]; then
+ find ${IMAGE_ROOTFS}/var/volatile -mindepth 1 -delete
+ fi
+ fi
+}
+
# Turn any symbolic /sbin/init link into a file
remove_init_link () {
if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
@@ -386,12 +422,7 @@ python write_image_manifest () {
from oe.rootfs import image_list_installed_packages
with open(d.getVar('IMAGE_MANIFEST', True), 'w+') as image_manifest:
image_manifest.write(image_list_installed_packages(d, 'ver'))
-}
-
-# Make login manager(s) enable automatic login.
-# Useful for devices where we do not want to log in at all (e.g. phones)
-set_image_autologin () {
- sed -i 's%^AUTOLOGIN=\"false"%AUTOLOGIN="true"%g' ${IMAGE_ROOTFS}/etc/sysconfig/gpelogin
+ image_manifest.write("\n")
}
# Can be used to create /etc/timestamp during image construction to give a reasonably
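
The debug-tweaks umbrella above is split into finer-grained features by switching to bb.utils.contains_any(), which yields its "true" argument as soon as any of the listed features is present. A simplified plain-Python model of that helper (the real one takes a variable name and the datastore):

    def contains_any(value, checkvalues, truevalue, falsevalue):
        # Simplified model of bb.utils.contains_any().
        return truevalue if set(value.split()) & set(checkvalues) else falsevalue

    features = "read-only-rootfs empty-root-password"
    print(contains_any(features,
                       ["debug-tweaks", "empty-root-password"],
                       "", "zap_empty_root_password ; "))
    # -> '' : zapping is skipped because empty-root-password is set
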
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index c7da4c3ed8..72c7337b6b 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -13,7 +13,7 @@ def imagetypes_getdepends(d):
deps = []
ctypes = d.getVar('COMPRESSIONTYPES', True).split()
for type in (d.getVar('IMAGE_FSTYPES', True) or "").split():
- if type == "vmdk" or type == "live" or type == "iso" or type == "hddimg":
+ if type in ["vmdk", "live", "iso", "hddimg"]:
type = "ext3"
basetype = type
for ctype in ctypes:
@@ -21,6 +21,8 @@ def imagetypes_getdepends(d):
basetype = type[:-len("." + ctype)]
adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
break
+ for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype, True) or "").split():
+ adddep(d.getVar('IMAGE_DEPENDS_%s' % typedepends, True) , deps)
adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
depstr = ""
@@ -56,9 +58,14 @@ IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
+MIN_BTRFS_SIZE ?= "16384"
IMAGE_CMD_btrfs () {
- touch ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
- mkfs.btrfs -b `expr ${ROOTFS_SIZE} \* 1024` ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
+ if [ ${ROOTFS_SIZE} -gt ${MIN_BTRFS_SIZE} ]; then
+ dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs count=${ROOTFS_SIZE} bs=1024
+ mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
+ else
+ bbfatal "Rootfs is too small for BTRFS (Rootfs Actual Size: ${ROOTFS_SIZE}, BTRFS Minimum Size: ${MIN_BTRFS_SIZE})"
+ fi
}
IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${EXTRA_IMAGECMD} -noappend"
@@ -134,7 +141,22 @@ IMAGE_DEPENDS_ubi = "mtd-utils-native"
IMAGE_DEPENDS_ubifs = "mtd-utils-native"
# This variable is available to request which values are suitable for IMAGE_FSTYPES
-IMAGE_TYPES = "jffs2 jffs2.sum cramfs ext2 ext2.gz ext2.bz2 ext3 ext3.gz ext2.lzma btrfs iso hddimg squashfs squashfs-xz squashfs-lzo ubi ubifs tar tar.gz tar.bz2 tar.xz tar.lz4 cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 vmdk elf"
+IMAGE_TYPES = " \
+ jffs2 jffs2.sum \
+ cramfs \
+ ext2 ext2.gz ext2.bz2 ext2.lzma \
+ ext3 ext3.gz \
+ ext4 ext4.gz \
+ btrfs \
+ iso \
+ hddimg \
+ squashfs squashfs-xz squashfs-lzo \
+ ubi ubifs \
+ tar tar.gz tar.bz2 tar.xz tar.lz4 \
+ cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
+ vmdk \
+ elf \
+"
COMPRESSIONTYPES = "gz bz2 lzma xz lz4 sum"
COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}.rootfs.${type}"
@@ -150,7 +172,7 @@ COMPRESS_DEPENDS_xz = "xz-native"
COMPRESS_DEPENDS_lz4 = "lz4-native"
COMPRESS_DEPENDS_sum = "mtd-utils-native"
-RUNNABLE_IMAGE_TYPES ?= "ext2 ext3"
+RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
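
Dependency resolution above now also follows IMAGE_TYPEDEP_<type>, and the base type is found by stripping a known compression suffix first. The suffix-stripping step as a standalone sketch:

    # Derive the base image type from a possibly compressed type name.
    ctypes = "gz bz2 lzma xz lz4 sum".split()

    def basetype(t):
        for ctype in ctypes:
            if t.endswith("." + ctype):
                return t[:-len("." + ctype)]
        return t

    assert basetype("ext3.gz") == "ext3"
    assert basetype("tar.xz") == "tar"
    assert basetype("btrfs") == "btrfs"
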
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index c6dea22618..ab7ca3b64b 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -30,11 +30,12 @@ WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
textrel already-stripped incompatible-license files-invalid \
installed-vs-shipped compile-host-path install-host-path \
pn-overrides infodir build-deps file-rdeps \
+ unknown-configure-option \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
split-strip packages-list pkgv-undefined var-undefined \
- version-going-backwards \
+ version-going-backwards expanded-d \
"
ALL_QA = "${WARN_QA} ${ERROR_QA}"
@@ -52,6 +53,13 @@ def package_qa_get_machine_dict():
"darwin9" : {
"arm" : (40, 0, 0, True, 32),
},
+ "eabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "elf" : {
+ "i586" : (3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ },
"linux" : {
"aarch64" : (183, 0, 0, True, 64),
"aarch64_be" :(183, 0, 0, False, 64),
@@ -771,31 +779,32 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
# Now do the sanity check!!!
- for rdepend in rdepends:
- if "-dbg" in rdepend and "debug-deps" not in skip:
- error_msg = "%s rdepends on %s" % (pkg,rdepend)
- sane = package_qa_handle_error("debug-deps", error_msg, d)
- if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
- error_msg = "%s rdepends on %s" % (pkg, rdepend)
- sane = package_qa_handle_error("dev-deps", error_msg, d)
- if rdepend not in packages:
- rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
- if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
- continue
- if not rdep_data or not 'PN' in rdep_data:
- pkgdata_dir = d.getVar("PKGDATA_DIR", True)
- try:
- possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
- except OSError:
- possibles = []
- for p in possibles:
- rdep_data = oe.packagedata.read_subpkgdata(p, d)
- if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
- break
- if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
- continue
- error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
- sane = package_qa_handle_error("build-deps", error_msg, d)
+ if "build-deps" not in skip:
+ for rdepend in rdepends:
+ if "-dbg" in rdepend and "debug-deps" not in skip:
+ error_msg = "%s rdepends on %s" % (pkg,rdepend)
+ sane = package_qa_handle_error("debug-deps", error_msg, d)
+ if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
+ error_msg = "%s rdepends on %s" % (pkg, rdepend)
+ sane = package_qa_handle_error("dev-deps", error_msg, d)
+ if rdepend not in packages:
+ rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
+ if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
+ continue
+ if not rdep_data or not 'PN' in rdep_data:
+ pkgdata_dir = d.getVar("PKGDATA_DIR", True)
+ try:
+ possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
+ except OSError:
+ possibles = []
+ for p in possibles:
+ rdep_data = oe.packagedata.read_subpkgdata(p, d)
+ if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
+ break
+ if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
+ continue
+ error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
+ sane = package_qa_handle_error("build-deps", error_msg, d)
if "file-rdeps" not in skip:
ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
@@ -905,6 +914,33 @@ def package_qa_check_deps(pkg, pkgdest, skip, d):
return sane
+QAPATHTEST[expanded-d] = "package_qa_check_expanded_d"
+def package_qa_check_expanded_d(path,name,d,elf,messages):
+ """
+ Check for the expanded D (${D}) value in pkg_* and FILES
+ variables and warn the user to use it correctly.
+ """
+
+ sane = True
+ expanded_d = d.getVar('D',True)
+
+ # Get packages for current recipe and iterate
+ packages = d.getVar('PACKAGES', True).split(" ")
+ for pak in packages:
+ # Go through all variables and check if expanded D is found, warn the user accordingly
+ for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
+ bbvar = d.getVar(var + "_" + pak)
+ if bbvar:
+ # Bitbake expands ${D} within bbvar during the previous step, so we check for its expanded value
+ if expanded_d in bbvar:
+ if var == 'FILES':
+ messages["expanded-d"] = "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % pak
+ sane = False
+ else:
+ messages["expanded-d"] = "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, pak)
+ sane = False
+ return sane
+
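
To illustrate what the expanded-d test catches (package and service names hypothetical): ${D} in a pkg_* scriptlet is expanded at parse time to the build host's image directory, so a guard written with it can never behave correctly, while the literal $D is only set when the scriptlet runs during rootfs creation:

    # wrong: ${D} expands to the build directory, so the test never passes
    pkg_postinst_mypkg () {
        if [ -z "${D}" ]; then
            /etc/init.d/myservice restart
        fi
    }

    # right: $D stays literal and is set (or empty) when the scriptlet runs
    pkg_postinst_mypkg () {
        if [ -z "$D" ]; then
            /etc/init.d/myservice restart
        fi
    }
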
# The PACKAGE FUNC to scan each package
python do_package_qa () {
import subprocess
@@ -1111,7 +1147,7 @@ do_configure[postfuncs] += "do_qa_configure "
python () {
tests = d.getVar('ALL_QA', True).split()
if "desktop" in tests:
- d.appendVar("PACKAGE_DEPENDS", "desktop-file-utils-native")
+ d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
###########################################################################
# Check various variables
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
index bbcfa15b84..6a6ad91866 100644
--- a/meta/classes/kernel-arch.bbclass
+++ b/meta/classes/kernel-arch.bbclass
@@ -40,7 +40,6 @@ def map_uboot_arch(a, d):
if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
elif re.match('i.86$', a): return 'x86'
- elif re.match('arm64$', a): return 'arm'
return a
export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}"
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
index 9a95b72744..e1a70e6215 100644
--- a/meta/classes/kernel-module-split.bbclass
+++ b/meta/classes/kernel-module-split.bbclass
@@ -70,12 +70,12 @@ python split_kernel_module_packages () {
m = kerverrexp.match(kernelver)
if m:
kernelver_stripped = m.group(1)
- staging_kernel_dir = d.getVar("STAGING_KERNEL_DIR", True)
+ staging_kernel_dir = d.getVar("STAGING_KERNEL_BUILDDIR", True)
system_map_file = "%s/boot/System.map-%s" % (dvar, kernelver)
if not os.path.exists(system_map_file):
system_map_file = "%s/System.map-%s" % (staging_kernel_dir, kernelver)
if not os.path.exists(system_map_file):
- bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_DIR '%s'" % (kernelver, dvar, staging_kernel_dir))
+ bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_BUILDDIR '%s'" % (kernelver, dvar, staging_kernel_dir))
cmd = "depmod -n -a -b %s -F %s %s" % (dvar, system_map_file, kernelver_stripped)
f = os.popen(cmd, 'r')
@@ -148,7 +148,7 @@ python split_kernel_module_packages () {
postinst = d.getVar('pkg_postinst_%s' % pkg, True)
if not postinst:
bb.fatal("pkg_postinst_%s not defined" % pkg)
- postinst += d.getVar('autoload_postinst_fragment', True) % autoload
+ postinst += d.getVar('autoload_postinst_fragment', True) % (autoload or basename)
d.setVar('pkg_postinst_%s' % pkg, postinst)
# Write out any modconf fragment
@@ -176,6 +176,9 @@ python split_kernel_module_packages () {
rdepends[dep] = []
d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ # Avoid automatic -dev recommendations for modules ending with -dev.
+ d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
+
module_deps = parse_depmod()
module_regex = '^(.*)\.k?o$'
module_pattern = 'kernel-module-%s'
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index f42a5c2534..650ae5a473 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -1,7 +1,5 @@
-S = "${WORKDIR}/linux"
-
# remove tasks that modify the source tree in case externalsrc is inherited
-SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_patch"
+SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_shared_workdir do_fetch do_unpack do_patch"
# returns local (absolute) path names for all valid patches in the
# src_uri
@@ -57,7 +55,8 @@ def get_machine_branch(d, default):
return default
-do_patch() {
+do_kernel_metadata() {
+ set +e
cd ${S}
export KMETA=${KMETA}
@@ -77,22 +76,56 @@ do_patch() {
machine_srcrev="${SRCREV}"
fi
+ # In a similar manner to the kernel itself:
+ #
+ # defconfig: $(obj)/conf
+ # ifeq ($(KBUILD_DEFCONFIG),)
+ # $< --defconfig $(Kconfig)
+ # else
+ # @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
+ # $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
+ # endif
+ #
+ # If a defconfig is specified via the KBUILD_DEFCONFIG variable, we copy it
+ # from the source tree, into a common location and normalized "defconfig" name,
+ # where the rest of the process will include and incorporate it into the build
+ #
+ # If the fetcher has already placed a defconfig in WORKDIR (from the SRC_URI),
+ # we don't overwrite it, but instead warn the user that SRC_URI defconfigs take
+ # precedence.
+ #
+ if [ -n "${KBUILD_DEFCONFIG}" ]; then
+ if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
+ if [ -f "${WORKDIR}/defconfig" ]; then
+ # If the two defconfigs are the same, leave the existing one in place
+ cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
+ if [ $? -ne 0 ]; then
+ bbnote "defconfig detected in WORKDIR. ${KBUILD_DEFCONFIG} skipped"
+ else
+ cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
+ sccs="${WORKDIR}/defconfig"
+ fi
+ fi
+ else
+ bbfatal "A KBUILD_DECONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
+ fi
+ fi
+
# if we have a defined/set meta branch we should not be generating
# any meta data. The passed branch has what we need.
if [ -n "${KMETA}" ]; then
createme_flags="--disable-meta-gen --meta ${KMETA}"
fi
- createme ${createme_flags} ${ARCH} ${machine_branch}
+ createme -v -v ${createme_flags} ${ARCH} ${machine_branch}
if [ $? -ne 0 ]; then
bbfatal "Could not create ${machine_branch}"
fi
- sccs="${@" ".join(find_sccs(d))}"
+ sccs="$sccs ${@" ".join(find_sccs(d))}"
patches="${@" ".join(find_patches(d))}"
feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
- set +e
# add any explicitly referenced features onto the end of the feature
# list that is passed to the kernel build scripts.
if [ -n "${KERNEL_FEATURES}" ]; then
@@ -120,6 +153,10 @@ do_patch() {
if [ $? -ne 0 ]; then
bbfatal "Could not update ${machine_branch}"
fi
+}
+
+do_patch() {
+ cd ${S}
# executes and modifies the source tree as required
patchme ${KMACHINE}
@@ -131,7 +168,7 @@ do_patch() {
# check to see if the specified SRCREV is reachable from the final branch.
# if it wasn't something wrong has happened, and we should error.
if [ "${machine_srcrev}" != "AUTOINC" ]; then
- if ! [ "$(git rev-parse --verify ${machine_srcrev})" = "$(git merge-base ${machine_srcrev} HEAD)" ]; then
+ if ! [ "$(git rev-parse --verify ${machine_srcrev}~0)" = "$(git merge-base ${machine_srcrev} HEAD)" ]; then
bberror "SRCREV ${machine_srcrev} was specified, but is not reachable"
bbfatal "Check the BSP description for incorrect branch selection, or other errors."
fi
@@ -181,9 +218,11 @@ do_kernel_checkout() {
bberror "S is not set to the linux source directory. Check "
bbfatal "the recipe and set S to the proper extracted subdirectory"
fi
+ rm -f .gitignore
git init
git add .
git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
+ git clean -d -f
fi
# end debare
@@ -211,7 +250,7 @@ do_kernel_checkout() {
# Create a working tree copy of the kernel by checking out a branch
machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
- if [ $? -eq 0 ]; then
+ if [ $? -ne 0 ]; then
machine_branch="master"
fi
@@ -221,6 +260,8 @@ do_kernel_checkout() {
do_kernel_checkout[dirs] = "${S}"
addtask kernel_checkout before do_patch after do_unpack
+addtask kernel_metadata after do_validate_branches before do_patch
+do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
@@ -248,13 +289,11 @@ do_kernel_configme() {
echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
}
-addtask kernel_configme after do_patch
+addtask kernel_configme before do_configure after do_patch
python do_kernel_configcheck() {
import re, string, sys
- bb.plain("NOTE: validating kernel config, see log.do_kernel_configcheck for details")
-
# if KMETA isn't set globally by a recipe using this routine, we need to
# set the default to 'meta'. Otherwise, kconf_check is not passed a valid
# meta-series for processing
@@ -266,11 +305,33 @@ python do_kernel_configcheck() {
cmd = d.expand("cd ${S}; kconf_check -config- %s/meta-series ${S} ${B}" % kmeta)
ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
- config_check_visibility = d.getVar( "KCONF_AUDIT_LEVEL", True ) or 1
- if config_check_visibility == 1:
- bb.debug( 1, "%s" % result )
- else:
- bb.note( "%s" % result )
+ config_check_visibility = int(d.getVar( "KCONF_AUDIT_LEVEL", True ) or 0)
+ bsp_check_visibility = int(d.getVar( "KCONF_BSP_AUDIT_LEVEL", True ) or 0)
+
+ # if config check visibility is non-zero, report dropped configuration values
+ mismatch_file = "${S}/" + kmeta + "/" + "mismatch.cfg"
+ if os.path.exists(mismatch_file):
+ if config_check_visibility:
+ with open (mismatch_file, "r") as myfile:
+ results = myfile.read()
+ bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
+
+ # if config check visibility is level 2 or higher, report non-hardware options
+ nonhw_file = "${S}/" + kmeta + "/" + "nonhw_report.cfg"
+ if os.path.exists(nonhw_file):
+ if config_check_visibility > 1:
+ with open (nonhw_file, "r") as myfile:
+ results = myfile.read()
+ bb.warn( "[kernel config]: BSP specified non-hw configuration:\n\n%s" % results)
+
+ bsp_desc = "${S}/" + kmeta + "/" + "top_tgt"
+ if os.path.exists(bsp_desc) and bsp_check_visibility > 1:
+ with open (bsp_desc, "r") as myfile:
+ bsp_tgt = myfile.read()
+ m = re.match("^(.*)scratch.obj(.*)$", bsp_tgt)
+ if not m is None:
+ bb.warn( "[kernel]: An auto generated BSP description was used, this normally indicates a misconfiguration.\n" +
+ "Check that your machine (%s) has an associated kernel description." % "${MACHINE}" )
}
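
The config audit output is now opt-in rather than unconditionally logged. Raising the two levels introduced above from local.conf surfaces the reports as warnings; a sketch:

    # local.conf: warn about dropped config values (level >= 1) and
    # about BSP-specified non-hardware options (level >= 2)
    KCONF_AUDIT_LEVEL = "1"
    KCONF_BSP_AUDIT_LEVEL = "2"
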
# Ensure that the branches (BSP and meta) are on the locations specified by
@@ -288,12 +349,14 @@ do_validate_branches() {
# check and we can exit early
if [ "${machine_srcrev}" = "AUTOINC" ]; then
bbnote "SRCREV validation is not required for AUTOREV"
- elif [ "${machine_srcrev}" = "" ] && [ "${SRCREV}" != "AUTOINC" ]; then
- # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
- # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
- # this case, we need to reset to the give SRCREV before heading to patching
- bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
- force_srcrev="${SRCREV}"
+ elif [ "${machine_srcrev}" = "" ]; then
+ if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
+ # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
+ # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
+ # this case, we need to reset to the given SRCREV before heading to patching
+ bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
+ force_srcrev="${SRCREV}"
+ fi
else
git cat-file -t ${machine_srcrev} > /dev/null
if [ $? -ne 0 ]; then
@@ -350,8 +413,7 @@ do_kernel_link_vmlinux() {
ln -sf ../../../vmlinux
}
-OE_TERMINAL_EXPORTS += "GUILT_BASE KBUILD_OUTPUT"
-GUILT_BASE = "meta"
+OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
KBUILD_OUTPUT = "${B}"
python () {
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 2a6ec34c36..125ed88406 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -3,6 +3,11 @@ inherit linux-kernel-base kernel-module-split
PROVIDES += "virtual/kernel"
DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross bc-native"
+S = "${STAGING_KERNEL_DIR}"
+B = "${WORKDIR}/build"
+KBUILD_OUTPUT = "${B}"
+OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
+
# we include gcc above, we dont need virtual/libc
INHIBIT_DEFAULT_DEPS = "1"
@@ -31,6 +36,26 @@ python __anonymous () {
d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
}
+# Old style kernels may set ${S} = ${WORKDIR}/git for example
+# We need to move these over to STAGING_KERNEL_DIR. We can't just
+# create the symlink in advance as the git fetcher can't cope with
+# the symlink.
+do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
+do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
+base_do_unpack_append () {
+ s = d.getVar("S", True)
+ if s[-1] == '/':
+ # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
+ s=s[:-1]
+ kernsrc = d.getVar("STAGING_KERNEL_DIR", True)
+ if s != kernsrc:
+ bb.utils.mkdirhier(kernsrc)
+ bb.utils.remove(kernsrc, recurse=True)
+ import subprocess
+ subprocess.call(d.expand("mv ${S} ${STAGING_KERNEL_DIR}"), shell=True)
+ os.symlink(kernsrc, s)
+}
+
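
The append above is a compatibility shim: a recipe written before this change that still sets, for example,

    S = "${WORKDIR}/git"

needs no modification; after do_unpack the sources are moved into STAGING_KERNEL_DIR and ${WORKDIR}/git is left as a symlink to it.
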
inherit kernel-arch deploy
PACKAGES_DYNAMIC += "^kernel-module-.*"
@@ -55,7 +80,7 @@ KERNEL_IMAGEDEST = "boot"
#
export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}"
-KERNEL_VERSION = "${@get_kernelversion('${B}')}"
+KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
KERNEL_LOCALVERSION ?= ""
@@ -206,116 +231,70 @@ kernel_do_install() {
[ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
install -d ${D}${sysconfdir}/modules-load.d
install -d ${D}${sysconfdir}/modprobe.d
+}
+do_install[prefuncs] += "package_get_auto_pr"
- #
- # Support for external module building - create a minimal copy of the
- # kernel source tree.
- #
- kerneldir=${D}${KERNEL_SRC_PATH}
- install -d $kerneldir
- mkdir -p ${D}/lib/modules/${KERNEL_VERSION}
- ln -sf ${KERNEL_SRC_PATH} "${D}/lib/modules/${KERNEL_VERSION}/build"
-
- #
- # Store the kernel version in sysroots for module-base.bbclass
- #
-
- echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
+addtask shared_workdir after do_compile before do_install
- #
- # Store kernel image name to allow use during image generation
- #
+emit_depmod_pkgdata() {
+ # Stash data for depmod
+ install -d ${PKGDESTWORK}/kernel-depmod/
+ echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/kernel-depmod/kernel-abiversion
+ cp System.map ${PKGDESTWORK}/kernel-depmod/System.map-${KERNEL_VERSION}
+}
- echo "${KERNEL_IMAGE_BASE_NAME}" >$kerneldir/kernel-image-name
+PACKAGEFUNCS += "emit_depmod_pkgdata"
- #
- # Copy the entire source tree. In case an external build directory is
- # used, copy the build directory over first, then copy over the source
- # dir. This ensures the original Makefiles are used and not the
- # redirecting Makefiles in the build directory.
- #
- find . -depth -not -name "*.cmd" -not -name "*.o" -not -name "*.so.dbg" -not -name "*.so" -not -path "./Documentation*" -not -path "./source*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir
- cp .config $kerneldir
- if [ "${S}" != "${B}" ]; then
- pwd="$PWD"
- cd "${S}"
- find . -depth -not -path "./Documentation*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir
- cd "$pwd"
- fi
-
- # Test to ensure that the output file and image type are not actually
- # the same file. If hardlinking is used, they will be the same, and there's
- # no need to install.
- ! [ ${KERNEL_OUTPUT} -ef $kerneldir/${KERNEL_IMAGETYPE} ] && install -m 0644 ${KERNEL_OUTPUT} $kerneldir/${KERNEL_IMAGETYPE}
- install -m 0644 System.map $kerneldir/System.map-${KERNEL_VERSION}
+do_shared_workdir () {
+ cd ${B}
- # Dummy Makefile so the clean below works
- mkdir $kerneldir/Documentation
- touch $kerneldir/Documentation/Makefile
+ kerneldir=${STAGING_KERNEL_BUILDDIR}
+ install -d $kerneldir
#
- # Clean and remove files not needed for building modules.
- # Some distributions go through a lot more trouble to strip out
- # unecessary headers, for now, we just prune the obvious bits.
- #
- # We don't want to leave host-arch binaries in /sysroots, so
- # we clean the scripts dir while leaving the generated config
- # and include files.
+ # Store the kernel version in sysroots for module-base.bbclass
#
- oe_runmake -C $kerneldir CC="${KERNEL_CC}" LD="${KERNEL_LD}" clean _mrproper_scripts
- # hide directories that shouldn't have their .c, s and S files deleted
- for d in tools scripts lib; do
- mv $kerneldir/$d $kerneldir/.$d
- done
-
- # delete .c, .s and .S files, unless we hid a directory as .<dir>. This technique is
- # much faster than find -prune and -exec
- find $kerneldir -not -path '*/\.*' -type f -name "*.[csS]" -delete
+ echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
- # put the hidden dirs back
- for d in tools scripts lib; do
- mv $kerneldir/.$d $kerneldir/$d
- done
+ # Copy files required for module builds
+ cp System.map $kerneldir/System.map-${KERNEL_VERSION}
+ cp Module.symvers $kerneldir/
+ cp .config $kerneldir/
+ mkdir -p $kerneldir/include/config
+ cp include/config/kernel.release $kerneldir/include/config/kernel.release
+
+ # We can also copy over all the generated files and avoid special cases
+ # like version.h, but we've opted to keep this small until file creep starts
+ # to happen
+ if [ -e include/linux/version.h ]; then
+ mkdir -p $kerneldir/include/linux
+ cp include/linux/version.h $kerneldir/include/linux/version.h
+ fi
# As of Linux kernel version 3.0.1, the clean target removes
# arch/powerpc/lib/crtsavres.o which is present in
# KBUILD_LDFLAGS_MODULE, making it required to build external modules.
if [ ${ARCH} = "powerpc" ]; then
- cp -l arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
+ mkdir -p $kerneldir/arch/powerpc/lib/
+ cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
fi
- # Necessary for building modules like compat-wireless.
- if [ -f include/generated/bounds.h ]; then
- cp -l include/generated/bounds.h $kerneldir/include/generated/bounds.h
- fi
+ mkdir -p $kerneldir/include/generated/
+ cp -fR include/generated/* $kerneldir/include/generated/
+
if [ -d arch/${ARCH}/include/generated ]; then
mkdir -p $kerneldir/arch/${ARCH}/include/generated/
- cp -flR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
- fi
-
- # Remove the following binaries which cause strip or arch QA errors
- # during do_package for cross-compiled platforms
- bin_files="arch/powerpc/boot/addnote arch/powerpc/boot/hack-coff \
- arch/powerpc/boot/mktree scripts/kconfig/zconf.tab.o \
- scripts/kconfig/conf.o scripts/kconfig/kxgettext.o"
- for entry in $bin_files; do
- rm -f $kerneldir/$entry
- done
-
- # kernels <2.6.30 don't have $kerneldir/tools directory so we check if it exists before calling sed
- if [ -f $kerneldir/tools/perf/Makefile ]; then
- # Fix SLANG_INC for slang.h
- sed -i 's#-I/usr/include/slang#-I=/usr/include/slang#g' $kerneldir/tools/perf/Makefile
+ cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
fi
}
-do_install[prefuncs] += "package_get_auto_pr"
-python sysroot_stage_all () {
- oe.path.copyhardlinktree(d.expand("${D}${KERNEL_SRC_PATH}"), d.expand("${SYSROOT_DESTDIR}${KERNEL_SRC_PATH}"))
+# We don't need to stage anything here; in particular not the modules or firmware, since those would clash with linux-firmware
+sysroot_stage_all () {
+ :
}
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call oldnoconfig || yes '' | oe_runmake oldconfig"
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} oldnoconfig || yes '' | oe_runmake -C ${S} O=${B} oldconfig"
kernel_do_configure() {
# fixes extra + in /lib/modules/2.6.37+
@@ -324,6 +303,10 @@ kernel_do_configure() {
# $ make kernelrelease => 2.6.37+
touch ${B}/.scmversion ${S}/.scmversion
+ if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
+ mv "${S}/.config" "${B}/.config"
+ fi
+
# Copy defconfig to .config if .config does not exist. This allows
# recipes to manage the .config themselves in do_configure_prepend().
if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
@@ -499,7 +482,7 @@ kernel_do_deploy() {
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
do_deploy[prefuncs] += "package_get_auto_pr"
-addtask deploy before do_build after do_install
+addtask deploy after do_populate_sysroot
EXPORT_FUNCTIONS do_deploy
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
new file mode 100644
index 0000000000..9efd46a92d
--- /dev/null
+++ b/meta/classes/kernelsrc.bbclass
@@ -0,0 +1,10 @@
+S = "${STAGING_KERNEL_DIR}"
+do_fetch[noexec] = "1"
+do_unpack[depends] += "virtual/kernel:do_patch"
+do_unpack[noexec] = "1"
+do_patch[noexec] = "1"
+do_package[depends] += "virtual/kernel:do_populate_sysroot"
+KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
+
+inherit linux-kernel-base
+
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index c1bc399c18..793936e10b 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -268,6 +268,7 @@ python package_do_split_gconvs () {
locale_arch_options = { \
"arm": " --uint32-align=4 --little-endian ", \
"armeb": " --uint32-align=4 --big-endian ", \
+ "aarch64": " --uint32-align=4 --little-endian ", \
"aarch64_be": " --uint32-align=4 --big-endian ", \
"sh4": " --uint32-align=4 --big-endian ", \
"powerpc": " --uint32-align=4 --big-endian ", \
@@ -298,9 +299,7 @@ python package_do_split_gconvs () {
--inputfile=%s/i18n/locales/%s --charmap=%s %s" \
% (treedir, datadir, locale, encoding, name)
- qemu_options = d.getVar("QEMU_OPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True)
- if not qemu_options:
- qemu_options = d.getVar('QEMU_OPTIONS', True)
+ qemu_options = d.getVar('QEMU_OPTIONS', True)
cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
-E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index 14d3107c4a..73a0e9727e 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -49,24 +49,33 @@ license_create_manifest() {
pkged_pv="$(sed -n 's/^PV: //p' ${filename})"
pkged_name="$(basename $(readlink ${filename}))"
- pkged_lic="$(sed -n "/^LICENSE_${pkged_name}: /{ s/^LICENSE_${pkged_name}: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
- if [ -z ${pkged_lic} ]; then
+ pkged_lic="$(sed -n "/^LICENSE_${pkged_name}: /{ s/^LICENSE_${pkged_name}: //; p }" ${filename})"
+ pkged_size="$(sed -n "/^PKGSIZE_${pkged_name}: /{ s/^PKGSIZE_${pkged_name}: //; p }" ${filename})"
+ if [ -z "${pkged_lic}" ]; then
# fallback checking value of LICENSE
- pkged_lic="$(sed -n "/^LICENSE: /{ s/^LICENSE: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
+ pkged_lic="$(sed -n "/^LICENSE: /{ s/^LICENSE: //; p }" ${filename})"
fi
echo "PACKAGE NAME:" ${pkg} >> ${LICENSE_MANIFEST}
echo "PACKAGE VERSION:" ${pkged_pv} >> ${LICENSE_MANIFEST}
echo "RECIPE NAME:" ${pkged_pn} >> ${LICENSE_MANIFEST}
- printf "LICENSE:" >> ${LICENSE_MANIFEST}
- for lic in ${pkged_lic}; do
+ echo "LICENSE:" ${pkged_lic} >> ${LICENSE_MANIFEST}
+ echo "" >> ${LICENSE_MANIFEST}
+
+ # If the package doesn't contain any files, that is, its size is 0, its
+ # license isn't relevant as far as the final image is concerned, so the
+ # license check doesn't make much sense; skip it.
+ if [ "$pkged_size" = "0" ]; then
+ continue
+ fi
+
+ lics="$(echo ${pkged_lic} | sed "s/[|&()*]/ /g" | sed "s/ */ /g" )"
+ for lic in ${lics}; do
# to reference a license file trim trailing + symbol
if ! [ -e "${LICENSE_DIRECTORY}/${pkged_pn}/generic_${lic%+}" ]; then
bbwarn "The license listed ${lic} was not in the licenses collected for ${pkged_pn}"
fi
- printf " ${lic}" >> ${LICENSE_MANIFEST}
done
- printf "\n\n" >> ${LICENSE_MANIFEST}
done
# Two options here:
@@ -79,7 +88,7 @@ license_create_manifest() {
if [ "${COPY_LIC_DIRS}" = "1" ]; then
for pkg in ${INSTALLED_PKGS}; do
mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}
- pkged_pn="$(oe-pkgdata-util lookup-recipe ${PKGDATA_DIR} ${pkg})"
+ pkged_pn="$(oe-pkgdata-util -p ${PKGDATA_DIR} lookup-recipe ${pkg})"
for lic in `ls ${LICENSE_DIRECTORY}/${pkged_pn}`; do
# Really don't need to copy the generics as they're
# represented in the manifest and in the actual pkg licenses
@@ -149,12 +158,12 @@ def copy_license_files(lic_files_paths, destdir):
dst = os.path.join(destdir, basename)
if os.path.exists(dst):
os.remove(dst)
- if (os.stat(src).st_dev == os.stat(destdir).st_dev):
+ if os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev):
os.link(src, dst)
else:
shutil.copyfile(src, dst)
except Exception as e:
- bb.warn("Could not copy license file %s: %s" % (basename, e))
+ bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
def find_license_files(d):
"""
@@ -285,6 +294,31 @@ def canonical_license(d, license):
lic += '+'
return lic or license
+def expand_wildcard_licenses(d, wildcard_licenses):
+ """
+ Return actual SPDX-format license names if a wildcard is used. We expand
+ wildcards from SPDXLICENSEMAP flags and SRC_DISTRIBUTE_LICENSES values.
+ """
+ import fnmatch
+ licenses = []
+ spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
+ for wld_lic in wildcard_licenses:
+ spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
+ licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
+
+ spdx_lics = (d.getVar('SRC_DISTRIBUTE_LICENSES') or '').split()
+ for wld_lic in wildcard_licenses:
+ licenses += fnmatch.filter(spdx_lics, wld_lic)
+
+ licenses = list(set(licenses))
+ return licenses
+
+def incompatible_license_contains(license, truevalue, falsevalue, d):
+ license = canonical_license(d, license)
+ bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
+ bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+ return truevalue if license in bad_licenses else falsevalue
+
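
Together these helpers allow globs in INCOMPATIBLE_LICENSE and let recipes react when a license family is blacklisted. A hedged sketch of the call pattern (the gnutls PACKAGECONFIG is illustrative, not taken from this patch):

    # local.conf: blacklist the whole GPLv3 family with a wildcard
    INCOMPATIBLE_LICENSE = "*GPLv3"

    # recipe: drop a GPLv3-licensed option when that license is blacklisted
    PACKAGECONFIG ??= "${@incompatible_license_contains('GPLv3', '', 'gnutls', d)}"
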
def incompatible_license(d, dont_want_licenses, package=None):
"""
This function checks if a recipe has only incompatible licenses. It also
@@ -383,12 +417,37 @@ def check_license_flags(d):
return unmatched_flag
return None
+def check_license_format(d):
+ """
+ This function checks if LICENSE is well defined,
+ Validate operators in LICENSES.
+ No spaces are allowed between LICENSES.
+ """
+ pn = d.getVar('PN', True)
+ licenses = d.getVar('LICENSE', True)
+ from oe.license import license_operator, license_operator_chars, license_pattern
+
+ elements = filter(lambda x: x.strip(), license_operator.split(licenses))
+ for pos, element in enumerate(elements):
+ if license_pattern.match(element):
+ if pos > 0 and license_pattern.match(elements[pos - 1]):
+ bb.warn('%s: LICENSE value "%s" has an invalid format - license names ' \
+ 'must be separated by the following characters to indicate ' \
+ 'the license selection: %s' %
+ (pn, licenses, license_operator_chars))
+ elif not license_operator.match(element):
+ bb.warn('%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
+ 'in the valid list of separators (%s)' %
+ (pn, licenses, element, license_operator_chars))
+
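
Illustrative LICENSE values and how the new check is expected to treat them, assuming the operator and name patterns in oe.license behave as their names suggest:

    # well formed: names joined by an explicit operator
    LICENSE = "GPLv2 & MIT"
    # warned: two license names with no operator between them
    #LICENSE = "GPLv2 MIT"
    # warned: ';' is not among the valid separator characters
    #LICENSE = "GPLv2; MIT"
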
SSTATETASKS += "do_populate_lic"
do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
+do_populate_lic_setscene[dirs] = "${LICSSTATEDIR}/${PN}"
+do_populate_lic_setscene[cleandirs] = "${LICSSTATEDIR}"
python do_populate_lic_setscene () {
sstate_setscene(d)
}
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
index 4f2b0a4a98..89ce71605c 100644
--- a/meta/classes/linux-kernel-base.bbclass
+++ b/meta/classes/linux-kernel-base.bbclass
@@ -1,5 +1,5 @@
# parse kernel ABI version out of <linux/version.h>
-def get_kernelversion(p):
+def get_kernelversion_headers(p):
import re
fn = p + '/include/linux/utsrelease.h'
@@ -9,7 +9,6 @@ def get_kernelversion(p):
if not os.path.isfile(fn):
fn = p + '/include/linux/version.h'
- import re
try:
f = open(fn, 'r')
except IOError:
@@ -24,6 +23,16 @@ def get_kernelversion(p):
return m.group(1)
return None
+
+def get_kernelversion_file(p):
+ fn = p + '/kernel-abiversion'
+
+ try:
+ with open(fn, 'r') as f:
+ return f.readlines()[0].strip()
+ except IOError:
+ return None
+
def linux_module_packages(s, d):
suffix = ""
return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
index 9537ba9f43..3eb2e9226e 100644
--- a/meta/classes/module-base.bbclass
+++ b/meta/classes/module-base.bbclass
@@ -3,16 +3,24 @@ inherit kernel-arch
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
-export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}"
+# This points to the build artefacts from the main kernel build
+# such as .config and System.map
+# Confusingly it is not the module build output (which is ${B}) but
+# we didn't pick the name.
+export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
+
+export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
KERNEL_OBJECT_SUFFIX = ".ko"
# kernel modules are generally machine specific
PACKAGE_ARCH = "${MACHINE_ARCH}"
+do_configure[depends] += "virtual/kernel:do_shared_workdir"
+
# Function to ensure the kernel scripts are created. Expected to
# be called before do_compile. See module.bbclass for an example.
do_make_scripts() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
- -C ${STAGING_KERNEL_DIR} scripts
+ -C ${STAGING_KERNEL_DIR} O=${STAGING_KERNEL_BUILDDIR} scripts
}
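
With the scripts generated against the separate build directory, an out-of-tree module recipe needs no O= handling of its own; inheriting module is enough. A minimal recipe skeleton under that assumption (file names illustrative; a real recipe also needs LIC_FILES_CHKSUM):

    SUMMARY = "Example out-of-tree kernel module"
    LICENSE = "GPLv2"
    SRC_URI = "file://Makefile \
               file://hello.c \
              "
    S = "${WORKDIR}"

    inherit module
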
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
index ad6f7af1bb..a03cc74de4 100644
--- a/meta/classes/module.bbclass
+++ b/meta/classes/module.bbclass
@@ -6,21 +6,23 @@ addtask make_scripts after do_patch before do_compile
do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
do_make_scripts[deptask] = "do_populate_sysroot"
+EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
+
module_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
- KERNEL_SRC=${STAGING_KERNEL_DIR} \
KERNEL_VERSION=${KERNEL_VERSION} \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
AR="${KERNEL_AR}" \
+ O=${STAGING_KERNEL_BUILDDIR} \
${MAKE_TARGETS}
}
module_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
- KERNEL_SRC=${STAGING_KERNEL_DIR} \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
+ O=${STAGING_KERNEL_BUILDDIR} \
modules_install
}
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index dcd364b92c..5ca5c95b4d 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -61,16 +61,17 @@ PTEST_ENABLED = "0"
export CONFIG_SITE = "${COREBASE}/meta/site/native"
# set the compiler as well. It could have been set to something else
-export CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}"
-export CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}"
-export FC = "${CCACHE}${HOST_PREFIX}gfortran ${HOST_CC_ARCH}"
-export CPP = "${HOST_PREFIX}gcc ${HOST_CC_ARCH} -E"
-export LD = "${HOST_PREFIX}ld ${HOST_LD_ARCH} "
-export CCLD = "${CC}"
-export AR = "${HOST_PREFIX}ar"
-export AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}"
-export RANLIB = "${HOST_PREFIX}ranlib"
-export STRIP = "${HOST_PREFIX}strip"
+export CC = "${BUILD_CC}"
+export CXX = "${BUILD_CXX}"
+export FC = "${BUILD_FC}"
+export CPP = "${BUILD_CPP}"
+export LD = "${BUILD_LD}"
+export CCLD = "${BUILD_CCLD}"
+export AR = "${BUILD_AR}"
+export AS = "${BUILD_AS}"
+export RANLIB = "${BUILD_RANLIB}"
+export STRIP = "${BUILD_STRIP}"
+export NM = "${BUILD_NM}"
# Path prefixes
base_prefix = "${STAGING_DIR_NATIVE}"
@@ -131,7 +132,9 @@ python native_virtclass_handler () {
deps = bb.utils.explode_deps(deps)
newdeps = []
for dep in deps:
- if "-cross-" in dep:
+ if dep == pn:
+ continue
+ elif "-cross-" in dep:
newdeps.append(dep.replace("-cross", "-native"))
elif not dep.endswith("-native"):
newdeps.append(dep + "-native")
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index 5e78116ab8..30bcdfeb44 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -25,6 +25,7 @@ EXTRANATIVEPATH += "chrpath-native"
STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
+PKGDATA_DIR = "${STAGING_DIR_HOST}/pkgdata"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
index d00f468d9a..3e01503756 100644
--- a/meta/classes/oelint.bbclass
+++ b/meta/classes/oelint.bbclass
@@ -1,4 +1,4 @@
-addtask lint before do_fetch
+addtask lint before do_build
do_lint[nostamp] = "1"
python do_lint() {
pkgname = d.getVar("PN", True)
@@ -54,6 +54,12 @@ python do_lint() {
f.close()
return ret
+ def checkPN(pkgname, varname, str):
+ if str.find("{PN}") != -1:
+ bb.warn("%s: should use BPN instead of PN in %s" % (pkgname, varname))
+ if str.find("{P}") != -1:
+ bb.warn("%s: should use BP instead of P in %s" % (pkgname, varname))
+
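
checkPN flags recipes that bake ${PN} or ${P} into values that should stay stable across variants, since PN becomes e.g. foo-native for native builds while BPN does not. A SRC_URI like the following (URL hypothetical) would now draw the warning:

    SRC_URI = "http://example.com/releases/${PN}-${PV}.tar.gz"
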
length = len("file://")
for item in srcuri:
if item.startswith("file://"):
@@ -72,14 +78,7 @@ python do_lint() {
#
for s in srcuri:
if not s.startswith("file://"):
- if not s.find("{PN}") == -1:
- bb.warn("%s: should use BPN instead of PN in SRC_URI" % pkgname)
- if not s.find("{P}") == -1:
- bb.warn("%s: should use BP instead of P in SRC_URI" % pkgname)
+ checkPN(pkgname, 'SRC_URI', s)
- srcpath = d.getVar("S")
- if not srcpath.find("{PN}") == -1:
- bb.warn("%s: should use BPN instead of PN in S" % pkgname)
- if not srcpath.find("{P}") == -1:
- bb.warn("%s: should use BP instead of P in S" % pkgname)
+ checkPN(pkgname, 'S', d.getVar('S'))
}
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index 4cddbefe08..5558d0d100 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -239,6 +239,66 @@ python () {
d.appendVarFlag('do_package', 'deptask', " do_packagedata")
}
+# Get a list of files from file vars by searching files under current working directory
+# The list contains symlinks, directories and normal files.
+def files_from_filevars(filevars):
+ import os,glob
+ cpath = oe.cachedpath.CachedPath()
+ files = []
+ for f in filevars:
+ if os.path.isabs(f):
+ f = '.' + f
+ if not f.startswith("./"):
+ f = './' + f
+ globbed = glob.glob(f)
+ if globbed:
+ if [ f ] != globbed:
+ files += globbed
+ continue
+ files.append(f)
+
+ for f in files:
+ if not cpath.islink(f):
+ if cpath.isdir(f):
+ newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
+ if newfiles:
+ files += newfiles
+
+ return files
+
+# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
+def get_conffiles(pkg, d):
+ pkgdest = d.getVar('PKGDEST', True)
+ root = os.path.join(pkgdest, pkg)
+ cwd = os.getcwd()
+ os.chdir(root)
+
+ conffiles = d.getVar('CONFFILES_%s' % pkg, True);
+ if conffiles == None:
+ conffiles = d.getVar('CONFFILES', True)
+ if conffiles == None:
+ conffiles = ""
+ conffiles = conffiles.split()
+ conf_orig_list = files_from_filevars(conffiles)
+
+ # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
+ conf_list = []
+ for f in conf_orig_list:
+ if os.path.isdir(f):
+ continue
+ if os.path.islink(f):
+ continue
+ if not os.path.exists(f):
+ continue
+ conf_list.append(f)
+
+ # Remove the leading './'
+ for i in range(0, len(conf_list)):
+ conf_list[i] = conf_list[i][1:]
+
+ os.chdir(cwd)
+ return conf_list
+
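
get_conffiles() feeds the deb, ipk and rpm backends below; because it goes through files_from_filevars(), CONFFILES entries may be globs or directories, and only regular files end up in the package's conffiles list. Typical recipe usage (package path illustrative):

    # mark everything under ${sysconfdir}/myapp as configuration files
    CONFFILES_${PN} = "${sysconfdir}/myapp/*"
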
def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
# Function to split a single file into two components, one is the stripped
# target system binary, the other contains any debugging information. The
@@ -1009,26 +1069,9 @@ python populate_packages () {
filesvar.replace("//", "/")
origfiles = filesvar.split()
- files = []
- for file in origfiles:
- if os.path.isabs(file):
- file = '.' + file
- if not file.startswith("./"):
- file = './' + file
- globbed = glob.glob(file)
- if globbed:
- if [ file ] != globbed:
- files += globbed
- continue
- files.append(file)
+ files = files_from_filevars(origfiles)
for file in files:
- if not cpath.islink(file):
- if cpath.isdir(file):
- newfiles = [ os.path.join(file,x) for x in os.listdir(file) ]
- if newfiles:
- files += newfiles
- continue
if (not cpath.islink(file)) and (not cpath.exists(file)):
continue
if file in seen:
@@ -1392,32 +1435,11 @@ python package_do_shlibs() {
pkgdest = d.getVar('PKGDEST', True)
- shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
# Take shared lock since we're only reading, not writing
lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
- def read_shlib_providers():
- list_re = re.compile('^(.*)\.list$')
- # Go from least to most specific since the last one found wins
- for dir in reversed(shlibs_dirs):
- bb.debug(2, "Reading shlib providers in %s" % (dir))
- if not os.path.exists(dir):
- continue
- for file in os.listdir(dir):
- m = list_re.match(file)
- if m:
- dep_pkg = m.group(1)
- fd = open(os.path.join(dir, file))
- lines = fd.readlines()
- fd.close()
- for l in lines:
- s = l.strip().split(":")
- if s[0] not in shlib_provider:
- shlib_provider[s[0]] = {}
- shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
-
def linux_so(file, needed, sonames, renames, pkgver):
needs_ldconfig = False
ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
@@ -1514,8 +1536,7 @@ python package_do_shlibs() {
use_ldconfig = False
needed = {}
- shlib_provider = {}
- read_shlib_providers()
+ shlib_provider = oe.package.read_shlib_providers(d)
for pkg in packages.split():
private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True) or ""
@@ -1887,7 +1908,7 @@ python package_depchains() {
# Since bitbake can't determine which variables are accessed during package
# iteration, we need to list them here:
-PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES"
+PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE"
def gen_packagevar(d):
ret = []
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index 5b5f7e2c9a..9d7c59ba53 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -262,7 +262,7 @@ python do_package_deb () {
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0755)
- conffiles_str = localdata.getVar("CONFFILES", True)
+ conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
try:
conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index 44fd3eb29c..dba68042ac 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -226,7 +226,7 @@ python do_package_ipk () {
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0755)
- conffiles_str = localdata.getVar("CONFFILES", True)
+ conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
try:
conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index 92ddf7a30f..e305e8b4ab 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -197,6 +197,16 @@ python write_specfile () {
if path.endswith("DEBIAN") or path.endswith("CONTROL"):
continue
+ # Treat all symlinks to directories as normal files.
+ # os.walk() lists them as directories.
+ def move_to_files(dir):
+ if os.path.islink(os.path.join(rootpath, dir)):
+ files.append(dir)
+ return True
+ else:
+ return False
+ dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
+
# Directory handling can happen in two ways, either DIRFILES is not set at all
# in which case we fall back to the older behaviour of packages owning all their
# directories
@@ -324,7 +334,7 @@ python write_specfile () {
bb.data.update_data(localdata)
- conffiles = (localdata.getVar('CONFFILES', True) or "").split()
+ conffiles = get_conffiles(pkg, d)
dirfiles = localdata.getVar('DIRFILES', True)
if dirfiles is not None:
dirfiles = dirfiles.split()
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
index b8d75bd38c..9e6ecc8a53 100644
--- a/meta/classes/pixbufcache.bbclass
+++ b/meta/classes/pixbufcache.bbclass
@@ -67,6 +67,11 @@ pixbufcache_sstate_postinst() {
# Packages that use this class should extend this variable with their runtime
# dependencies.
PIXBUFCACHE_SYSROOT_DEPS = ""
-PIXBUFCACHE_SYSROOT_DEPS_class-native = "${@['gdk-pixbuf-native:do_populate_sysroot_setscene', '']['${BPN}' == 'gdk-pixbuf']} glib-2.0-native:do_populate_sysroot_setscene libffi-native:do_populate_sysroot_setscene libpng-native:do_populate_sysroot_setscene zlib-native:do_populate_sysroot_setscene"
+PIXBUFCACHE_SYSROOT_DEPS_class-native = "\
+ ${@['gdk-pixbuf-native:do_populate_sysroot_setscene', '']['${BPN}' == 'gdk-pixbuf']} \
+ glib-2.0-native:do_populate_sysroot_setscene libffi-native:do_populate_sysroot_setscene \
+ libpng-native:do_populate_sysroot_setscene zlib-native:do_populate_sysroot_setscene \
+ harfbuzz-native:do_populate_sysroot_setscene \
+ "
do_populate_sysroot_setscene[depends] += "${PIXBUFCACHE_SYSROOT_DEPS}"
do_populate_sysroot[depends] += "${@d.getVar('PIXBUFCACHE_SYSROOT_DEPS', True).replace('_setscene','')}"
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index de72e32ed8..7f7a87fdd2 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -40,7 +40,7 @@ TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
-SDK_DEPENDS = "virtual/fakeroot-native sed-native"
+SDK_DEPENDS = "virtual/fakeroot-native"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
# could be set to the MACHINE_ARCH
@@ -52,6 +52,7 @@ EXCLUDE_FROM_WORLD = "1"
SDK_PACKAGING_FUNC ?= "create_shar"
SDK_POST_INSTALL_COMMAND ?= ""
+SDK_RELOCATE_AFTER_INSTALL ?= "1"
SDK_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.manifest"
python write_target_sdk_manifest () {
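
SDK_RELOCATE_AFTER_INSTALL gates the relocation step spliced into the installer script below; the extensible SDK later in this series turns it off, and a distro or image could do the same. A sketch:

    # produce an SDK installer that skips post-install relocation
    SDK_RELOCATE_AFTER_INSTALL = "0"
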
@@ -93,7 +94,9 @@ fakeroot python do_populate_sdk() {
bb.build.exec_func("tar_sdk", d)
- bb.build.exec_func(d.getVar("SDK_PACKAGING_FUNC", True), d)
+ sdk_packaging_func = d.getVar("SDK_PACKAGING_FUNC", True) or ""
+ if sdk_packaging_func.strip():
+ bb.build.exec_func(d.getVar("SDK_PACKAGING_FUNC", True), d)
}
fakeroot create_sdk_files() {
@@ -116,9 +119,14 @@ fakeroot tar_sdk() {
fakeroot create_shar() {
# copy in the template shar extractor script
- cp ${COREBASE}/meta/files/toolchain-shar-template.sh ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+ cp ${COREBASE}/meta/files/toolchain-shar-extract.sh ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
- cat << "EOF" > ${T}/post_install_command
+ rm -f ${T}/post_install_command
+
+ if [ ${SDK_RELOCATE_AFTER_INSTALL} -eq 1 ] ; then
+ cp ${COREBASE}/meta/files/toolchain-shar-relocate.sh ${T}/post_install_command
+ fi
+ cat << "EOF" >> ${T}/post_install_command
${SDK_POST_INSTALL_COMMAND}
EOF
sed -i -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
@@ -126,6 +134,7 @@ EOF
# substitute variables
sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
-e 's#@SDKPATH@#${SDKPATH}#g' \
+ -e 's#@OLDEST_KERNEL@#${OLDEST_KERNEL}#g' \
-e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
-e '/@SDK_POST_INSTALL_COMMAND@/d' \
${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
new file mode 100644
index 0000000000..17a8e8cdf4
--- /dev/null
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -0,0 +1,220 @@
+# Extensible SDK
+
+inherit populate_sdk_base
+
+# NOTE: normally you cannot use task overrides for this kind of thing - this
+# only works because of get_sdk_ext_rdepends()
+
+TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
+ meta-environment-extsdk-${MACHINE} \
+ "
+
+TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
+
+SDK_RDEPENDS_append_task-populate-sdk-ext = " ${SDK_TARGETS}"
+
+SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
+
+SDK_META_CONF_WHITELIST ?= "MACHINE DISTRO PACKAGE_CLASSES"
+
+SDK_TARGETS ?= "${PN}"
+OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
+
+# The files from COREBASE that you want preserved in the copy of COREBASE
+# placed into the SDK. This allows someone's own setup scripts and other
+# untracked files in COREBASE to be preserved as well.
+COREBASE_FILES ?= " \
+ oe-init-build-env \
+ oe-init-build-env-memres \
+ scripts \
+ LICENSE \
+ .templateconf \
+"
+
+SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
+B_task-populate-sdk-ext = "${SDK_DIR}"
+TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
+
+python copy_buildsystem () {
+ import re
+ import oe.copy_buildsystem
+
+ oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT', True)
+
+ conf_bbpath = ''
+ conf_initpath = ''
+ core_meta_subdir = ''
+
+ # Copy in all metadata layers + bitbake (as repositories)
+ buildsystem = oe.copy_buildsystem.BuildSystem(d)
+ baseoutpath = d.getVar('SDK_OUTPUT', True) + '/' + d.getVar('SDKPATH', True)
+ layers_copied = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers')
+
+ sdkbblayers = []
+ corebase = os.path.basename(d.getVar('COREBASE', True))
+ for layer in layers_copied:
+ if corebase == os.path.basename(layer):
+ conf_bbpath = os.path.join('layers', layer, 'bitbake')
+ else:
+ sdkbblayers.append(layer)
+
+ for path in os.listdir(baseoutpath + '/layers'):
+ relpath = os.path.join('layers', path, oe_init_env_script)
+ if os.path.exists(os.path.join(baseoutpath, relpath)):
+ conf_initpath = relpath
+
+ relpath = os.path.join('layers', path, 'scripts', 'devtool')
+ if os.path.exists(os.path.join(baseoutpath, relpath)):
+ scriptrelpath = os.path.dirname(relpath)
+
+ relpath = os.path.join('layers', path, 'meta')
+ if os.path.exists(os.path.join(baseoutpath, relpath, 'lib', 'oe')):
+ core_meta_subdir = relpath
+
+ d.setVar('oe_init_build_env_path', conf_initpath)
+ d.setVar('scriptrelpath', scriptrelpath)
+
+ # Write out config file for devtool
+ import ConfigParser
+ config = ConfigParser.SafeConfigParser()
+ config.add_section('General')
+ config.set('General', 'bitbake_subdir', conf_bbpath)
+ config.set('General', 'init_path', conf_initpath)
+ config.set('General', 'core_meta_subdir', core_meta_subdir)
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf'))
+ with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f:
+ config.write(f)
+
+ # Create a layer for new recipes / appends
+ bb.process.run("devtool --basepath %s create-workspace --create-only %s" % (baseoutpath, os.path.join(baseoutpath, 'workspace')))
+
+ # Create bblayers.conf
+ bb.utils.mkdirhier(baseoutpath + '/conf')
+ with open(baseoutpath + '/conf/bblayers.conf', 'w') as f:
+ f.write('LCONF_VERSION = "%s"\n\n' % d.getVar('LCONF_VERSION'))
+ f.write('BBPATH = "$' + '{TOPDIR}"\n')
+ f.write('SDKBASEMETAPATH = "$' + '{TOPDIR}"\n')
+ f.write('BBLAYERS := " \\\n')
+ for layerrelpath in sdkbblayers:
+ f.write(' $' + '{SDKBASEMETAPATH}/layers/%s \\\n' % layerrelpath)
+ f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n')
+ f.write(' "\n')
+
+ # Create local.conf
+ with open(baseoutpath + '/conf/local.conf', 'w') as f:
+ f.write('INHERIT += "%s"\n\n' % 'uninative')
+ f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION'))
+
+ # This is a bit of a hack, but we really don't want these dependencies
+ # (we're including them in the SDK as nativesdk- versions instead)
+ f.write('POKYQEMUDEPS_forcevariable = ""\n\n')
+ f.write('EXTRA_IMAGEDEPENDS_remove = "qemu-native qemu-helper-native"\n\n')
+
+ # Bypass the default connectivity check if any
+ f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
+
+ # Another hack, but we want the native part of sstate to be kept the same
+ # regardless of the host distro
+ fixedlsbstring = 'SDK-Fixed'
+ f.write('NATIVELSBSTRING_forcevariable = "%s"\n\n' % fixedlsbstring)
+
+ # Ensure locked sstate cache objects are re-used without error
+ f.write('SIGGEN_LOCKEDSIGS_CHECK_LEVEL = "warn"\n\n')
+
+ for varname in d.getVar('SDK_META_CONF_WHITELIST', True).split():
+ f.write('%s = "%s"\n' % (varname, d.getVar(varname, True)))
+ f.write('require conf/locked-sigs.inc\n')
+ f.write('require conf/work-config.inc\n')
+
+ sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
+ oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
+
+ # Filter the locked signatures file to just the sstate tasks we are interested in
+ allowed_tasks = ['do_populate_lic', 'do_populate_sysroot', 'do_packagedata', 'do_package_write_ipk', 'do_package_write_rpm', 'do_package_write_deb', 'do_package_qa', 'do_deploy']
+ excluded_targets = d.getVar('SDK_TARGETS', True)
+ lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
+ oe.copy_buildsystem.prune_lockedsigs(allowed_tasks,
+ excluded_targets,
+ sigfile,
+ lockedsigs_pruned)
+
+ sstate_out = baseoutpath + '/sstate-cache'
+ bb.utils.remove(sstate_out, True)
+ oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
+ d.getVar('SSTATE_DIR', True),
+ sstate_out, d,
+ fixedlsbstring)
+
+ # Create a dummy config file for additional settings
+ with open(baseoutpath + '/conf/work-config.inc', 'w') as f:
+ pass
+}
+
+install_tools() {
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
+ ln -sr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/devtool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/devtool
+ ln -sr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/recipetool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/recipetool
+ touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
+
+ install ${SDK_DEPLOY}/${DISTRO}-${TCLIBC}-${SDK_ARCH}-buildtools-tarball-${TUNE_PKGARCH}-buildtools-nativesdk-standalone-${DISTRO_VERSION}.sh ${SDK_OUTPUT}/${SDKPATH}
+
+ install ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 ${SDK_OUTPUT}/${SDKPATH}
+}
+
+# FIXME this preparation should be done as part of the SDK construction
+sdk_ext_postinst() {
+ printf "\nExtracting buildtools...\n"
+ cd $target_sdk_dir
+ printf "buildtools\ny" | ./*buildtools-tarball* > /dev/null
+
+ # Make sure when the user sets up the environment, they also get
+ # the buildtools-tarball tools in their path.
+ echo ". $target_sdk_dir/buildtools/environment-setup*" >> $target_sdk_dir/environment-setup*
+
+ # Allow bitbake environment setup to be run as part of this SDK.
+ echo "export OE_SKIP_SDK_CHECK=1" >> $target_sdk_dir/environment-setup*
+
+ # Another slight hack, but we need this on the path only for devtool,
+ # so put it at the end of $PATH.
+ echo "export PATH=\$PATH:$target_sdk_dir/sysroots/${SDK_SYS}/${bindir_nativesdk}" >> $target_sdk_dir/environment-setup*
+
+ # For now this is where uninative.bbclass expects the tarball
+ mv *-nativesdk-libc.tar.* $target_sdk_dir/`dirname ${oe_init_build_env_path}`
+
+ printf "Preparing build system...\n"
+ # dash, which is /bin/sh on Ubuntu, will not preserve the
+ # current working directory when first run, nor will it set $1 when
+ # sourcing a script. That is why this has to look so ugly.
+ sh -c ". buildtools/environment-setup* > preparing_build_system.log && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> preparing_build_system.log && bitbake ${SDK_TARGETS} >> preparing_build_system.log" || { echo "SDK preparation failed: see `pwd`/preparing_build_system.log" ; exit 1 ; }
+ echo done
+}
+
+SDK_POST_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_postinst}"
+
+SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
+
+fakeroot python do_populate_sdk_ext() {
+ bb.build.exec_func("do_populate_sdk", d)
+}
+
+def get_sdk_ext_rdepends(d):
+ localdata = d.createCopy()
+ localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
+ bb.data.update_data(localdata)
+ return localdata.getVarFlag('do_populate_sdk', 'rdepends', True)
+
+do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
+do_populate_sdk_ext[depends] += "${@d.getVarFlag('do_populate_sdk', 'depends', False)}"
+do_populate_sdk_ext[rdepends] = "${@get_sdk_ext_rdepends(d)}"
+do_populate_sdk_ext[recrdeptask] += "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
+
+
+do_populate_sdk_ext[depends] += "buildtools-tarball:do_populate_sdk uninative-tarball:do_populate_sdk"
+
+do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':do_build' for x in d.getVar('SDK_TARGETS', True).split()])}"
+do_populate_sdk_ext[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy"
+
+# Make sure code changes in copy_buildsystem result in a rebuild
+do_populate_sdk_ext[vardeps] += "copy_buildsystem"
+
+addtask populate_sdk_ext
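
get_sdk_ext_rdepends above shows the usual recipe for reading a value as it would appear under an additional override: copy the datastore, extend OVERRIDES on the copy, re-apply the data, then read. The same pattern in isolation, as a sketch with hypothetical names:

    # Sketch only; the variable and override names are examples, not part
    # of this patch.
    def get_overridden_var(d, var, override):
        localdata = d.createCopy()                 # work on a copy of the datastore
        localdata.appendVar('OVERRIDES', ':' + override)
        bb.data.update_data(localdata)             # re-apply overrides on the copy
        return localdata.getVar(var, True)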
diff --git a/meta/classes/pythonnative.bbclass b/meta/classes/pythonnative.bbclass
index fdd22bbc86..97029dc525 100644
--- a/meta/classes/pythonnative.bbclass
+++ b/meta/classes/pythonnative.bbclass
@@ -2,5 +2,7 @@
inherit python-dir
PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
+# PYTHON_EXECUTABLE is used by cmake
+PYTHON_EXECUTABLE="${PYTHON}"
EXTRANATIVEPATH += "${PYTHON_PN}-native"
DEPENDS += " ${PYTHON_PN}-native "
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
index b2cf85d628..601f587534 100644
--- a/meta/classes/qemu.bbclass
+++ b/meta/classes/qemu.bbclass
@@ -29,20 +29,23 @@ def qemu_run_binary(data, rootfs_path, binary):
libdir = rootfs_path + data.getVar("libdir", False)
base_libdir = rootfs_path + data.getVar("base_libdir", False)
- oldest_kernel = data.getVar("OLDEST_KERNEL", True)
+ qemu_options = data.getVar("QEMU_OPTIONS", True)
- return "PSEUDO_UNLOAD=1 " + qemu_binary + " -r " + oldest_kernel + " -L " + rootfs_path\
+ return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
+ " -E LD_LIBRARY_PATH=" + libdir + ":" + base_libdir + " "\
+ rootfs_path + binary
-QEMU_OPTIONS = "-r ${OLDEST_KERNEL}"
-QEMU_OPTIONS_append_iwmmxt = " -cpu pxa270-c5"
-QEMU_OPTIONS_append_armv6 = " -cpu arm1136"
-QEMU_OPTIONS_append_armv7a = " -cpu cortex-a8"
-QEMU_OPTIONS_append_e500v2 = " -cpu e500v2"
-QEMU_OPTIONS_append_e500mc = " -cpu e500mc"
-QEMU_OPTIONS_append_e5500 = " -cpu e5500"
-QEMU_OPTIONS_append_e5500-64b = " -cpu e5500"
-QEMU_OPTIONS_append_e6500 = " -cpu e6500"
-QEMU_OPTIONS_append_e6500-64b = " -cpu e6500"
-QEMU_OPTIONS_append_ppc7400 = " -cpu 7400"
+# QEMU_EXTRAOPTIONS is not meant to be used directly: the suffixes are
+# PACKAGE_ARCH values, not overrides, hence the lookup dance below.
+# Simply being architecture-specific isn't good enough.
+QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True) or ""}"
+QEMU_EXTRAOPTIONS_iwmmxt = " -cpu pxa270-c5"
+QEMU_EXTRAOPTIONS_armv6 = " -cpu arm1136"
+QEMU_EXTRAOPTIONS_armv7a = " -cpu cortex-a8"
+QEMU_EXTRAOPTIONS_e500v2 = " -cpu e500v2"
+QEMU_EXTRAOPTIONS_e500mc = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_e5500 = " -cpu e5500"
+QEMU_EXTRAOPTIONS_e5500-64b = " -cpu e5500"
+QEMU_EXTRAOPTIONS_e6500 = " -cpu e6500"
+QEMU_EXTRAOPTIONS_e6500-64b = " -cpu e6500"
+QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
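
Because the QEMU_EXTRAOPTIONS suffixes are PACKAGE_ARCH values rather than overrides, they are resolved with an inline Python expression at expansion time, as in QEMU_OPTIONS above. The same pattern in isolation, with hypothetical variable names:

    # Hypothetical sketch: FOO picks up FOO_EXTRA_<arch> for the current
    # PACKAGE_ARCH, falling back to an empty string when none is defined.
    FOO_EXTRA_armv7a = " -mtune=cortex-a8"
    FOO = "base ${@d.getVar('FOO_EXTRA_%s' % d.getVar('PACKAGE_ARCH', True), True) or ''}"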
diff --git a/meta/classes/qmake_base.bbclass b/meta/classes/qmake_base.bbclass
index 86bbede260..dc98713fd2 100644
--- a/meta/classes/qmake_base.bbclass
+++ b/meta/classes/qmake_base.bbclass
@@ -38,9 +38,9 @@ do_generate_qt_config_file() {
[Paths]
Prefix =
Binaries = ${STAGING_BINDIR_NATIVE}
-Headers = ${STAGING_INCDIR}/qt4
-Plugins = ${STAGING_LIBDIR}/qt4/plugins/
-Mkspecs = ${STAGING_DATADIR}/qt4/mkspecs/
+Headers = ${STAGING_INCDIR}/${QT_DIR_NAME}
+Plugins = ${STAGING_LIBDIR}/${QT_DIR_NAME}/plugins/
+Mkspecs = ${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/
EOF
}
diff --git a/meta/classes/qt4e.bbclass b/meta/classes/qt4e.bbclass
index 850bb6a717..13b1050aac 100644
--- a/meta/classes/qt4e.bbclass
+++ b/meta/classes/qt4e.bbclass
@@ -19,6 +19,3 @@ EXTRA_QMAKEVARS_PRE += " QT_LIBINFIX=${QT_LIBINFIX} "
# Qt4 uses atomic instructions not supported in thumb mode
ARM_INSTRUCTION_SET = "arm"
-
-# Qt4 could NOT be built on MIPS64 with 64 bits userspace
-COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/qt4x11.bbclass b/meta/classes/qt4x11.bbclass
index 65d196afc6..6f06d34755 100644
--- a/meta/classes/qt4x11.bbclass
+++ b/meta/classes/qt4x11.bbclass
@@ -1,7 +1,10 @@
QT4DEPENDS ?= "qt4-x11 "
DEPENDS_prepend = "${QT4DEPENDS}"
-inherit qmake2
+# depends on qt4-x11
+REQUIRED_DISTRO_FEATURES += "x11"
+
+inherit qmake2 distro_features_check
QT_BASE_NAME = "qt4"
QT_DIR_NAME = "qt4"
@@ -9,6 +12,3 @@ QT_LIBINFIX = ""
# Qt4 uses atomic instructions not supported in thumb mode
ARM_INSTRUCTION_SET = "arm"
-
-# Qt4 could NOT be built on MIPS64 with 64 bits userspace
-COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 8b30422edf..9edf2ceb31 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -47,9 +47,13 @@ python errorreport_handler () {
taskdata['package'] = e.data.expand("${PF}")
taskdata['task'] = task
if log:
- logFile = open(log, 'r')
- taskdata['log'] = logFile.read()
- logFile.close()
+ try:
+ logFile = open(log, 'r')
+ taskdata['log'] = logFile.read().decode('utf-8')
+ logFile.close()
+ except:
+ taskdata['log'] = "Unable to read log file"
+
else:
taskdata['log'] = "No Log"
jsondata = json.loads(errorreport_getdata(e))
@@ -62,8 +66,8 @@ python errorreport_handler () {
if(len(failures) > 0):
filename = "error_report_" + e.data.getVar("BUILDNAME")+".txt"
datafile = errorreport_savedata(e, jsondata, filename)
- bb.note("The errors for this build are stored in %s\nYou can send the errors to an upstream server by running:\n send-error-report %s [server]" % (datafile, datafile))
- bb.note("The contents of these logs will be posted in public if you use the above command with the default server. If you need to do so, please ensure you remove any identifying or proprietary information before sending.")
+ bb.note("The errors for this build are stored in %s\nYou can send the errors to a reports server by running:\n send-error-report %s [-s server]" % (datafile, datafile))
+ bb.note("The contents of these logs will be posted in public if you use the above command with the default server. Please ensure you remove any identifying or proprietary information when prompted before sending.")
}
addhandler errorreport_handler
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
index 6139cc7d59..dd144e49ef 100644
--- a/meta/classes/rootfs_ipk.bbclass
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -6,7 +6,7 @@
#
EXTRAOPKGCONFIG ?= ""
-ROOTFS_PKGMANAGE = "opkg opkg-collateral ${EXTRAOPKGCONFIG}"
+ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 5be5efb8a4..cca39c9b52 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -70,6 +70,12 @@ python oecore_update_bblayers() {
sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
return
+ elif current_lconf == 5 and lconf_version > 5:
+ # Null update, to avoid issues with people switching between poky and other distros
+ current_lconf = 6
+ sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
+ return
+
sys.exit()
}
@@ -470,7 +476,6 @@ def sanity_check_conffiles(status, d):
if success:
bb.note("Your conf/bblayers.conf has been automatically updated.")
status.reparse = True
- break
if not status.reparse:
status.addresult("Your version of bblayers.conf has the wrong LCONF_VERSION (has %s, expecting %s).\nPlease compare the your file against bblayers.conf.sample and merge any changes before continuing.\n\"meld conf/bblayers.conf ${COREBASE}/meta*/conf/bblayers.conf.sample\" is a good way to visualise the changes.\n" % (current_lconf, lconf_version))
@@ -519,6 +524,16 @@ def sanity_handle_abichanges(status, d):
status.addresult("Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n")
elif (abi != current_abi and current_abi == "9"):
status.addresult("The layout of the TMPDIR STAMPS directory has changed. Please clean out TMPDIR and rebuild (sstate will be still be valid and reused)\n")
+ elif (abi != current_abi and current_abi == "10" and (abi == "8" or abi == "9")):
+ bb.note("Converting staging layout from version 8/9 to layout version 10")
+ cmd = d.expand("grep -r -l sysroot-providers/virtual_kernel ${SSTATE_MANIFESTS}")
+ ret, result = oe.utils.getstatusoutput(cmd)
+ result = result.split()
+ for f in result:
+ bb.note("Uninstalling manifest file %s" % f)
+ sstate_clean_manifest(f, d)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
@@ -804,6 +819,10 @@ def check_sanity_everybuild(status, d):
with open(checkfile, "w") as f:
f.write(tmpdir)
+    # Check that vmdk and live images aren't requested together.
+ if 'vmdk' in d.getVar('IMAGE_FSTYPES', True) and 'live' in d.getVar('IMAGE_FSTYPES', True):
+ status.addresult("Error, IMAGE_FSTYPES vmdk and live can't be built together\n")
+
def check_sanity(sanity_data):
import subprocess
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
index e90632aeef..5fd99bf27a 100644
--- a/meta/classes/siteinfo.bbclass
+++ b/meta/classes/siteinfo.bbclass
@@ -18,10 +18,10 @@
def siteinfo_data(d):
archinfo = {
"allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
- "aarch64": "endian-little bit-64 arm-common",
- "aarch64_be": "endian-big bit-64 arm-common",
- "arm": "endian-little bit-32 arm-common",
- "armeb": "endian-big bit-32 arm-common",
+ "aarch64": "endian-little bit-64 arm-common arm-64",
+ "aarch64_be": "endian-big bit-64 arm-common arm-64",
+ "arm": "endian-little bit-32 arm-common arm-32",
+ "armeb": "endian-big bit-32 arm-common arm-32",
"avr32": "endian-big bit-32 avr32-common",
"bfin": "endian-little bit-32 bfin-common",
"i386": "endian-little bit-32 ix86-common",
@@ -95,6 +95,7 @@ def siteinfo_data(d):
"x86_64-linux": "bit-64",
"x86_64-linux-musl": "x86_64-linux bit-64",
"x86_64-linux-uclibc": "bit-64",
+ "x86_64-elf": "bit-64",
"x86_64-linux-gnu": "bit-64 x86_64-linux",
"x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
"x86_64-mingw32": "bit-64",
@@ -150,9 +151,13 @@ def siteinfo_get_files(d, no_cache = False):
if no_cache: return sitefiles
# Now check for siteconfig cache files
- path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE', True)
- if os.path.isdir(path_siteconfig):
+ # Use the files copied to the aclocal cache generated by autotools.bbclass
+ # to avoid races
+ path_siteconfig = d.getVar('ACLOCALDIR', True)
+ if path_siteconfig and os.path.isdir(path_siteconfig):
for i in os.listdir(path_siteconfig):
+ if not i.endswith("_config"):
+ continue
filename = os.path.join(path_siteconfig, i)
sitefiles += filename + " "
diff --git a/meta/classes/spdx.bbclass b/meta/classes/spdx.bbclass
index bccc230d8c..454c53e96f 100644
--- a/meta/classes/spdx.bbclass
+++ b/meta/classes/spdx.bbclass
@@ -15,7 +15,6 @@
# SPDX file will be output to the path which is defined as[SPDX_MANIFEST_DIR]
# in ./meta/conf/licenses.conf.
-SPDXOUTPUTDIR = "${WORKDIR}/spdx_output_dir"
SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir"
# If ${S} isn't actually the top-level source directory, set SPDX_S to point at
@@ -24,247 +23,283 @@ SPDX_S ?= "${S}"
python do_spdx () {
import os, sys
- import json
+ import json, shutil
info = {}
- info['workdir'] = (d.getVar('WORKDIR', True) or "")
- info['sourcedir'] = (d.getVar('SPDX_S', True) or "")
- info['pn'] = (d.getVar( 'PN', True ) or "")
- info['pv'] = (d.getVar( 'PV', True ) or "")
- info['src_uri'] = (d.getVar( 'SRC_URI', True ) or "")
- info['spdx_version'] = (d.getVar('SPDX_VERSION', True) or '')
- info['data_license'] = (d.getVar('DATA_LICENSE', True) or '')
-
- spdx_sstate_dir = (d.getVar('SPDXSSTATEDIR', True) or "")
- manifest_dir = (d.getVar('SPDX_MANIFEST_DIR', True) or "")
+ info['workdir'] = d.getVar('WORKDIR', True)
+ info['sourcedir'] = d.getVar('SPDX_S', True)
+ info['pn'] = d.getVar('PN', True)
+ info['pv'] = d.getVar('PV', True)
+ info['spdx_version'] = d.getVar('SPDX_VERSION', True)
+ info['data_license'] = d.getVar('DATA_LICENSE', True)
+
+ sstatedir = d.getVar('SPDXSSTATEDIR', True)
+ sstatefile = os.path.join(sstatedir, info['pn'] + info['pv'] + ".spdx")
+
+ manifest_dir = d.getVar('SPDX_MANIFEST_DIR', True)
info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
- sstatefile = os.path.join(spdx_sstate_dir,
- info['pn'] + info['pv'] + ".spdx" )
- info['spdx_temp_dir'] = (d.getVar('SPDX_TEMP_DIR', True) or "")
- info['tar_file'] = os.path.join( info['workdir'], info['pn'] + ".tar.gz" )
+ info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR', True)
+ info['tar_file'] = os.path.join(info['workdir'], info['pn'] + ".tar.gz" )
+
+ # Make sure important dirs exist
+ try:
+ bb.utils.mkdirhier(manifest_dir)
+ bb.utils.mkdirhier(sstatedir)
+ bb.utils.mkdirhier(info['spdx_temp_dir'])
+ except OSError as e:
+ bb.error("SPDX: Could not set up required directories: " + str(e))
+ return
## get everything from cache. use it to decide if
## something needs to be rerun
- cur_ver_code = get_ver_code( info['sourcedir'] )
+ cur_ver_code = get_ver_code(info['sourcedir'])
cache_cur = False
- if not os.path.exists( spdx_sstate_dir ):
- bb.utils.mkdirhier( spdx_sstate_dir )
- if not os.path.exists( info['spdx_temp_dir'] ):
- bb.utils.mkdirhier( info['spdx_temp_dir'] )
- if os.path.exists( sstatefile ):
+ if os.path.exists(sstatefile):
## cache for this package exists. read it in
- cached_spdx = get_cached_spdx( sstatefile )
+ cached_spdx = get_cached_spdx(sstatefile)
if cached_spdx['PackageVerificationCode'] == cur_ver_code:
- bb.warn(info['pn'] + "'s ver code same as cache's. do nothing")
+ bb.warn("SPDX: Verification code for " + info['pn']
+ + "is same as cache's. do nothing")
cache_cur = True
else:
- local_file_info = setup_foss_scan( info,
- True, cached_spdx['Files'] )
+ local_file_info = setup_foss_scan(info, True, cached_spdx['Files'])
else:
- local_file_info = setup_foss_scan( info, False, None )
+ local_file_info = setup_foss_scan(info, False, None)
if cache_cur:
spdx_file_info = cached_spdx['Files']
+ foss_package_info = cached_spdx['Package']
+ foss_license_info = cached_spdx['Licenses']
else:
## setup fossology command
- foss_server = (d.getVar('FOSS_SERVER', True) or "")
- foss_flags = (d.getVar('FOSS_WGET_FLAGS', True) or "")
+ foss_server = d.getVar('FOSS_SERVER', True)
+ foss_flags = d.getVar('FOSS_WGET_FLAGS', True)
+        foss_full_spdx = d.getVar('FOSS_FULL_SPDX', True) == "true"
foss_command = "wget %s --post-file=%s %s"\
- % (foss_flags,info['tar_file'],foss_server)
+ % (foss_flags, info['tar_file'], foss_server)
- #bb.warn(info['pn'] + json.dumps(local_file_info))
- foss_file_info = run_fossology( foss_command )
- spdx_file_info = create_spdx_doc( local_file_info, foss_file_info )
- ## write to cache
- write_cached_spdx(sstatefile,cur_ver_code,spdx_file_info)
+ foss_result = run_fossology(foss_command, foss_full_spdx)
+ if foss_result is not None:
+ (foss_package_info, foss_file_info, foss_license_info) = foss_result
+ spdx_file_info = create_spdx_doc(local_file_info, foss_file_info)
+ ## write to cache
+ write_cached_spdx(sstatefile, cur_ver_code, foss_package_info,
+ spdx_file_info, foss_license_info)
+ else:
+ bb.error("SPDX: Could not communicate with FOSSology server. Command was: " + foss_command)
+ return
## Get document and package level information
- spdx_header_info = get_header_info(info, cur_ver_code, spdx_file_info)
+ spdx_header_info = get_header_info(info, cur_ver_code, foss_package_info)
## CREATE MANIFEST
- create_manifest(info,spdx_header_info,spdx_file_info)
+ create_manifest(info, spdx_header_info, spdx_file_info, foss_license_info)
## clean up the temp stuff
- remove_dir_tree( info['spdx_temp_dir'] )
+ shutil.rmtree(info['spdx_temp_dir'], ignore_errors=True)
if os.path.exists(info['tar_file']):
- remove_file( info['tar_file'] )
+ remove_file(info['tar_file'])
}
addtask spdx after do_patch before do_configure
-def create_manifest(info,header,files):
- with open(info['outfile'], 'w') as f:
+def create_manifest(info, header, files, licenses):
+ import codecs
+ with codecs.open(info['outfile'], mode='w', encoding='utf-8') as f:
+ # Write header
f.write(header + '\n')
+
+ # Write file data
for chksum, block in files.iteritems():
+ f.write("FileName: " + block['FileName'] + '\n')
for key, value in block.iteritems():
- f.write(key + ": " + value)
- f.write('\n')
+ if not key == 'FileName':
+ f.write(key + ": " + value + '\n')
f.write('\n')
-def get_cached_spdx( sstatefile ):
+ # Write license data
+ for id, block in licenses.iteritems():
+ f.write("LicenseID: " + id + '\n')
+ for key, value in block.iteritems():
+ f.write(key + ": " + value + '\n')
+ f.write('\n')
+
+def get_cached_spdx(sstatefile):
import json
+ import codecs
cached_spdx_info = {}
- with open( sstatefile, 'r' ) as f:
+ with codecs.open(sstatefile, mode='r', encoding='utf-8') as f:
try:
cached_spdx_info = json.load(f)
except ValueError as e:
cached_spdx_info = None
return cached_spdx_info
-def write_cached_spdx( sstatefile, ver_code, files ):
+def write_cached_spdx(sstatefile, ver_code, package_info, files, license_info):
import json
+ import codecs
spdx_doc = {}
spdx_doc['PackageVerificationCode'] = ver_code
spdx_doc['Files'] = {}
spdx_doc['Files'] = files
- with open( sstatefile, 'w' ) as f:
+ spdx_doc['Package'] = {}
+ spdx_doc['Package'] = package_info
+ spdx_doc['Licenses'] = {}
+ spdx_doc['Licenses'] = license_info
+ with codecs.open(sstatefile, mode='w', encoding='utf-8') as f:
f.write(json.dumps(spdx_doc))
-def setup_foss_scan( info, cache, cached_files ):
+def setup_foss_scan(info, cache, cached_files):
import errno, shutil
import tarfile
file_info = {}
cache_dict = {}
- for f_dir, f in list_files( info['sourcedir'] ):
- full_path = os.path.join( f_dir, f )
+ for f_dir, f in list_files(info['sourcedir']):
+ full_path = os.path.join(f_dir, f)
abs_path = os.path.join(info['sourcedir'], full_path)
- dest_dir = os.path.join( info['spdx_temp_dir'], f_dir )
- dest_path = os.path.join( info['spdx_temp_dir'], full_path )
- try:
- stats = os.stat(abs_path)
- except OSError as e:
- bb.warn( "Stat failed" + str(e) + "\n")
- continue
-
- checksum = hash_file( abs_path )
- mtime = time.asctime(time.localtime(stats.st_mtime))
-
- ## retain cache information if it exists
- file_info[checksum] = {}
- if cache and checksum in cached_files:
- file_info[checksum] = cached_files[checksum]
- else:
- file_info[checksum]['FileName'] = full_path
-
- try:
- os.makedirs( dest_dir )
- except OSError as e:
- if e.errno == errno.EEXIST and os.path.isdir(dest_dir):
- pass
+ dest_dir = os.path.join(info['spdx_temp_dir'], f_dir)
+ dest_path = os.path.join(info['spdx_temp_dir'], full_path)
+
+ checksum = hash_file(abs_path)
+        if checksum is not None:
+ file_info[checksum] = {}
+ ## retain cache information if it exists
+ if cache and checksum in cached_files:
+ file_info[checksum] = cached_files[checksum]
+ ## have the file included in what's sent to the FOSSology server
else:
- bb.warn( "mkdir failed " + str(e) + "\n" )
- continue
-
- if(cache and checksum not in cached_files) or not cache:
- try:
- shutil.copyfile( abs_path, dest_path )
- except shutil.Error as e:
- bb.warn( str(e) + "\n" )
- except IOError as e:
- bb.warn( str(e) + "\n" )
+ file_info[checksum]['FileName'] = full_path
+ try:
+ bb.utils.mkdirhier(dest_dir)
+ shutil.copyfile(abs_path, dest_path)
+ except OSError as e:
+ bb.warn("SPDX: mkdirhier failed: " + str(e))
+ except shutil.Error as e:
+ bb.warn("SPDX: copyfile failed: " + str(e))
+ except IOError as e:
+ bb.warn("SPDX: copyfile failed: " + str(e))
+ else:
+ bb.warn("SPDX: Could not get checksum for file: " + f)
- with tarfile.open( info['tar_file'], "w:gz" ) as tar:
- tar.add( info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']) )
- tar.close()
+ with tarfile.open(info['tar_file'], "w:gz") as tar:
+ tar.add(info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']))
return file_info
-
-def remove_dir_tree( dir_name ):
- import shutil
+def remove_file(file_name):
try:
- shutil.rmtree( dir_name )
- except:
- pass
-
-def remove_file( file_name ):
- try:
- os.remove( file_name )
+ os.remove(file_name)
except OSError as e:
pass
-def list_files( dir ):
- for root, subFolders, files in os.walk( dir ):
+def list_files(dir):
+ for root, subFolders, files in os.walk(dir):
for f in files:
- rel_root = os.path.relpath( root, dir )
+ rel_root = os.path.relpath(root, dir)
yield rel_root, f
return
-def hash_file( file_name ):
+def hash_file(file_name):
try:
- f = open( file_name, 'rb' )
- data_string = f.read()
+ with open(file_name, 'rb') as f:
+ data_string = f.read()
+ sha1 = hash_string(data_string)
+ return sha1
except:
- return None
- finally:
- f.close()
- sha1 = hash_string( data_string )
- return sha1
+ return None
-def hash_string( data ):
+def hash_string(data):
import hashlib
sha1 = hashlib.sha1()
- sha1.update( data )
+ sha1.update(data)
return sha1.hexdigest()
-def run_fossology( foss_command ):
+def run_fossology(foss_command, full_spdx):
import string, re
import subprocess
p = subprocess.Popen(foss_command.split(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
foss_output, foss_error = p.communicate()
-
- records = []
- records = re.findall('FileName:.*?</text>', foss_output, re.S)
+ if p.returncode != 0:
+ return None
+
+ foss_output = unicode(foss_output, "utf-8")
+ foss_output = string.replace(foss_output, '\r', '')
+
+ # Package info
+ package_info = {}
+ if full_spdx:
+        # All mandatory, only one occurrence
+ package_info['PackageCopyrightText'] = re.findall('PackageCopyrightText: (.*?</text>)', foss_output, re.S)[0]
+ package_info['PackageLicenseDeclared'] = re.findall('PackageLicenseDeclared: (.*)', foss_output)[0]
+ package_info['PackageLicenseConcluded'] = re.findall('PackageLicenseConcluded: (.*)', foss_output)[0]
+        # There may be more than one of these
+ package_info['PackageLicenseInfoFromFiles'] = re.findall('PackageLicenseInfoFromFiles: (.*)', foss_output)
+ else:
+ DEFAULT = "NOASSERTION"
+ package_info['PackageCopyrightText'] = "<text>" + DEFAULT + "</text>"
+ package_info['PackageLicenseDeclared'] = DEFAULT
+ package_info['PackageLicenseConcluded'] = DEFAULT
+ package_info['PackageLicenseInfoFromFiles'] = []
+ # File info
file_info = {}
+ records = []
+ # FileName is also in PackageFileName, so we match on FileType as well.
+ records = re.findall('FileName:.*?FileType:.*?</text>', foss_output, re.S)
for rec in records:
- rec = string.replace( rec, '\r', '' )
- chksum = re.findall( 'FileChecksum: SHA1: (.*)\n', rec)[0]
+ chksum = re.findall('FileChecksum: SHA1: (.*)\n', rec)[0]
file_info[chksum] = {}
- file_info[chksum]['FileCopyrightText'] = re.findall( 'FileCopyrightText: '
+ file_info[chksum]['FileCopyrightText'] = re.findall('FileCopyrightText: '
+ '(.*?</text>)', rec, re.S )[0]
- fields = ['FileType','LicenseConcluded',
- 'LicenseInfoInFile','FileName']
+ fields = ['FileName', 'FileType', 'LicenseConcluded', 'LicenseInfoInFile']
for field in fields:
file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0]
- return file_info
+ # Licenses
+ license_info = {}
+ licenses = []
+ licenses = re.findall('LicenseID:.*?LicenseName:.*?\n', foss_output, re.S)
+ for lic in licenses:
+ license_id = re.findall('LicenseID: (.*)\n', lic)[0]
+ license_info[license_id] = {}
+ license_info[license_id]['ExtractedText'] = re.findall('ExtractedText: (.*?</text>)', lic, re.S)[0]
+ license_info[license_id]['LicenseName'] = re.findall('LicenseName: (.*)', lic)[0]
+
+ return (package_info, file_info, license_info)
-def create_spdx_doc( file_info, scanned_files ):
+def create_spdx_doc(file_info, scanned_files):
import json
## push foss changes back into cache
for chksum, lic_info in scanned_files.iteritems():
if chksum in file_info:
- file_info[chksum]['FileName'] = file_info[chksum]['FileName']
file_info[chksum]['FileType'] = lic_info['FileType']
file_info[chksum]['FileChecksum: SHA1'] = chksum
file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile']
file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded']
file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText']
else:
- bb.warn(lic_info['FileName'] + " : " + chksum
+ bb.warn("SPDX: " + lic_info['FileName'] + " : " + chksum
+ " : is not in the local file info: "
- + json.dumps(lic_info,indent=1))
+ + json.dumps(lic_info, indent=1))
return file_info
-def get_ver_code( dirname ):
+def get_ver_code(dirname):
chksums = []
- for f_dir, f in list_files( dirname ):
- try:
- stats = os.stat(os.path.join(dirname,f_dir,f))
- except OSError as e:
- bb.warn( "Stat failed" + str(e) + "\n")
- continue
- chksums.append(hash_file(os.path.join(dirname,f_dir,f)))
- ver_code_string = ''.join( chksums ).lower()
- ver_code = hash_string( ver_code_string )
+ for f_dir, f in list_files(dirname):
+ hash = hash_file(os.path.join(dirname, f_dir, f))
+        if hash is not None:
+            chksums.append(hash)
+        else:
+            bb.warn("SPDX: Could not hash file: " + os.path.join(dirname, f_dir, f))
+ ver_code_string = ''.join(chksums).lower()
+ ver_code = hash_string(ver_code_string)
return ver_code
-def get_header_info( info, spdx_verification_code, spdx_files ):
+def get_header_info(info, spdx_verification_code, package_info):
"""
Put together the header SPDX information.
Eventually this needs to become a lot less
@@ -275,14 +310,12 @@ def get_header_info( info, spdx_verification_code, spdx_files ):
head = []
DEFAULT = "NOASSERTION"
- #spdx_verification_code = get_ver_code( info['sourcedir'] )
- package_checksum = ''
- if os.path.exists(info['tar_file']):
- package_checksum = hash_file( info['tar_file'] )
- else:
+ package_checksum = hash_file(info['tar_file'])
+ if package_checksum is None:
package_checksum = DEFAULT
## document level information
+ head.append("## SPDX Document Information")
head.append("SPDXVersion: " + info['spdx_version'])
head.append("DataLicense: " + info['data_license'])
head.append("DocumentComment: <text>SPDX for "
@@ -290,9 +323,11 @@ def get_header_info( info, spdx_verification_code, spdx_files ):
head.append("")
## Creator information
- now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
+    ## Note: this stamps local time with a 'Z' suffix rather than true UTC.
+ now = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
head.append("## Creation Information")
- head.append("Creator: fossology-spdx")
+ ## Tools are supposed to have a version, but FOSSology+SPDX provides none.
+ head.append("Creator: Tool: FOSSology+SPDX")
head.append("Created: " + now)
head.append("CreatorComment: <text>UNO</text>")
head.append("")
@@ -301,21 +336,26 @@ def get_header_info( info, spdx_verification_code, spdx_files ):
head.append("## Package Information")
head.append("PackageName: " + info['pn'])
head.append("PackageVersion: " + info['pv'])
- head.append("PackageDownloadLocation: " + DEFAULT)
- head.append("PackageSummary: <text></text>")
head.append("PackageFileName: " + os.path.basename(info['tar_file']))
head.append("PackageSupplier: Person:" + DEFAULT)
+ head.append("PackageDownloadLocation: " + DEFAULT)
+ head.append("PackageSummary: <text></text>")
head.append("PackageOriginator: Person:" + DEFAULT)
head.append("PackageChecksum: SHA1: " + package_checksum)
head.append("PackageVerificationCode: " + spdx_verification_code)
head.append("PackageDescription: <text>" + info['pn']
+ " version " + info['pv'] + "</text>")
head.append("")
- head.append("PackageCopyrightText: <text>" + DEFAULT + "</text>")
+ head.append("PackageCopyrightText: "
+ + package_info['PackageCopyrightText'])
head.append("")
- head.append("PackageLicenseDeclared: " + DEFAULT)
- head.append("PackageLicenseConcluded: " + DEFAULT)
- head.append("PackageLicenseInfoFromFiles: " + DEFAULT)
+ head.append("PackageLicenseDeclared: "
+ + package_info['PackageLicenseDeclared'])
+ head.append("PackageLicenseConcluded: "
+ + package_info['PackageLicenseConcluded'])
+
+ for licref in package_info['PackageLicenseInfoFromFiles']:
+ head.append("PackageLicenseInfoFromFiles: " + licref)
head.append("")
## header for file level
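
run_fossology above relies on re.findall with tag-name anchors to pull fields out of the FOSSology+SPDX tag-value report. A toy sketch of that extraction style on hand-written input (the sample text is illustrative, not real server output):

    import re

    sample = ("LicenseID: LicenseRef-1\n"
              "ExtractedText: <text>Permission is hereby\ngranted...</text>\n"
              "LicenseName: MIT-style\n")
    license_id = re.findall('LicenseID: (.*)\n', sample)[0]
    # re.S lets '.' cross newlines so multi-line <text> blocks match.
    extracted = re.findall('ExtractedText: (.*?</text>)', sample, re.S)[0]
    assert license_id == 'LicenseRef-1'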
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index ace6bdb57a..2f0632af89 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -25,6 +25,8 @@ SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/all/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
+# Archive the sources for many architectures in one deploy folder
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
SSTATE_SCAN_FILES ?= "*.la *-config *_config"
SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
index a7dd0aa854..eeca9dedd2 100644
--- a/meta/classes/toaster.bbclass
+++ b/meta/classes/toaster.bbclass
@@ -149,14 +149,26 @@ python toaster_image_dumpdata() {
image_name = d.getVar('IMAGE_NAME', True);
image_info_data = {}
+ artifact_info_data = {}
+ # collect all artifacts
for dirpath, dirnames, filenames in os.walk(deploy_dir_image):
for fn in filenames:
- if fn.startswith(image_name):
- image_output = os.path.join(dirpath, fn)
- image_info_data[image_output] = os.stat(image_output).st_size
+ try:
+ if fn.startswith(image_name):
+ image_output = os.path.join(dirpath, fn)
+ image_info_data[image_output] = os.stat(image_output).st_size
+ else:
+ import stat
+ artifact_path = os.path.join(dirpath, fn)
+ filestat = os.stat(artifact_path)
+ if not os.path.islink(artifact_path):
+ artifact_info_data[artifact_path] = filestat.st_size
+ except OSError as e:
+ bb.event.fire(bb.event.MetadataEvent("OSErrorException", e), d)
bb.event.fire(bb.event.MetadataEvent("ImageFileSize",image_info_data), d)
+ bb.event.fire(bb.event.MetadataEvent("ArtifactFileSize",artifact_info_data), d)
}
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index d5b9675b7f..670e93b1de 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -7,6 +7,9 @@ REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
# This function creates an environment-setup-script for use in a deployable SDK
toolchain_create_sdk_env_script () {
# Create environment setup script
+ sdkpathnative=${7:-${SDKPATHNATIVE}}
+ prefix=${6:-${prefix_nativesdk}}
+ bindir=${5:-${bindir_nativesdk}}
libdir=${4:-${libdir}}
sysroot=${3:-${SDKTARGETSYSROOT}}
multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}}
@@ -16,16 +19,17 @@ toolchain_create_sdk_env_script () {
echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
EXTRAPATH=""
for i in ${CANADIANEXTRAOS}; do
- EXTRAPATH="$EXTRAPATH:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_ARCH}${TARGET_VENDOR}-$i"
+ EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
done
- echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_SYS}'$EXTRAPATH':$PATH' >> $script
+ echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+ echo 'export CCACHE_PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/${TARGET_SYS}'$EXTRAPATH':$CCACHE_PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
- echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
+ echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
- echo 'export OECORE_ACLOCAL_OPTS="-I ${SDKPATHNATIVE}/usr/share/aclocal"' >> $script
- echo 'export PYTHONHOME=${SDKPATHNATIVE}${prefix_nativesdk}' >> $script
+ echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
+ echo "export PYTHONHOME=$sdkpathnative$prefix" >> $script
toolchain_shared_env_script
}
@@ -37,6 +41,7 @@ toolchain_create_tree_env_script () {
rm -f $script
touch $script
echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script
+ echo 'export CCACHE_PATH=${STAGING_DIR_NATIVE}/usr/bin:${CCACHE_PATH}' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
@@ -93,7 +98,7 @@ EOF
#we get the cached site config at runtime
TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d, True)}"
TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
-TOOLCHAIN_NEED_CONFIGSITE_CACHE = "${TCLIBC} ncurses"
+TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "${TCLIBC} ncurses"
#This function creates a site config file
toolchain_create_sdk_siteconfig () {
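
The new arguments to toolchain_create_sdk_env_script are optional thanks to the shell's ${N:-default} expansion: an unset or empty positional parameter falls back to the usual variable value, so existing callers keep working. A minimal demonstration of the pattern (names are illustrative):

    demo () {
        sysroot=${1:-/default/sysroot}   # use $1 if set and non-empty
        echo "$sysroot"
    }
    demo                    # prints /default/sysroot
    demo /custom/sysroot    # prints /custom/sysroot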
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
index 8ac1b71bc2..cb061af348 100644
--- a/meta/classes/uboot-config.bbclass
+++ b/meta/classes/uboot-config.bbclass
@@ -31,31 +31,19 @@ python () {
return
ubootconfig = (d.getVar('UBOOT_CONFIG', True) or "").split()
- if len(ubootconfig) > 1:
- raise bb.parse.SkipPackage('You can only have a single default for UBOOT_CONFIG.')
+ if len(ubootconfig) > 0:
+ for config in ubootconfig:
+ for f, v in ubootconfigflags.items():
+ if config == f:
+ items = v.split(',')
+ if items[0] and len(items) > 2:
+ raise bb.parse.SkipPackage('Only config,images can be specified!')
+ d.appendVar('UBOOT_MACHINE', ' ' + items[0])
+ # IMAGE_FSTYPES appending
+ if len(items) > 1 and items[1]:
+ bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
+ d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
+ break
elif len(ubootconfig) == 0:
- raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
- ubootconfig = ubootconfig[0]
-
- for f, v in ubootconfigflags.items():
- if f == 'defaultval':
- continue
-
- items = v.split(',')
- if items[0] and len(items) > 2:
- raise bb.parse.SkipPackage('Only config,images can be specified!')
-
- if ubootconfig == f:
- bb.debug(1, "Setting UBOOT_MACHINE to %s." % items[0])
- d.setVar('UBOOT_MACHINE', items[0])
-
- # IMAGE_FSTYPES appending
- if len(items) > 1 and items[1]:
- bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
- d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
-
- # Go out as we found a match!
- break
- else:
- raise bb.parse.SkipPackage("UBOOT_CONFIG %s is not supported" % ubootconfig)
+ raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
}
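
With this rewrite UBOOT_CONFIG may now name several configurations, each expanded through a varflag of the form "config[,images]". A hypothetical machine configuration showing the effect (board and image names are made up):

    UBOOT_CONFIG ??= "sd emmc"
    UBOOT_CONFIG[sd]   = "myboard_sd_config,sdcard"
    UBOOT_CONFIG[emmc] = "myboard_emmc_config"
    # Both configs are appended to UBOOT_MACHINE; "sdcard" is appended
    # to IMAGE_FSTYPES because only the sd entry carries an image type.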
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
index 9f2c250d03..e6d78703a7 100644
--- a/meta/classes/update-alternatives.bbclass
+++ b/meta/classes/update-alternatives.bbclass
@@ -229,16 +229,16 @@ python populate_packages_updatealternatives () {
#bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX') + provider)
- bb.note('adding update-alternatives calls to postinst/postrm for %s' % pkg)
+ bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
bb.note('%s' % alt_setup_links)
postinst = d.getVar('pkg_postinst_%s' % pkg, True) or '#!/bin/sh\n'
postinst += alt_setup_links
d.setVar('pkg_postinst_%s' % pkg, postinst)
bb.note('%s' % alt_remove_links)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True) or '#!/bin/sh\n'
- postrm += alt_remove_links
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ prerm = d.getVar('pkg_prerm_%s' % pkg, True) or '#!/bin/sh\n'
+ prerm += alt_remove_links
+ d.setVar('pkg_prerm_%s' % pkg, prerm)
}
python package_do_filedeps_append () {
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
index bc1aa7dad6..a9c0323f95 100644
--- a/meta/classes/update-rc.d.bbclass
+++ b/meta/classes/update-rc.d.bbclass
@@ -48,9 +48,9 @@ fi
updatercd_postrm() {
if type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
- OPT="-r $D"
+ OPT="-f -r $D"
else
- OPT=""
+ OPT="-f"
fi
update-rc.d $OPT ${INITSCRIPT_NAME} remove
fi
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index 0b9a843b24..e443f845f7 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -24,6 +24,16 @@ if test "x$D" != "x"; then
# Installing into a sysroot
SYSROOT="$D"
OPT="--root $D"
+
+	# Make sure login.defs is there; this makes the debian package backend work
+	# correctly while doing the rootfs.
+	# The problem here is that if /etc/login.defs is treated as a config file of
+	# the shadow package, then while performing preinsts for packages that depend
+	# on shadow, there might only be /etc/login.defs.dpkg-new in the root filesystem.
+ if [ ! -e $D${sysconfdir}/login.defs -a -e $D${sysconfdir}/login.defs.dpkg-new ]; then
+ cp $D${sysconfdir}/login.defs.dpkg-new $D${sysconfdir}/login.defs
+ fi
+
# user/group lookups should match useradd/groupadd --root
export PSEUDO_PASSWD="$SYSROOT:${STAGING_DIR_NATIVE}"
fi
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
index c47b1eb810..4398a25154 100644
--- a/meta/classes/useradd_base.bbclass
+++ b/meta/classes/useradd_base.bbclass
@@ -29,7 +29,7 @@ perform_groupadd () {
fi
count=`expr $count + 1`
if test $count = $retries; then
- bbfatal "Tried running groupadd command $retries times without scucess, giving up"
+ bbfatal "Tried running groupadd command $retries times without success, giving up"
fi
sleep $count
done
@@ -57,7 +57,7 @@ perform_useradd () {
fi
count=`expr $count + 1`
if test $count = $retries; then
- bbfatal "Tried running useradd command $retries times without scucess, giving up"
+ bbfatal "Tried running useradd command $retries times without success, giving up"
fi
sleep $count
done
@@ -99,7 +99,7 @@ perform_groupmems () {
rm -f $rootdir${sysconfdir}/gshadow
rm -f $rootdir${sysconfdir}/gshadow-
fi
- bbfatal "Tried running groupmems command $retries times without scucess, giving up"
+ bbfatal "Tried running groupmems command $retries times without success, giving up"
fi
sleep $count
done
@@ -131,7 +131,7 @@ perform_groupdel () {
fi
count=`expr $count + 1`
if test $count = $retries; then
- bbfatal "Tried running groupdel command $retries times without scucess, giving up"
+ bbfatal "Tried running groupdel command $retries times without success, giving up"
fi
sleep $count
done
@@ -159,7 +159,7 @@ perform_userdel () {
fi
count=`expr $count + 1`
if test $count = $retries; then
- bbfatal "Tried running userdel command $retries times without scucess, giving up"
+ bbfatal "Tried running userdel command $retries times without success, giving up"
fi
sleep $count
done
@@ -189,7 +189,7 @@ perform_groupmod () {
fi
count=`expr $count + 1`
if test $count = $retries; then
- bbfatal "Tried running groupmod command $retries times without scucess, giving up"
+ bbfatal "Tried running groupmod command $retries times without success, giving up"
fi
sleep $count
done
@@ -219,7 +219,7 @@ perform_usermod () {
fi
count=`expr $count + 1`
if test $count = $retries; then
- bbfatal "Tried running usermod command $retries times without scucess, giving up"
+ bbfatal "Tried running usermod command $retries times without success, giving up"
fi
sleep $count
done
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
index 0b7803b251..9ff664ac41 100644
--- a/meta/classes/vala.bbclass
+++ b/meta/classes/vala.bbclass
@@ -1,6 +1,3 @@
-# Vala has problems with multiple concurrent invocations
-PARALLEL_MAKE = ""
-
# Everyone needs vala-native and targets need vala, too,
# because that is where target builds look for .vapi files.
#