Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/allarch.bbclass | 7
-rw-r--r--  meta/classes/archiver.bbclass | 9
-rw-r--r--  meta/classes/autotools.bbclass | 69
-rw-r--r--  meta/classes/base.bbclass | 168
-rw-r--r--  meta/classes/binconfig-disabled.bbclass | 16
-rw-r--r--  meta/classes/bluetooth.bbclass | 14
-rw-r--r--  meta/classes/boot-directdisk.bbclass | 24
-rw-r--r--  meta/classes/bootimg.bbclass | 42
-rw-r--r--  meta/classes/buildhistory.bbclass | 58
-rw-r--r--  meta/classes/buildstats.bbclass | 4
-rw-r--r--  meta/classes/chrpath.bbclass | 3
-rw-r--r--  meta/classes/cmake.bbclass | 18
-rw-r--r--  meta/classes/cml1.bbclass | 7
-rw-r--r--  meta/classes/compress_doc.bbclass | 260
-rw-r--r--  meta/classes/core-image.bbclass | 8
-rw-r--r--  meta/classes/cpan.bbclass | 4
-rw-r--r--  meta/classes/cpan_build.bbclass | 5
-rw-r--r--  meta/classes/cross-canadian.bbclass | 16
-rw-r--r--  meta/classes/crosssdk.bbclass | 3
-rw-r--r--  meta/classes/debian.bbclass | 16
-rw-r--r--  meta/classes/devshell.bbclass | 4
-rw-r--r--  meta/classes/distrodata.bbclass | 492
-rw-r--r--  meta/classes/distutils.bbclass | 8
-rw-r--r--  meta/classes/distutils3.bbclass | 4
-rw-r--r--  meta/classes/externalsrc.bbclass | 41
-rw-r--r--  meta/classes/fontcache.bbclass | 9
-rw-r--r--  meta/classes/gnomebase.bbclass | 14
-rw-r--r--  meta/classes/grub-efi.bbclass | 6
-rw-r--r--  meta/classes/gtk-doc.bbclass | 2
-rw-r--r--  meta/classes/gummiboot.bbclass | 4
-rw-r--r--  meta/classes/icecc.bbclass | 15
-rw-r--r--  meta/classes/image-buildinfo.bbclass | 69
-rw-r--r--  meta/classes/image-live.bbclass | 2
-rw-r--r--  meta/classes/image-mklibs.bbclass | 2
-rw-r--r--  meta/classes/image-swab.bbclass | 2
-rw-r--r--  meta/classes/image-vmdk.bbclass | 2
-rw-r--r--  meta/classes/image.bbclass | 115
-rw-r--r--  meta/classes/image_types.bbclass | 43
-rw-r--r--  meta/classes/insane.bbclass | 258
-rw-r--r--  meta/classes/kernel-arch.bbclass | 1
-rw-r--r--  meta/classes/kernel-module-split.bbclass | 16
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 256
-rw-r--r--  meta/classes/kernel.bbclass | 182
-rw-r--r--  meta/classes/kernelsrc.bbclass | 10
-rw-r--r--  meta/classes/libc-common.bbclass | 19
-rw-r--r--  meta/classes/libc-package.bbclass | 5
-rw-r--r--  meta/classes/license.bbclass | 111
-rw-r--r--  meta/classes/linux-kernel-base.bbclass | 13
-rw-r--r--  meta/classes/meta.bbclass | 2
-rw-r--r--  meta/classes/metadata_scm.bbclass | 2
-rw-r--r--  meta/classes/module-base.bbclass | 12
-rw-r--r--  meta/classes/module.bbclass | 6
-rw-r--r--  meta/classes/multilib.bbclass | 6
-rw-r--r--  meta/classes/multilib_global.bbclass | 117
-rw-r--r--  meta/classes/native.bbclass | 27
-rw-r--r--  meta/classes/nativesdk.bbclass | 4
-rw-r--r--  meta/classes/oelint.bbclass | 251
-rw-r--r--  meta/classes/own-mirrors.bbclass | 1
-rw-r--r--  meta/classes/package.bbclass | 402
-rw-r--r--  meta/classes/package_deb.bbclass | 19
-rw-r--r--  meta/classes/package_ipk.bbclass | 31
-rw-r--r--  meta/classes/package_rpm.bbclass | 57
-rw-r--r--  meta/classes/packagegroup.bbclass | 7
-rw-r--r--  meta/classes/patch.bbclass | 1
-rw-r--r--  meta/classes/pixbufcache.bbclass | 7
-rw-r--r--  meta/classes/populate_sdk_base.bbclass | 260
-rw-r--r--  meta/classes/populate_sdk_deb.bbclass | 13
-rw-r--r--  meta/classes/populate_sdk_ext.bbclass | 217
-rw-r--r--  meta/classes/populate_sdk_ipk.bbclass | 3
-rw-r--r--  meta/classes/populate_sdk_rpm.bbclass | 16
-rw-r--r--  meta/classes/prserv.bbclass | 31
-rw-r--r--  meta/classes/ptest-gnome.bbclass | 8
-rw-r--r--  meta/classes/pythonnative.bbclass | 2
-rw-r--r--  meta/classes/qemu.bbclass | 28
-rw-r--r--  meta/classes/qmake_base.bbclass | 6
-rw-r--r--  meta/classes/qt4e.bbclass | 3
-rw-r--r--  meta/classes/qt4x11.bbclass | 8
-rw-r--r--  meta/classes/report-error.bbclass | 17
-rw-r--r--  meta/classes/rm_work.bbclass | 21
-rw-r--r--  meta/classes/rootfs_deb.bbclass | 15
-rw-r--r--  meta/classes/rootfs_ipk.bbclass | 5
-rw-r--r--  meta/classes/rootfs_rpm.bbclass | 21
-rw-r--r--  meta/classes/sanity.bbclass | 120
-rw-r--r--  meta/classes/siteconfig.bbclass | 8
-rw-r--r--  meta/classes/siteinfo.bbclass | 16
-rw-r--r--  meta/classes/spdx.bbclass | 338
-rw-r--r--  meta/classes/sstate.bbclass | 112
-rw-r--r--  meta/classes/staging.bbclass | 1
-rw-r--r--  meta/classes/syslinux.bbclass | 4
-rw-r--r--  meta/classes/systemd.bbclass | 7
-rw-r--r--  meta/classes/testimage.bbclass | 109
-rw-r--r--  meta/classes/toaster.bbclass | 56
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 37
-rw-r--r--  meta/classes/uboot-config.bbclass | 40
-rw-r--r--  meta/classes/uninative.bbclass | 44
-rw-r--r--  meta/classes/update-rc.d.bbclass | 7
-rw-r--r--  meta/classes/useradd-staticids.bbclass | 8
-rw-r--r--  meta/classes/useradd.bbclass | 14
-rw-r--r--  meta/classes/utils.bbclass | 6
-rw-r--r--  meta/classes/vala.bbclass | 3
100 files changed, 2983 insertions, 2029 deletions
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
index d41dd4bee8..4bc99272c4 100644
--- a/meta/classes/allarch.bbclass
+++ b/meta/classes/allarch.bbclass
@@ -28,9 +28,16 @@ python () {
d.setVar("SDK_ARCH", "none")
d.setVar("SDK_CC_ARCH", "none")
+ # Avoid this being unnecessarily different due to nuances of
+ # the target machine that aren't important for "all" arch
+ # packages.
+ d.setVar("LDFLAGS", "")
+
# No need to do shared library processing or debug symbol handling
d.setVar("EXCLUDE_FROM_SHLIBS", "1")
d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
d.setVar("INHIBIT_PACKAGE_STRIP", "1")
+ elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
+ bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE", True))
}
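
The new bb.error above fires when PACKAGE_ARCH is no longer "all" by the time allarch's anonymous python runs, which happens when a recipe overrides PACKAGE_ARCH after inheriting packagegroup. A minimal sketch of the ordering the check expects (recipe contents hypothetical):

    # packagegroup-example.bb: set PACKAGE_ARCH first, then inherit,
    # so the allarch logic pulled in by packagegroup sees the override.
    PACKAGE_ARCH = "${MACHINE_ARCH}"
    inherit packagegroup
    RDEPENDS_${PN} = "example-app"
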
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index efd413bdc4..b598aa3ad6 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -136,7 +136,7 @@ python do_ar_original() {
bb.note('Archiving the original source...')
fetch = bb.fetch2.Fetch([], d)
for url in fetch.urls:
- local = fetch.localpath(url)
+ local = fetch.localpath(url).rstrip("/")
if os.path.isfile(local):
shutil.copy(local, ar_outdir)
elif os.path.isdir(local):
@@ -146,7 +146,12 @@ python do_ar_original() {
fetch.unpack(tmpdir, (url,))
os.chdir(tmpdir)
- tarname = os.path.join(ar_outdir, basename + '.tar.gz')
+ # We eliminate any AUTOINC+ in the revision.
+ try:
+ src_rev = bb.fetch2.get_srcrev(d).replace('AUTOINC+','')
+ except:
+ src_rev = 'NOREV'
+ tarname = os.path.join(ar_outdir, basename + '.' + src_rev + '.tar.gz')
tar = tarfile.open(tarname, 'w:gz')
tar.add('.')
tar.close()
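
With this change the archive name carries the cleaned source revision. Purely for illustration, if the basename is foo-1.0 and bb.fetch2.get_srcrev() returns "AUTOINC+deadbeef", the stripping above produces:

    foo-1.0.deadbeef.tar.gz    # 'AUTOINC+' removed from the revision
    foo-1.0.NOREV.tar.gz       # fallback when no revision can be determined
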
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
index 0dc1e6b8b7..046e83dc9b 100644
--- a/meta/classes/autotools.bbclass
+++ b/meta/classes/autotools.bbclass
@@ -27,7 +27,7 @@ inherit siteinfo
# Space separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
-export CONFIG_SITE = "${@siteinfo_get_files(d)}"
+export CONFIG_SITE = "${@siteinfo_get_files(d, False)}"
acpaths = "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
@@ -49,19 +49,12 @@ export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}"
export LD_FOR_BUILD = "${BUILD_LD}"
export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
-def autotools_set_crosscompiling(d):
- if not bb.data.inherits_class('native', d):
- return " cross_compiling=yes"
- return ""
-
def append_libtool_sysroot(d):
# Only supply libtool sysroot option for non-native packages
if not bb.data.inherits_class('native', d):
return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
return ""
-# EXTRA_OECONF_append = "${@autotools_set_crosscompiling(d)}"
-
CONFIGUREOPTS = " --build=${BUILD_SYS} \
--host=${HOST_SYS} \
--target=${TARGET_SYS} \
@@ -116,7 +109,12 @@ autotools_preconfigure() {
else
# At least remove the .la files since automake won't automatically
# regenerate them even if CFLAGS/LDFLAGS are different
- cd ${S}; find ${S} -name \*.la -delete
+ cd ${S}
+ if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
+ echo "Running \"${MAKE} clean\" in ${S}"
+ ${MAKE} clean
+ fi
+ find ${S} -name \*.la -delete
fi
fi
fi
@@ -142,23 +140,60 @@ python autotools_copy_aclocals () {
return
taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ #bb.warn(str(taskdepdata))
pn = d.getVar("PN", True)
aclocaldir = d.getVar("ACLOCALDIR", True)
oe.path.remove(aclocaldir)
bb.utils.mkdirhier(aclocaldir)
+ start = None
configuredeps = []
+
for dep in taskdepdata:
data = taskdepdata[dep]
- if data[1] == "do_configure" and data[0] != pn:
- configuredeps.append(data[0])
+ if data[1] == "do_configure" and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+
+ # We need to find configure tasks which are either from <target> -> <target>
+ # or <native> -> <native> but not <target> -> <native> unless they're direct
+ # dependencies. This mirrors what would get restored from sstate.
+ done = [dep]
+ next = [dep]
+ while next:
+ new = []
+ for dep in next:
+ data = taskdepdata[dep]
+ for datadep in data[3]:
+ if datadep in done:
+ continue
+ done.append(datadep)
+ if (not data[0].endswith("-native")) and taskdepdata[datadep][0].endswith("-native") and dep != start:
+ continue
+ new.append(datadep)
+ if taskdepdata[datadep][1] == "do_configure":
+ configuredeps.append(taskdepdata[datadep][0])
+ next = new
+
+ #configuredeps2 = []
+ #for dep in taskdepdata:
+ # data = taskdepdata[dep]
+ # if data[1] == "do_configure" and data[0] != pn:
+ # configuredeps2.append(data[0])
+ #configuredeps.sort()
+ #configuredeps2.sort()
+ #bb.warn(str(configuredeps))
+ #bb.warn(str(configuredeps2))
cp = []
+ siteconf = []
for c in configuredeps:
if c.endswith("-native"):
manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
elif c.startswith("nativesdk-"):
- manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}-%s.populate_sysroot" % c)
- elif "-cross-" in c or "-crosssdk-" in c:
+ manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c)
+ elif "-cross-" in c or "-crosssdk" in c:
continue
else:
manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${MACHINE}-%s.populate_sysroot" % c)
@@ -167,6 +202,8 @@ python autotools_copy_aclocals () {
for l in f:
if "/aclocal/" in l and l.strip().endswith(".m4"):
cp.append(l.strip())
+ elif "config_site.d/" in l:
+ cp.append(l.strip())
except:
bb.warn("%s not found" % manifest)
@@ -174,8 +211,10 @@ python autotools_copy_aclocals () {
t = os.path.join(aclocaldir, os.path.basename(c))
if not os.path.exists(t):
os.symlink(c, t)
+
+ d.setVar("CONFIG_SITE", siteinfo_get_files(d, False))
}
-autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH BB_TASKDEPDATA"
+autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
autotools_do_configure() {
# WARNING: gross hack follows:
@@ -202,7 +241,7 @@ autotools_do_configure() {
else
acpaths="${acpaths}"
fi
- AUTOV=`automake --version |head -n 1 |sed "s/.* //;s/\.[0-9]\+$//"`
+ AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
automake --version
echo "AUTOV is $AUTOV"
if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
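
The reworked AUTOV extraction folds head and sed into a single sed program: on line 1 it strips everything up to the last space, drops the final version component, and then quits. A sketch of the transformation, assuming automake 1.15.1 is installed:

    $ automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'
    1.15
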
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index c0d2c8ec88..2c2d0192f1 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -94,9 +94,27 @@ def extra_path_elements(d):
PATH_prepend = "${@extra_path_elements(d)}"
+def get_lic_checksum_file_list(d):
+ filelist = []
+ lic_files = d.getVar("LIC_FILES_CHKSUM", True) or ''
+
+ urls = lic_files.split()
+ for url in urls:
+ # We only care about items that are absolute paths since
+ # any others should be covered by SRC_URI.
+ try:
+ path = bb.fetch.decodeurl(url)[2]
+ if path[0] == '/':
+ filelist.append(path + ":" + str(os.path.exists(path)))
+ except bb.fetch.MalformedUrl:
+ raise bb.build.FuncFailed(d.getVar('PN', True) + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
+ return " ".join(filelist)
+
addtask fetch
do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
+do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
+do_fetch[vardeps] += "SRCREV"
python base_do_fetch() {
src_uri = (d.getVar('SRC_URI', True) or "").split()
@@ -112,7 +130,6 @@ python base_do_fetch() {
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
-do_unpack[cleandirs] = "${S}/patches"
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI', True) or "").split()
if len(src_uri) == 0:
@@ -120,11 +137,21 @@ python base_do_unpack() {
rootdir = d.getVar('WORKDIR', True)
+ # Ensure that we clean up ${S}/patches
+ # TODO: Investigate if we can remove
+ # the entire ${S} in this case.
+ s_dir = d.getVar('S', True)
+ p_dir = os.path.join(s_dir, 'patches')
+ bb.utils.remove(p_dir, True)
+
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.unpack(rootdir)
except bb.fetch2.BBFetchException as e:
raise bb.build.FuncFailed(e)
+
+ if not os.path.exists(s_dir):
+ bb.warn("%s ('S') don't exist, you must set 'S' to a proper value" % s_dir)
}
def pkgarch_mapping(d):
@@ -133,113 +160,6 @@ def pkgarch_mapping(d):
if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
d.setVar("TUNE_PKGARCH", "armv7a")
-def preferred_ml_updates(d):
- # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
- # we need to mirror these variables in the multilib case;
- multilibs = d.getVar('MULTILIBS', True) or ""
- if not multilibs:
- return
-
- prefixes = []
- for ext in multilibs.split():
- eext = ext.split(':')
- if len(eext) > 1 and eext[0] == 'multilib':
- prefixes.append(eext[1])
-
- versions = []
- providers = []
- for v in d.keys():
- if v.startswith("PREFERRED_VERSION_"):
- versions.append(v)
- if v.startswith("PREFERRED_PROVIDER_"):
- providers.append(v)
-
- for v in versions:
- val = d.getVar(v, False)
- pkg = v.replace("PREFERRED_VERSION_", "")
- if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
- continue
- if '-cross-' in pkg and '${' in pkg:
- for p in prefixes:
- localdata = bb.data.createCopy(d)
- override = ":virtclass-multilib-" + p
- localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- bb.data.update_data(localdata)
- newname = localdata.expand(v).replace("PREFERRED_VERSION_", "PREFERRED_VERSION_" + p + '-')
- if newname != v:
- newval = localdata.expand(val)
- d.setVar(newname, newval)
- # Avoid future variable key expansion
- vexp = d.expand(v)
- if v != vexp and d.getVar(v, False):
- d.renameVar(v, vexp)
- continue
- for p in prefixes:
- newname = "PREFERRED_VERSION_" + p + "-" + pkg
- if not d.getVar(newname, False):
- d.setVar(newname, val)
-
- for prov in providers:
- val = d.getVar(prov, False)
- pkg = prov.replace("PREFERRED_PROVIDER_", "")
- if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
- continue
- if 'cross-canadian' in pkg:
- for p in prefixes:
- localdata = bb.data.createCopy(d)
- override = ":virtclass-multilib-" + p
- localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- bb.data.update_data(localdata)
- newname = localdata.expand(prov)
- if newname != prov:
- newval = localdata.expand(val)
- d.setVar(newname, newval)
- # Avoid future variable key expansion
- provexp = d.expand(prov)
- if prov != provexp and d.getVar(prov, False):
- d.renameVar(prov, provexp)
- continue
- virt = ""
- if pkg.startswith("virtual/"):
- pkg = pkg.replace("virtual/", "")
- virt = "virtual/"
- for p in prefixes:
- if pkg != "kernel":
- newval = p + "-" + val
-
- # implement variable keys
- localdata = bb.data.createCopy(d)
- override = ":virtclass-multilib-" + p
- localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- bb.data.update_data(localdata)
- newname = localdata.expand(prov)
- if newname != prov and not d.getVar(newname, False):
- d.setVar(newname, localdata.expand(newval))
-
- # implement alternative multilib name
- newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
- if not d.getVar(newname, False):
- d.setVar(newname, newval)
- # Avoid future variable key expansion
- provexp = d.expand(prov)
- if prov != provexp and d.getVar(prov, False):
- d.renameVar(prov, provexp)
-
-
- mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
- extramp = []
- for p in mp:
- if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
- continue
- virt = ""
- if p.startswith("virtual/"):
- p = p.replace("virtual/", "")
- virt = "virtual/"
- for pref in prefixes:
- extramp.append(virt + pref + "-" + p)
- d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
-
-
def get_layers_branch_rev(d):
layers = (d.getVar("BBLAYERS", True) or "").split()
layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
@@ -290,7 +210,6 @@ python base_eventhandler() {
e.data.setVar("NATIVELSBSTRING", lsb_distro_identifier(e.data))
e.data.setVar('BB_VERSION', bb.__version__)
pkgarch_mapping(e.data)
- preferred_ml_updates(e.data)
oe.utils.features_backfill("DISTRO_FEATURES", e.data)
oe.utils.features_backfill("MACHINE_FEATURES", e.data)
@@ -323,15 +242,29 @@ python base_eventhandler() {
}
+CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
+CLEANBROKEN = "0"
+
addtask configure after do_patch
-do_configure[dirs] = "${S} ${B}"
+do_configure[dirs] = "${B}"
do_configure[deptask] = "do_populate_sysroot"
base_do_configure() {
- :
+ if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
+ if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
+ cd ${B}
+ if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
+ oe_runmake clean
+ fi
+ find ${B} -name \*.la -delete
+ fi
+ fi
+ if [ -n "${CONFIGURESTAMPFILE}" ]; then
+ echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
+ fi
}
addtask compile after do_configure
-do_compile[dirs] = "${S} ${B}"
+do_compile[dirs] = "${B}"
base_do_compile() {
if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
oe_runmake || die "make failed"
@@ -341,7 +274,7 @@ base_do_compile() {
}
addtask install after do_compile
-do_install[dirs] = "${D} ${S} ${B}"
+do_install[dirs] = "${D} ${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"
@@ -354,8 +287,6 @@ base_do_package() {
}
addtask build after do_populate_sysroot
-do_build = ""
-do_build[func] = "1"
do_build[noexec] = "1"
do_build[recrdeptask] += "do_deploy"
do_build () {
@@ -442,8 +373,6 @@ python () {
extrardeps = []
extraconf = []
for flag, flagval in sorted(pkgconfigflags.items()):
- if flag == "defaultval":
- continue
items = flagval.split(",")
num = len(items)
if num > 4:
@@ -471,7 +400,7 @@ python () {
# obsolete. Return a warning to the user.
princ = d.getVar('PRINC', True)
if princ and princ != "0":
- bb.warn("Use of PRINC %s was detected in the recipe %s (or one of its .bbappends)\nUse of PRINC is deprecated. The PR server should be used to automatically increment the PR. See: https://wiki.yoctoproject.org/wiki/PR_Service." % (princ, d.getVar("FILE", True)))
+ bb.error("Use of PRINC %s was detected in the recipe %s (or one of its .bbappends)\nUse of PRINC is deprecated. The PR server should be used to automatically increment the PR. See: https://wiki.yoctoproject.org/wiki/PR_Service." % (princ, d.getVar("FILE", True)))
pr = d.getVar('PR', True)
pr_prefix = re.search("\D+",pr)
prval = re.search("\d+",pr)
@@ -487,6 +416,7 @@ python () {
bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
if bb.data.inherits_class('license', d):
+ check_license_format(d)
unmatched_license_flag = check_license_flags(d)
if unmatched_license_flag:
bb.debug(1, "Skipping %s because it has a restricted license not"
@@ -540,6 +470,8 @@ python () {
check_license = False
if check_license and bad_licenses:
+ bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+
whitelist = []
for lic in bad_licenses:
for w in ["HOSTTOOLS_WHITELIST_", "LGPLv2_WHITELIST_", "WHITELIST_"]:
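
Among the base.bbclass changes above, CONFIGURESTAMPFILE records ${BB_TASKHASH} after each configure; if the stored hash differs on the next run, base_do_configure cleans ${B} (via oe_runmake clean plus .la removal) before reconfiguring. Recipes whose clean target is broken can opt out with the new CLEANBROKEN variable, e.g.:

    # hypothetical recipe snippet: skip 'make clean' when reconfiguring
    CLEANBROKEN = "1"
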
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
index 1308358fec..595cd096f5 100644
--- a/meta/classes/binconfig-disabled.bbclass
+++ b/meta/classes/binconfig-disabled.bbclass
@@ -2,11 +2,27 @@
# Class to disable binconfig files instead of installing them
#
+# The list of scripts which should be disabled.
+BINCONFIG ?= ""
+
FILES_${PN}-dev += "${bindir}/*-config"
do_install_append () {
for x in ${BINCONFIG}; do
echo "#!/bin/sh" > ${D}$x
+ # Make the disabled script emit invalid parameters for those configure
+ # scripts which call it without checking the return code.
+ echo "echo '--should-not-have-used-$x'" >> ${D}$x
echo "exit 1" >> ${D}$x
done
}
+
+SYSROOT_PREPROCESS_FUNCS += "binconfig_disabled_sysroot_preprocess"
+
+binconfig_disabled_sysroot_preprocess () {
+ for x in ${BINCONFIG}; do
+ configname=`basename $x`
+ install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
+ install ${D}$x ${SYSROOT_DESTDIR}${bindir_crossscripts}
+ done
+}
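
A minimal usage sketch for the now-documented BINCONFIG variable (recipe and script names hypothetical):

    inherit binconfig-disabled
    # Script(s) to replace with stubs that echo an invalid option and exit 1
    BINCONFIG = "${bindir}/foo-config"
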
diff --git a/meta/classes/bluetooth.bbclass b/meta/classes/bluetooth.bbclass
new file mode 100644
index 0000000000..f88b4ae5b8
--- /dev/null
+++ b/meta/classes/bluetooth.bbclass
@@ -0,0 +1,14 @@
+# Avoid code duplication in bluetooth-dependent recipes.
+
+# Define a variable that expands to the recipe (package) providing core
+# bluetooth support on the platform:
+# "" if bluetooth is not in DISTRO_FEATURES
+# else "bluez5" if bluez5 is in DISTRO_FEATURES
+# else "bluez4"
+
+# Use this with:
+# inherit bluetooth
+# PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', '${BLUEZ}', '', d)}"
+# PACKAGECONFIG[bluez4] = "--enable-bluez4,--disable-bluez4,bluez4"
+
+BLUEZ ?= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', bb.utils.contains('DISTRO_FEATURES', 'bluez5', 'bluez5', 'bluez4', d), '', d)}"
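
Following the comments above, the BLUEZ expansion resolves as (DISTRO_FEATURES values assumed for illustration):

    # DISTRO_FEATURES contains "bluetooth bluez5"  ->  BLUEZ = "bluez5"
    # DISTRO_FEATURES contains "bluetooth" only    ->  BLUEZ = "bluez4"
    # DISTRO_FEATURES lacks "bluetooth"            ->  BLUEZ = ""
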
diff --git a/meta/classes/boot-directdisk.bbclass b/meta/classes/boot-directdisk.bbclass
index 0da9932f4f..44f738b02e 100644
--- a/meta/classes/boot-directdisk.bbclass
+++ b/meta/classes/boot-directdisk.bbclass
@@ -20,6 +20,7 @@
# ${ROOTFS} - the rootfs image to incorporate
do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
+ virtual/kernel:do_deploy \
syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
parted-native:do_populate_sysroot \
@@ -69,12 +70,21 @@ boot_direct_populate() {
install -d $dest
# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
- install -m 0644 ${STAGING_KERNEL_DIR}/bzImage $dest/vmlinuz
-
- if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
- install -m 0644 ${INITRD} $dest/initrd
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/bzImage $dest/vmlinuz
+
+ # initrd is made by concatenating multiple filesystem images
+ if [ -n "${INITRD}" ]; then
+ rm -f $dest/initrd
+ for fs in ${INITRD}
+ do
+ if [ -s "${fs}" ]; then
+ cat ${fs} >> $dest/initrd
+ else
+ bbfatal "${fs} is invalid. initrd image creation failed."
+ fi
+ done
+ chmod 0644 $dest/initrd
fi
-
}
build_boot_dd() {
@@ -93,9 +103,9 @@ build_boot_dd() {
if [ "${IS_VMDK}" = "true" ]; then
if [ "x${AUTO_SYSLINUXMENU}" = "x1" ] ; then
- install -m 0644 ${STAGING_DIR}/${MACHINE}/usr/share/syslinux/vesamenu.c32 ${HDDDIR}${SYSLINUXDIR}/vesamenu.c32
+ install -m 0644 ${STAGING_DIR}/${MACHINE}/usr/share/syslinux/vesamenu.c32 $HDDDIR/${SYSLINUXDIR}/
if [ "x${SYSLINUX_SPLASH}" != "x" ] ; then
- install -m 0644 ${SYSLINUX_SPLASH} ${HDDDIR}${SYSLINUXDIR}/splash.lss
+ install -m 0644 ${SYSLINUX_SPLASH} $HDDDIR/${SYSLINUXDIR}/splash.lss
fi
fi
fi
diff --git a/meta/classes/bootimg.bbclass b/meta/classes/bootimg.bbclass
index 7678567600..b1c03ba068 100644
--- a/meta/classes/bootimg.bbclass
+++ b/meta/classes/bootimg.bbclass
@@ -18,15 +18,17 @@
# an hdd)
# External variables (also used by syslinux.bbclass)
-# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
+# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
# ${COMPRESSISO} - Transparent compress ISO, reduce size ~40% if set to 1
# ${NOISO} - skip building the ISO image if set to 1
# ${NOHDD} - skip building the HDD image if set to 1
+# ${HDDIMG_ID} - FAT image volume-id
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
mtools-native:do_populate_sysroot \
cdrtools-native:do_populate_sysroot \
+ virtual/kernel:do_deploy \
${@oe.utils.ifelse(d.getVar('COMPRESSISO'),'zisofs-tools-native:do_populate_sysroot','')}"
PACKAGES = " "
@@ -65,10 +67,20 @@ populate() {
install -d ${DEST}
# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
- install -m 0644 ${STAGING_KERNEL_DIR}/bzImage ${DEST}/vmlinuz
-
- if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
- install -m 0644 ${INITRD} ${DEST}/initrd
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/bzImage ${DEST}/vmlinuz
+
+ # initrd is made by concatenating multiple filesystem images
+ if [ -n "${INITRD}" ]; then
+ rm -f ${DEST}/initrd
+ for fs in ${INITRD}
+ do
+ if [ -s "${fs}" ]; then
+ cat ${fs} >> ${DEST}/initrd
+ else
+ bbfatal "${fs} is invalid. initrd image creation failed."
+ fi
+ done
+ chmod 0644 ${DEST}/initrd
fi
if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
@@ -79,10 +91,19 @@ populate() {
build_iso() {
# Only create an ISO if we have an INITRD and NOISO was not set
- if [ -z "${INITRD}" ] || [ ! -s "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
+ if [ -z "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
bbnote "ISO image will not be created."
return
fi
+ # ${INITRD} is a list of multiple filesystem images
+ for fs in ${INITRD}
+ do
+ if [ ! -s "${fs}" ]; then
+ bbnote "ISO image will not be created. ${fs} is invalid."
+ return
+ fi
+ done
+
populate ${ISODIR}
@@ -193,7 +214,14 @@ build_fat_img() {
FATSIZE="-F 32"
fi
- mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} ${BLOCKS}
+ if [ -z "${HDDIMG_ID}" ]; then
+ mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+ ${BLOCKS}
+ else
+ mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+ ${BLOCKS} -i ${HDDIMG_ID}
+ fi
+
# Copy FATSOURCEDIR recursively into the image file directly
mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
}
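
Since INITRD is now a space-separated list that populate() concatenates, and HDDIMG_ID is passed through to mkdosfs -i, a configuration could look like this (paths and id purely illustrative):

    # Concatenate two initramfs images into the final initrd
    INITRD = "${DEPLOY_DIR_IMAGE}/initrd-base.img ${DEPLOY_DIR_IMAGE}/initrd-extra.img"
    # Fixed FAT volume-id for reproducible boot images
    HDDIMG_ID = "4321ABCD"
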
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index 20382ce410..211dcf18b5 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -17,7 +17,7 @@ BUILDHISTORY_COMMIT ?= "0"
BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
BUILDHISTORY_PUSH_REPO ?= ""
-SSTATEPOSTINSTFUNCS += "buildhistory_emit_pkghistory"
+SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory"
# We want to avoid influencing the signatures of sstate tasks - first the function itself:
sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
# then the value added to SSTATEPOSTINSTFUNCS:
@@ -155,7 +155,7 @@ python buildhistory_emit_pkghistory() {
with open(os.path.join(pkgdata_dir, pn)) as f:
for line in f.readlines():
if line.startswith('PACKAGES: '):
- packages = squashspaces(line.split(': ', 1)[1])
+ packages = oe.utils.squashspaces(line.split(': ', 1)[1])
break
except IOError as e:
if e.errno == errno.ENOENT:
@@ -181,7 +181,7 @@ python buildhistory_emit_pkghistory() {
rcpinfo.pe = pe
rcpinfo.pv = pv
rcpinfo.pr = pr
- rcpinfo.depends = sortlist(squashspaces(d.getVar('DEPENDS', True) or ""))
+ rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS', True) or ""))
rcpinfo.packages = packages
write_recipehistory(rcpinfo, d)
@@ -222,13 +222,13 @@ python buildhistory_emit_pkghistory() {
pkginfo.pkge = pkge
pkginfo.pkgv = pkgv
pkginfo.pkgr = pkgr
- pkginfo.rprovides = sortpkglist(squashspaces(pkgdata.get('RPROVIDES', "")))
- pkginfo.rdepends = sortpkglist(squashspaces(pkgdata.get('RDEPENDS', "")))
- pkginfo.rrecommends = sortpkglist(squashspaces(pkgdata.get('RRECOMMENDS', "")))
- pkginfo.rsuggests = sortpkglist(squashspaces(pkgdata.get('RSUGGESTS', "")))
- pkginfo.rreplaces = sortpkglist(squashspaces(pkgdata.get('RREPLACES', "")))
- pkginfo.rconflicts = sortpkglist(squashspaces(pkgdata.get('RCONFLICTS', "")))
- pkginfo.files = squashspaces(pkgdata.get('FILES', ""))
+ pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(pkgdata.get('RPROVIDES', "")))
+ pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RDEPENDS', "")))
+ pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RRECOMMENDS', "")))
+ pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(pkgdata.get('RSUGGESTS', "")))
+ pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(pkgdata.get('RREPLACES', "")))
+ pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(pkgdata.get('RCONFLICTS', "")))
+ pkginfo.files = oe.utils.squashspaces(pkgdata.get('FILES', ""))
for filevar in pkginfo.filevars:
pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
@@ -374,7 +374,7 @@ buildhistory_get_installed() {
printf "" > $1/installed-package-sizes.tmp
cat $pkgcache | while read pkg pkgfile pkgarch
do
- size=`oe-pkgdata-util read-value ${PKGDATA_DIR} "PKGSIZE" ${pkg}_${pkgarch}`
+ size=`oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" ${pkg}_${pkgarch}`
if [ "$size" != "" ] ; then
echo "$size $pkg" >> $1/installed-package-sizes.tmp
fi
@@ -461,10 +461,10 @@ END
echo "IMAGESIZE = $imagesize" >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
# Add some configuration information
- echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id
+ echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id.txt
- cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id <<END
-${@buildhistory_get_layers(d)}
+ cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id.txt <<END
+${@buildhistory_get_build_id(d)}
END
}
@@ -484,8 +484,9 @@ END
echo "SDKSIZE = $sdksize" >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
}
-# By prepending we get in before the removal of packaging files
-ROOTFS_POSTPROCESS_COMMAND =+ " buildhistory_list_installed_image ;\
+# By using ROOTFS_POSTUNINSTALL_COMMAND we get in after uninstallation of
+# unneeded packages but before the removal of packaging files
+ROOTFS_POSTUNINSTALL_COMMAND += " buildhistory_list_installed_image ;\
buildhistory_get_image_installed ; "
IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
@@ -498,11 +499,23 @@ POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host ;
SDK_POSTPROCESS_COMMAND += "buildhistory_get_sdkinfo ; "
-def buildhistory_get_layers(d):
+def buildhistory_get_build_id(d):
if d.getVar('BB_WORKERCONTEXT', True) != '1':
return ""
- layertext = "Configured metadata layers:\n%s\n" % '\n'.join(get_layers_branch_rev(d))
- return layertext
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+ statuslines = []
+ for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
+ g = globals()
+ if func not in g:
+ bb.warn("Build configuration function '%s' does not exist" % func)
+ else:
+ flines = g[func](localdata)
+ if flines:
+ statuslines.extend(flines)
+
+ statusheader = d.getVar('BUILDCFG_HEADER', True)
+ return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
def buildhistory_get_metadata_revs(d):
# We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
@@ -513,11 +526,6 @@ def buildhistory_get_metadata_revs(d):
for i in layers]
return '\n'.join(medadata_revs)
-
-def squashspaces(string):
- import re
- return re.sub("\s+", " ", string).strip()
-
def outputvars(vars, listvars, d):
vars = vars.split()
listvars = listvars.split()
@@ -526,7 +534,7 @@ def outputvars(vars, listvars, d):
value = d.getVar(var, True) or ""
if var in listvars:
# Squash out spaces
- value = squashspaces(value)
+ value = oe.utils.squashspaces(value)
ret += "%s = %s\n" % (var, value)
return ret.rstrip('\n')
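
The calls above now use oe.utils.squashspaces in place of the local helper deleted in this hunk; as the removed definition shows, it simply collapses whitespace runs to single spaces and strips the ends:

    def squashspaces(string):
        import re
        return re.sub("\s+", " ", string).strip()
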
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
index 89ae72c679..48442641d4 100644
--- a/meta/classes/buildstats.bbclass
+++ b/meta/classes/buildstats.bbclass
@@ -52,8 +52,8 @@ def set_device(e):
# If we end up hitting one of these fs, we'll just skip diskstats collection.
############################################################################
device=os.stat(tmpdir)
- majordev=os.major(device.st_dev)
- minordev=os.minor(device.st_dev)
+ majordev=os.major(long(device.st_dev))
+ minordev=os.minor(long(device.st_dev))
############################################################################
# Bug 1700:
# Because tmpfs/encryptfs/ramfs etc inserts no entry in /proc/diskstats
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
index 7765020e98..7a5d9602f5 100644
--- a/meta/classes/chrpath.bbclass
+++ b/meta/classes/chrpath.bbclass
@@ -10,6 +10,8 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
if p.returncode != 0:
return
+ # Handle RUNPATH as well as RPATH
+ err = err.replace("RUNPATH=","RPATH=")
# Throw away everything other than the rpath list
curr_rpath = err.partition("RPATH=")[2]
#bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
@@ -54,7 +56,6 @@ def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
continue
newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
- bb.warn("%s %s %s %s" % (fpath, rpath, newpath, rootdir))
p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
err, out = p.communicate()
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index c9c15f3076..3549c38f15 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -1,3 +1,6 @@
+# Path to the directory containing the CMakeLists.txt to process.
+OECMAKE_SOURCEPATH ?= "${S}"
+
DEPENDS_prepend = "cmake-native "
B = "${WORKDIR}/build"
@@ -10,10 +13,11 @@ inherit autotools
# C/C++ Compiler (without cpu arch/tune arguments)
OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
+OECMAKE_AR ?= "${AR}"
# Compiler flags
OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
-OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} -fpermissive"
+OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}"
OECMAKE_C_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CFLAGS} -DNDEBUG"
OECMAKE_CXX_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CXXFLAGS} -DNDEBUG"
OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
@@ -31,10 +35,14 @@ set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).
set( CMAKE_SYSTEM_PROCESSOR ${TARGET_ARCH} )
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
+set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
+set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
+set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "CFLAGS for release" )
set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "CXXFLAGS for release" )
+set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "ASM FLAGS for release" )
set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
@@ -65,14 +73,16 @@ EOF
addtask generate_toolchain_file after do_patch before do_configure
cmake_do_configure() {
- if [ "${OECMAKE_BUILDPATH}" -o "${OECMAKE_SOURCEPATH}" ]; then
- bbnote "cmake.bbclass no longer uses OECMAKE_SOURCEPATH and OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
+ if [ "${OECMAKE_BUILDPATH}" ]; then
+ bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
fi
if [ "${S}" != "${B}" ]; then
rm -rf ${B}
mkdir -p ${B}
cd ${B}
+ else
+ find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
fi
# Just like autotools cmake can use a site file to cache result that need generated binaries to run
@@ -84,7 +94,7 @@ cmake_do_configure() {
cmake \
${OECMAKE_SITEFILE} \
- ${S} \
+ ${OECMAKE_SOURCEPATH} \
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
-DCMAKE_INSTALL_BINDIR:PATH=${bindir} \
-DCMAKE_INSTALL_SBINDIR:PATH=${sbindir} \
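
The new OECMAKE_SOURCEPATH (defaulting to ${S}) lets a recipe point cmake at a subdirectory rather than the top of the source tree, e.g. (directory name hypothetical):

    # Only the library subdirectory carries the CMakeLists.txt we want
    OECMAKE_SOURCEPATH = "${S}/libfoo"
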
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index 34c0c4e6c7..43acfd531b 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -9,10 +9,11 @@ addtask configure after do_unpack do_patch before do_compile
inherit terminal
-OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS HOST_LOADLIBES TERMINFO"
+OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS TERMINFO CROSS_CURSES_LIB CROSS_CURSES_INC"
HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
HOSTLDFLAGS = "${BUILD_LDFLAGS}"
-HOST_LOADLIBES = "-lncurses"
+CROSS_CURSES_LIB = "-lncurses -ltinfo"
+CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
python do_menuconfig() {
@@ -58,7 +59,7 @@ python do_diffconfig() {
bb.fatal("No config files found. Did you do menuconfig ?\n%s" % e)
if isdiff:
- statement = 'diff -Nurp ' + configorig + ' ' + config + '| sed -n "s/^\+//p" >' + fragment
+ statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
subprocess.call(statement, shell=True)
shutil.copy(configorig, config)
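
The replacement diff invocation emits only lines that are new or changed in the updated .config, which is exactly what a config fragment should contain. Assuming a single added option, the behaviour sketches as:

    $ diff --unchanged-line-format= --old-line-format= --new-line-format="%L" \
        .config.orig .config
    CONFIG_EXAMPLE=y
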
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
new file mode 100644
index 0000000000..9b58d82ce5
--- /dev/null
+++ b/meta/classes/compress_doc.bbclass
@@ -0,0 +1,260 @@
+# Compress man pages in ${mandir} and info pages in ${infodir}
+#
+# 1. The doc will be compressed to gz format by default.
+#
+# 2. Docs already compressed in a format listed in ${DOC_COMPRESS_LIST}
+# but different from ${DOC_COMPRESS} are automatically recompressed
+# to the ${DOC_COMPRESS} format.
+#
+# 3. A new compression type can easily be added by editing
+# local.conf, for example:
+# DOC_COMPRESS_LIST_append = ' abc'
+# DOC_COMPRESS = 'abc'
+# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
+# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
+
+# All supported compression policies
+DOC_COMPRESS_LIST ?= "gz xz bz2"
+
+# Compression policy, must be one of ${DOC_COMPRESS_LIST}
+DOC_COMPRESS ?= "gz"
+
+# Compression shell command
+DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n'
+DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9"
+DOC_COMPRESS_CMD[xz] ?= "xz -v"
+
+# Decompression shell command
+DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v'
+DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v"
+DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"
+
+PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
+python package_do_compress_doc() {
+ compress_mode = d.getVar('DOC_COMPRESS', True)
+ compress_list = (d.getVar('DOC_COMPRESS_LIST', True) or '').split()
+ if compress_mode not in compress_list:
+ bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))
+
+ dvar = d.getVar('PKGD', True)
+ compress_cmds = {}
+ decompress_cmds = {}
+ for mode in compress_list:
+ compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
+ decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)
+
+ mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir", True))
+ if os.path.exists(mandir):
+ # Decompress doc files whose format is not compress_mode
+ decompress_doc(mandir, compress_mode, decompress_cmds)
+ compress_doc(mandir, compress_mode, compress_cmds)
+
+ infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir", True))
+ if os.path.exists(infodir):
+ # Decompress doc files whose format is not compress_mode
+ decompress_doc(infodir, compress_mode, decompress_cmds)
+ compress_doc(infodir, compress_mode, compress_cmds)
+}
+
+def _get_compress_format(file, compress_format_list):
+ for compress_format in compress_format_list:
+ compress_suffix = '.' + compress_format
+ if file.endswith(compress_suffix):
+ return compress_format
+
+ return ''
+
+# Collect hardlinks into a dict; each entry lists the hardlinks
+# that point to the same doc file, e.g.
+# {hardlink10: [hardlink11, hardlink12], ...}
+# where hardlink10, hardlink11 and hardlink12 are the same file.
+def _collect_hardlink(hardlink_dict, file):
+ for hardlink in hardlink_dict:
+ # Add to an existing hardlink entry
+ if os.path.samefile(hardlink, file):
+ hardlink_dict[hardlink].append(file)
+ return hardlink_dict
+
+ hardlink_dict[file] = []
+ return hardlink_dict
+
+def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
+ for target in hardlink_dict:
+ if decompress:
+ compress_format = _get_compress_format(target, shell_cmds.keys())
+ cmd = "%s -f %s" % (shell_cmds[compress_format], target)
+ bb.note('decompress hardlink %s' % target)
+ else:
+ cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
+ bb.note('compress hardlink %s' % target)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ return
+
+ for hardlink_dup in hardlink_dict[target]:
+ if decompress:
+ # Remove compress suffix
+ compress_suffix = '.' + compress_format
+ new_hardlink = hardlink_dup[:-len(compress_suffix)]
+ new_target = target[:-len(compress_suffix)]
+ else:
+ # Append compress suffix
+ compress_suffix = '.' + compress_mode
+ new_hardlink = hardlink_dup + compress_suffix
+ new_target = target + compress_suffix
+
+ bb.note('hardlink %s-->%s' % (new_hardlink, new_target))
+ if not os.path.exists(new_hardlink):
+ os.link(new_target, new_hardlink)
+ if os.path.exists(hardlink_dup):
+ os.unlink(hardlink_dup)
+
+def _process_symlink(file, compress_format, decompress=False):
+ compress_suffix = '.' + compress_format
+ if decompress:
+ # Remove compress suffix
+ new_linkname = file[:-len(compress_suffix)]
+ new_source = os.readlink(file)[:-len(compress_suffix)]
+ else:
+ # Append compress suffix
+ new_linkname = file + compress_suffix
+ new_source = os.readlink(file) + compress_suffix
+
+ bb.note('symlink %s-->%s' % (new_linkname, new_source))
+ if not os.path.exists(new_linkname):
+ os.symlink(new_source, new_linkname)
+
+ os.unlink(file)
+
+def _is_info(file):
+ flags = '.info .info-'.split()
+ for flag in flags:
+ if flag in os.path.basename(file):
+ return True
+
+ return False
+
+def _is_man(file):
+ import re
+
+ # This follows the MANSECT variable in man(1.6g)'s man.config:
+ # ".1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o"
+ # The name must not start with '.' and must end in one of the
+ # colon-separated suffixes above
+ p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')
+ if p.search(file):
+ return True
+
+ return False
+
+def _is_compress_doc(file, compress_format_list):
+ compress_format = _get_compress_format(file, compress_format_list)
+ compress_suffix = '.' + compress_format
+ if file.endswith(compress_suffix):
+ # Remove the compress suffix
+ uncompress_file = file[:-len(compress_suffix)]
+ if _is_info(uncompress_file) or _is_man(uncompress_file):
+ return True, compress_format
+
+ return False, ''
+
+def compress_doc(topdir, compress_mode, compress_cmds):
+ hardlink_dict = {}
+ for root, dirs, files in os.walk(topdir):
+ for f in files:
+ file = os.path.join(root, f)
+ if os.path.isdir(file):
+ continue
+
+ if _is_info(file) or _is_man(file):
+ # Symlink
+ if os.path.islink(file):
+ _process_symlink(file, compress_mode)
+ # Hardlink
+ elif os.lstat(file).st_nlink > 1:
+ _collect_hardlink(hardlink_dict, file)
+ # Normal file
+ elif os.path.isfile(file):
+ cmd = "%s %s" % (compress_cmds[compress_mode], file)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ continue
+ bb.note('compress file %s' % file)
+
+ _process_hardlink(hardlink_dict, compress_mode, compress_cmds)
+
+# Decompress doc files whose format is not compress_mode
+def decompress_doc(topdir, compress_mode, decompress_cmds):
+ hardlink_dict = {}
+ decompress = True
+ for root, dirs, files in os.walk(topdir):
+ for f in files:
+ file = os.path.join(root, f)
+ if os.path.isdir(file):
+ continue
+
+ res, compress_format = _is_compress_doc(file, decompress_cmds.keys())
+ # Decompress files whose format is not compress_mode
+ if res and compress_mode!=compress_format:
+ # Symlink
+ if os.path.islink(file):
+ _process_symlink(file, compress_format, decompress)
+ # Hardlink
+ elif os.lstat(file).st_nlink > 1:
+ _collect_hardlink(hardlink_dict, file)
+ # Normal file
+ elif os.path.isfile(file):
+ cmd = "%s %s" % (decompress_cmds[compress_format], file)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ continue
+ bb.note('decompress file %s' % file)
+
+ _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress)
+
+python compress_doc_updatealternatives () {
+ if not bb.data.inherits_class('update-alternatives', d):
+ return
+
+ mandir = d.getVar("mandir", True)
+ infodir = d.getVar("infodir", True)
+ compress_mode = d.getVar('DOC_COMPRESS', True)
+ for pkg in (d.getVar('PACKAGES', True) or "").split():
+ old_names = (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split()
+ new_names = []
+ for old_name in old_names:
+ old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name, True)
+ old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True) or \
+ d.getVarFlag('ALTERNATIVE_TARGET', old_name, True) or \
+ d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or \
+ d.getVar('ALTERNATIVE_TARGET', True) or \
+ old_link
+ # Sometimes old_target is specified as relative to the link name.
+ old_target = os.path.join(os.path.dirname(old_link), old_target)
+
+ # Update the alternatives entries that point into the compressed doc dirs
+ if mandir in old_target or infodir in old_target:
+ new_name = old_name + '.' + compress_mode
+ new_link = old_link + '.' + compress_mode
+ new_target = old_target + '.' + compress_mode
+ d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
+ d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
+ if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True):
+ d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
+ d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
+ elif d.getVarFlag('ALTERNATIVE_TARGET', old_name, True):
+ d.delVarFlag('ALTERNATIVE_TARGET', old_name)
+ d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
+ elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True):
+ d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
+ elif d.getVar('ALTERNATIVE_TARGET', True):
+ d.setVar('ALTERNATIVE_TARGET', new_target)
+
+ new_names.append(new_name)
+
+ if new_names:
+ d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names))
+}
+
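
Per the header comments, switching the compression policy is a one-line change; for instance, to ship xz-compressed docs (xz is already in the default DOC_COMPRESS_LIST):

    # local.conf
    DOC_COMPRESS = "xz"
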
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
index 1b36cba773..a78f93405b 100644
--- a/meta/classes/core-image.bbclass
+++ b/meta/classes/core-image.bbclass
@@ -7,8 +7,8 @@ LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=4d92cd373abda3937c2bc47fbc49d
# IMAGE_FEATURES control content of the core reference images
#
-# By default we install packagegroup-core-boot and packagegroup-base packages - this gives us
-# working (console only) rootfs.
+# By default we install packagegroup-core-boot and packagegroup-base-extended packages;
+# this gives us working (console only) rootfs.
#
# Available IMAGE_FEATURES:
#
@@ -30,6 +30,7 @@ LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=4d92cd373abda3937c2bc47fbc49d
# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
# - doc-pkgs - documentation packages for all installed packages in the rootfs
+# - ptest-pkgs - ptest packages for all ptest-enabled recipes
# - read-only-rootfs - tweaks an image to support read-only rootfs
#
FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
@@ -72,8 +73,5 @@ inherit image
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
-# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "", "zap_empty_root_password ; ",d)}'
-
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
index 7088039fa0..e2bbd2f63a 100644
--- a/meta/classes/cpan.bbclass
+++ b/meta/classes/cpan.bbclass
@@ -47,8 +47,8 @@ cpan_do_compile () {
cpan_do_install () {
oe_runmake DESTDIR="${D}" install_vendor
- for PERLSCRIPT in `grep -rIEl '#!${bindir}/perl-native.*/perl' ${D}`; do
- sed -i -e 's|^#!${bindir}/perl-native.*/perl|#!/usr/bin/env nativeperl|' $PERLSCRIPT
+ for PERLSCRIPT in `grep -rIEl '#! *${bindir}/perl-native.*/perl' ${D}`; do
+ sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT
done
}
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
index 5b0ad61b4c..2eb8162314 100644
--- a/meta/classes/cpan_build.bbclass
+++ b/meta/classes/cpan_build.bbclass
@@ -3,6 +3,8 @@
#
inherit cpan-base perlnative
+EXTRA_CPAN_BUILD_FLAGS ?= ""
+
# Env var which tells perl if it should use host (no) or target (yes) settings
export PERLCONFIGTARGET = "${@is_target(d)}"
export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
@@ -36,7 +38,8 @@ cpan_build_do_configure () {
--install_path script=${bindir} \
--install_path bin=${bindir} \
--install_path bindoc=${mandir}/man1 \
- --install_path libdoc=${mandir}/man3
+ --install_path libdoc=${mandir}/man3 \
+ ${EXTRA_CPAN_BUILD_FLAGS}
}
cpan_build_do_compile () {
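
The new EXTRA_CPAN_BUILD_FLAGS hook lets a recipe pass extra arguments to perl Build.PL; a hypothetical example (the flag itself depends on the distribution being built):

    EXTRA_CPAN_BUILD_FLAGS = "--with-feature=example"
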
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
index 25e246ddc3..a8565e91e3 100644
--- a/meta/classes/cross-canadian.bbclass
+++ b/meta/classes/cross-canadian.bbclass
@@ -16,6 +16,7 @@ STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${S
#
PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
CANADIANEXTRAOS = ""
+MODIFYTOS ??= "1"
python () {
archs = d.getVar('PACKAGE_ARCHS', True).split()
sdkarchs = []
@@ -23,6 +24,9 @@ python () {
sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
+ # Allow the following code segment to be disabled, e.g. by meta-environment
+ if d.getVar("MODIFYTOS", True) != "1":
+ return
# PowerPC can build "linux" and "linux-gnuspe"
tarch = d.getVar("TARGET_ARCH", True)
if tarch == "powerpc":
@@ -84,11 +88,12 @@ EXTRANATIVEPATH += "chrpath-native"
# Path mangling needed by the cross packaging
# Note that we use := here to ensure that libdir and includedir are
# target paths.
-target_libdir := "${libdir}"
-target_includedir := "${includedir}"
-target_base_libdir := "${base_libdir}"
+target_base_prefix := "${base_prefix}"
target_prefix := "${prefix}"
target_exec_prefix := "${exec_prefix}"
+target_base_libdir = "${target_base_prefix}/${baselib}"
+target_libdir = "${target_exec_prefix}/${baselib}"
+target_includedir := "${includedir}"
# Change to place files in SDKPATH
base_prefix = "${SDKPATHNATIVE}"
@@ -110,6 +115,7 @@ export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
do_populate_sysroot[stamp-extra-info] = ""
+do_packagedata[stamp-extra-info] = ""
USE_NLS = "${SDKUSE_NLS}"
@@ -119,8 +125,8 @@ TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
# If MLPREFIX is set by multilib code, shlibs
# points to the wrong place so force it
-SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs"
-SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs"
+SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
+SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2"
cross_canadian_bindirlinks () {
for i in ${CANADIANEXTRAOS}
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
index 261a37465e..87d5cf5d37 100644
--- a/meta/classes/crosssdk.bbclass
+++ b/meta/classes/crosssdk.bbclass
@@ -1,6 +1,7 @@
inherit cross
CLASSOVERRIDE = "class-crosssdk"
+MACHINEOVERRIDES = ""
PACKAGE_ARCH = "${SDK_ARCH}"
python () {
# set TUNE_PKGARCH to SDK_ARCH
@@ -30,6 +31,6 @@ do_populate_sysroot[stamp-extra-info] = ""
do_packagedata[stamp-extra-info] = ""
# Need to force this to ensure consistency across architectures
-EXTRA_OECONF_FPU = ""
+EXTRA_OECONF_GCC_FLOAT = ""
USE_NLS = "no"
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
index d7ea151a5d..c859703669 100644
--- a/meta/classes/debian.bbclass
+++ b/meta/classes/debian.bbclass
@@ -8,6 +8,11 @@
#
# Better expressed as: ensure all RDEPENDS packages are built before we package.
# This means we can't have circular RDEPENDS/RRECOMMENDS.
+
+AUTO_LIBNAME_PKGS = "${PACKAGES}"
+
+inherit package
+
DEBIANRDEP = "do_packagedata"
do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
@@ -47,6 +52,13 @@ python debian_package_name_hook () {
return 0
return (s[stat.ST_MODE] & stat.S_IEXEC)
+ def add_rprovides(pkg, d):
+ newpkg = d.getVar('PKG_' + pkg)
+ if newpkg and newpkg != pkg:
+ provs = (d.getVar('RPROVIDES_' + pkg, True) or "").split()
+ if pkg not in provs:
+ d.appendVar('RPROVIDES_' + pkg, " " + pkg)
+
def auto_libname(packages, orig_pkg):
sonames = []
has_bins = 0
@@ -94,6 +106,7 @@ python debian_package_name_hook () {
(pkgname, devname) = soname_result
for pkg in packages.split():
if (d.getVar('PKG_' + pkg) or d.getVar('DEBIAN_NOAUTONAME_' + pkg)):
+ add_rprovides(pkg, d)
continue
debian_pn = d.getVar('DEBIANNAME_' + pkg)
if debian_pn:
@@ -108,6 +121,9 @@ python debian_package_name_hook () {
newpkg = mlpre + newpkg
if newpkg != pkg:
d.setVar('PKG_' + pkg, newpkg)
+ add_rprovides(pkg, d)
+ else:
+ add_rprovides(orig_pkg, d)
# reversed sort is needed when some package is substring of another
# ie in ncurses we get without reverse sort:
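
The effect of add_rprovides above: a package renamed by the Debian naming hook still satisfies dependencies on its original name. Illustratively, if the hook sets PKG_libfoo = "libfoo1", it also appends the original name to the package's RPROVIDES (names hypothetical):

    RPROVIDES_libfoo = "... libfoo"

so RDEPENDS entries on "libfoo" elsewhere continue to resolve.
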
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index 41164a3f33..4451436473 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -17,7 +17,9 @@ python do_devshell () {
addtask devshell after do_patch
-do_devshell[dirs] = "${S}"
+# The directory that the terminal starts in
+DEVSHELL_STARTDIR ?= "${S}"
+do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
do_devshell[nostamp] = "1"
# devshell and fakeroot/pseudo need careful handling since only the final
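
The new DEVSHELL_STARTDIR simply parameterises where the devshell terminal opens; for example, to start in the work directory instead of ${S}:

    DEVSHELL_STARTDIR = "${WORKDIR}"
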
diff --git a/meta/classes/distrodata.bbclass b/meta/classes/distrodata.bbclass
index a890de7911..83aa381fe7 100644
--- a/meta/classes/distrodata.bbclass
+++ b/meta/classes/distrodata.bbclass
@@ -268,240 +268,6 @@ python do_checkpkg() {
import tempfile
import subprocess
- """
- sanity check to ensure same name and type. Match as many patterns as possible
- such as:
- gnome-common-2.20.0.tar.gz (most common format)
- gtk+-2.90.1.tar.gz
- xf86-input-synaptics-12.6.9.tar.gz
- dri2proto-2.3.tar.gz
- blktool_4.orig.tar.gz
- libid3tag-0.15.1b.tar.gz
- unzip552.tar.gz
- icu4c-3_6-src.tgz
- genext2fs_1.3.orig.tar.gz
- gst-fluendo-mp3
- """
- prefix1 = "[a-zA-Z][a-zA-Z0-9]*([\-_][a-zA-Z]\w+)*\+?[\-_]" # match most patterns which uses "-" as separator to version digits
- prefix2 = "[a-zA-Z]+" # a loose pattern such as for unzip552.tar.gz
- prefix3 = "[0-9]+[\-]?[a-zA-Z]+" # a loose pattern such as for 80325-quicky-0.4.tar.gz
- prefix = "(%s|%s|%s)" % (prefix1, prefix2, prefix3)
- ver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"#"((\d+[\.\-_[a-z]])+)"
- # src.rpm extension was added only for rpm package. Can be removed if the rpm
- # packaged will always be considered as having to be manually upgraded
- suffix = "(tar\.gz|tgz|tar\.bz2|tar\.lz4|zip|xz|rpm|bz2|lz4|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
-
- suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2", "tar.xz", "tar.lz4", "bz2", "lz4", "orig.tar.gz", "src.tar.gz", "src.rpm", "src.tgz", "svnr\d+.tar.bz2", "stable.tar.gz", "src.rpm")
- sinterstr = "(?P<name>%s?)v?(?P<ver>%s)(\-source)?" % (prefix, ver_regex)
- sdirstr = "(?P<name>%s)\.?v?(?P<ver>%s)(\-source)?[\.\-](?P<type>%s$)" % (prefix, ver_regex, suffix)
-
- def parse_inter(s):
- m = re.search(sinterstr, s)
- if not m:
- return None
- else:
- return (m.group('name'), m.group('ver'), "")
-
- def parse_dir(s):
- m = re.search(sdirstr, s)
- if not m:
- return None
- else:
- return (m.group('name'), m.group('ver'), m.group('type'))
-
- def modelate_version(version):
- if version[0] in ['.', '-']:
- if version[1].isdigit():
- version = version[1] + version[0] + version[2:len(version)]
- else:
- version = version[1:len(version)]
-
- version = re.sub('\-', '.', version)
- version = re.sub('_', '.', version)
- version = re.sub('(rc)+', '.-1.', version)
- version = re.sub('(alpha)+', '.-3.', version)
- version = re.sub('(beta)+', '.-2.', version)
- if version[0] == 'v':
- version = version[1:len(version)]
- return version
-
- """
- Check whether 'new' is newer than 'old' version. We use existing vercmp() for the
- purpose. PE is cleared in comparison as it's not for build, and PV is cleared too
- for simplicity as it's somehow difficult to get from various upstream format
- """
- def __vercmp(old, new):
- (on, ov, ot) = old
- (en, ev, et) = new
- if on != en or (et and et not in suffixtuple):
- return False
- ov = modelate_version(ov)
- ev = modelate_version(ev)
-
- result = bb.utils.vercmp(("0", ov, ""), ("0", ev, ""))
- if result < 0:
- return True
- else:
- return False
-
- """
- wrapper for fetch upstream directory info
- 'url' - upstream link customized by regular expression
- 'd' - database
- 'tmpf' - tmpfile for fetcher output
- We don't want to exit whole build due to one recipe error. So handle all exceptions
- gracefully w/o leaking to outer.
- """
- def internal_fetch_wget(url, ud, d, tmpf):
- status = "ErrFetchUnknown"
-
- agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
- fetchcmd = "/usr/bin/env wget -t 1 --passive-ftp -O %s --user-agent=\"%s\" '%s'" % (tmpf.name, agent, url)
- try:
- fetcher = bb.fetch2.wget.Wget(d)
- fetcher._runwget(ud, d, fetchcmd, True)
- status = "SUCC"
- except bb.fetch2.BBFetchException, e:
- status = "ErrFetch"
-
- return status
-
- """
- Check on middle version directory such as "2.4/" in "http://xxx/2.4/pkg-2.4.1.tar.gz",
- 'url' - upstream link customized by regular expression
- 'd' - database
- 'curver' - current version
- Return new version if success, or else error in "Errxxxx" style
- """
- def check_new_dir(url, curver, ud, d):
- pn = d.getVar('PN', True)
- f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-1-" % pn)
- status = internal_fetch_wget(url, ud, d, f)
- fhtml = f.read()
- if status == "SUCC" and len(fhtml):
- newver = parse_inter(curver)
-
- """
- match "*4.1/">*4.1/ where '*' matches chars
- N.B. add package name, only match for digits
- """
- regex = d.getVar('REGEX', True)
- if regex == '':
- regex = "^%s" %prefix
- m = re.search("^%s" % regex, curver)
- if m:
- s = "%s[^\d\"]*?(\d+[\.\-_])+\d+/?" % m.group()
- else:
- s = "(\d+[\.\-_])+\d+/?"
-
- searchstr = "[hH][rR][eE][fF]=\"%s\">" % s
-
- reg = re.compile(searchstr)
- valid = 0
- for line in fhtml.split("\n"):
- if line.find(curver) >= 0:
- valid = 1
- m = reg.search(line)
- if m:
- ver = m.group().split("\"")[1]
- ver = ver.strip("/")
- ver = parse_inter(ver)
- if ver and __vercmp(newver, ver) == True:
- newver = ver
-
- """Expect a match for curver in directory list, or else it indicates unknown format"""
- if not valid:
- status = "ErrParseInterDir"
- else:
- """rejoin the path name"""
- status = newver[0] + newver[1]
- elif not len(fhtml):
- status = "ErrHostNoDir"
-
- f.close()
- if status != "ErrHostNoDir" and re.match("Err", status):
- logpath = d.getVar('LOG_DIR', True)
- subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
- os.unlink(f.name)
- return status
-
- """
- Check on the last directory to search '2.4.1' in "http://xxx/2.4/pkg-2.4.1.tar.gz",
- 'url' - upstream link customized by regular expression
- 'd' - database
- 'curname' - current package name
- Return new version if success, or else error in "Errxxxx" style
- """
- def check_new_version(url, curname, ud, d):
- """possible to have no version in pkg name, such as spectrum-fw"""
- if not re.search("\d+", curname):
- return pcurver
- pn = d.getVar('PN', True)
- newver_regex = d.getVar('REGEX', True)
- f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-2-" % pn)
- status = internal_fetch_wget(url, ud, d, f)
- fhtml = f.read()
-
- if status == "SUCC" and len(fhtml):
- newver = parse_dir(curname)
-
- if not newver_regex:
- """this is the default matching pattern, if recipe does not """
- """provide a regex expression """
- """match "{PN}-5.21.1.tar.gz">{PN}-5.21.1.tar.gz """
- pn1 = re.search("^%s" % prefix, curname).group()
- s = "[^\"]*%s[^\d\"]*?(\d+[\.\-_])+[^\"]*" % pn1
- searchstr = "[hH][rR][eE][fF]=\"%s\".*[>\"]" % s
- reg = searchstr
- else:
- reg = newver_regex
- valid = 0
- count = 0
- for line in fhtml.split("\n"):
- if pn == 'kconfig-frontends':
- m = re.findall(reg, line)
- if m:
- valid = 1
- for match in m:
- (on, ov, oe) = newver
- ver = (on, match[0], oe)
- if ver and __vercmp(newver, ver) == True:
- newver = ver
- continue
- count += 1
- m = re.search(reg, line)
- if m:
- valid = 1
- if not newver_regex:
- ver = m.group().split("\"")[1].split("/")[-1]
- if ver == "download":
- ver = m.group().split("\"")[1].split("/")[-2]
- ver = parse_dir(ver)
- else:
- """ we cheat a little here, but we assume that the
- regular expression in the recipe will extract exacly
- the version """
- (on, ov, oe) = newver
- ver = (on, m.group('pver'), oe)
- if ver and __vercmp(newver, ver) == True:
- newver = ver
- """Expect a match for curver in directory list, or else it indicates unknown format"""
- if not valid:
- status = "ErrParseDir"
- else:
- """newver still contains a full package name string"""
- status = re.sub('_', '.', newver[1])
- elif not len(fhtml):
- status = "ErrHostNoDir"
-
- f.close()
- """if host hasn't directory information, no need to save tmp file"""
- if status != "ErrHostNoDir" and re.match("Err", status):
- logpath = d.getVar('LOG_DIR', True)
- subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
- os.unlink(f.name)
- return status
-
"""first check whether a uri is provided"""
src_uri = d.getVar('SRC_URI', True)
if not src_uri:
@@ -543,9 +309,6 @@ python do_checkpkg() {
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
- chk_uri = d.getVar('REGEX_URI', True)
- if not chk_uri:
- chk_uri = src_uri
pdesc = localdata.getVar('DESCRIPTION', True)
pgrp = localdata.getVar('SECTION', True)
if localdata.getVar('PRSPV', True):
@@ -562,232 +325,63 @@ python do_checkpkg() {
psrcuri = localdata.getVar('SRC_URI', True)
maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
+ """ Get upstream version version """
+ pupver = None
+ pstatus = "ErrUnknown"
found = 0
+
for uri in src_uri.split():
- m = re.compile('(?P<type>[^:]*)').match(uri)
- if not m:
- raise MalformedUrl(uri)
- elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'):
- found = 1
- pproto = m.group('type')
- break
+ m = re.compile('(?P<type>[^:]*)').match(uri)
+ if not m:
+ raise MalformedUrl(uri)
+ elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'):
+ found = 1
+ psrcuri = uri
+ pproto = m.group('type')
+ break
if not found:
pproto = "file"
- pupver = "N/A"
- pstatus = "ErrUnknown"
-
- (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(uri)
- if type in ['http', 'https', 'ftp']:
- if d.getVar('PRSPV', True):
- pcurver = d.getVar('PRSPV', True)
- else:
- pcurver = d.getVar('PV', True)
- else:
- if d.getVar('PRSPV', True):
- pcurver = d.getVar('PRSPV', True)
- else:
- pcurver = d.getVar("SRCREV", True)
-
-
- if type in ['http', 'https', 'ftp']:
- ud = bb.fetch2.FetchData(uri, d)
- newver = pcurver
- altpath = path
- dirver = "-"
- curname = "-"
-
- """
- match version number amid the path, such as "5.7" in:
- http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
- N.B. how about sth. like "../5.7/5.8/..."? Not find such example so far :-P
- """
- m = re.search(r"[^/]*(\d+\.)+\d+([\-_]r\d+)*/", path)
- if m:
- altpath = path.split(m.group())[0]
- dirver = m.group().strip("/")
-
- """use new path and remove param. for wget only param is md5sum"""
- alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
- my_uri = d.getVar('REGEX_URI', True)
- if my_uri:
- if d.getVar('PRSPV', True):
- newver = d.getVar('PRSPV', True)
- else:
- newver = d.getVar('PV', True)
- else:
- newver = check_new_dir(alturi, dirver, ud, d)
- altpath = path
- if not re.match("Err", newver) and dirver != newver:
- altpath = altpath.replace(dirver, newver, True)
- # For folder in folder cases - try to enter the folder again and then try parsing
- """Now try to acquire all remote files in current directory"""
- if not re.match("Err", newver):
- curname = altpath.split("/")[-1]
-
- """get remote name by skipping pacakge name"""
- m = re.search(r"/.*/", altpath)
- if not m:
- altpath = "/"
- else:
- altpath = m.group()
-
- chk_uri = d.getVar('REGEX_URI', True)
- if not chk_uri:
- alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
- else:
- alturi = chk_uri
- newver = check_new_version(alturi, curname, ud, d)
- while(newver == "ErrHostNoDir"):
- if alturi == "/download":
- break
- else:
- alturi = "/".join(alturi.split("/")[0:-2]) + "/download"
- newver = check_new_version(alturi, curname, ud, d)
- if not re.match("Err", newver):
- pupver = newver
- if pupver != pcurver:
- pstatus = "UPDATE"
- else:
- pstatus = "MATCH"
-
- if re.match("Err", newver):
- pstatus = newver + ":" + altpath + ":" + dirver + ":" + curname
- elif type == 'git':
- if user:
- gituser = user + '@'
- else:
- gituser = ""
-
- if 'protocol' in parm:
- gitproto = parm['protocol']
- else:
- gitproto = "git"
- # Get all tags and HEAD
- if d.getVar('GIT_REGEX', True):
- gitcmd = "git ls-remote %s://%s%s%s %s 2>&1" % (gitproto, gituser, host, path, d.getVar('GIT_REGEX', True))
- else:
- gitcmd = "git ls-remote %s://%s%s%s *tag* 2>&1" % (gitproto, gituser, host, path)
- gitcmd2 = "git ls-remote %s://%s%s%s HEAD 2>&1" % (gitproto, gituser, host, path)
-
- tmp = os.popen(gitcmd).read()
- if 'unable to connect' in tmp:
- tmp = None
- tmp2 = os.popen(gitcmd2).read()
- if 'unable to connect' in tmp2:
- tmp2 = None
- #This is for those repos have tag like: refs/tags/1.2.2
- phash = pversion.rsplit("+")[-1]
- if tmp:
- tmpline = tmp.split("\n")
- verflag = 0
- pupver = pversion
- for line in tmpline:
- if len(line)==0:
- break;
- puptag = line.split("/")[-1]
- upstr_regex = d.getVar('REGEX', True)
- if upstr_regex:
- puptag = re.search(upstr_regex, puptag)
- else:
- puptag = re.search("(?P<pver>([0-9][\.|_]?)+)", puptag)
- if puptag == None:
- continue
- puptag = puptag.group('pver')
- puptag = re.sub("_",".",puptag)
- plocaltag = pupver.split("+git")[0]
- if "git" in plocaltag:
- plocaltag = plocaltag.split("-")[0]
- result = bb.utils.vercmp(("0", puptag, ""), ("0", plocaltag, ""))
-
- if result > 0:
- verflag = 1
- pupver = puptag
- elif verflag == 0 :
- pupver = plocaltag
- #This is for those no tag repo
- elif tmp2:
+ if pproto in ['http', 'https', 'ftp', 'git']:
+ try:
+ ud = bb.fetch2.FetchData(psrcuri, d)
+ pupver = ud.method.latest_versionstring(ud, d)
+ if pproto == 'git':
+ if pupver == "":
pupver = pversion.rsplit("+")[0]
- phash = pupver
- else:
- pstatus = "ErrGitAccess"
- if not ('ErrGitAccess' in pstatus):
-
- latest_head = tmp2.rsplit("\t")[0][:7]
- tmp3 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pversion)
- tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pupver)
- if not tmp4:
- tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)', pupver)
-
- if tmp3:
- # Get status of the package - MATCH/UPDATE
- result = bb.utils.vercmp(("0", tmp3.group('git_ver'), ""), ("0",tmp3.group('git_ver') , ""))
- # Get the latest tag
- pstatus = 'MATCH'
- if result < 0:
- latest_pv = tmp3.group('git_ver')
- else:
- latest_pv = pupver
- if not(tmp3.group('head_md5')[:7] in latest_head) or not(latest_head in tmp3.group('head_md5')[:7]):
- pstatus = 'UPDATE'
-
- git_prefix = tmp3.group('git_prefix')
- pupver = latest_pv + tmp3.group('git_prefix') + latest_head
- else:
- if not tmp3:
- bb.plain("#DEBUG# Package %s: current version (%s) doesn't match the usual pattern" %(pname, pversion))
- elif type == 'svn':
- ud = bb.fetch2.FetchData(uri, d)
-
- svnFetcher = bb.fetch2.svn.Svn(d)
- svnFetcher.urldata_init(ud, d)
- try:
- pupver = svnFetcher.latest_revision(ud, d, ud.names[0])
- except bb.fetch2.FetchError:
- pstatus = "ErrSvnAccess"
-
- if pupver:
- if pupver in pversion:
- pstatus = "MATCH"
- else:
- pstatus = "UPDATE"
- else:
- pstatus = "ErrSvnAccess"
-
- if 'rev' in ud.parm:
- pcurver = ud.parm['rev']
-
- if pstatus != "ErrSvnAccess":
- tag = pversion.rsplit("+svn")[0]
- svn_prefix = re.search('(\+svn[r|\-]?)', pversion)
- if tag and svn_prefix:
- pupver = tag + svn_prefix.group() + pupver
-
- elif type == 'cvs':
- pupver = "HEAD"
- pstatus = "UPDATE"
- elif type == 'file':
- """local file is always up-to-date"""
- pupver = pcurver
- pstatus = "MATCH"
+ if re.search("gitrAUTOINC", pversion):
+ pupver += "+gitrAUTOINC+"
+ else:
+ pupver += "+gitAUTOINC+"
+ latest_revision = ud.method.latest_revision(ud, d, ud.names[0])
+ pupver += latest_revision[:10]
+ except Exception as inst:
+ bb.warn("%s: unexpected error: %s" % (pname, repr(inst)))
+ pstatus = "ErrAccess"
+ elif pproto == "file":
+ """Local files are always updated"""
+ pupver = pversion
else:
- pstatus = "ErrUnsupportedProto"
+ pstatus = "ErrUnsupportedProto"
+ bb.note("do_checkpkg, protocol %s isn't implemented" % pproto)
- if re.match("Err", pstatus):
- pstatus += ":%s%s" % (host, path)
+ if not pupver:
+ pupver = "N/A"
+ elif pupver == pversion:
+ pstatus = "MATCH"
+ else:
+ pstatus = "UPDATE"
"""Read from manual distro tracking fields as alternative"""
pmver = d.getVar("RECIPE_UPSTREAM_VERSION", True)
if not pmver:
- pmver = "N/A"
- pmstatus = "ErrNoRecipeData"
+ pmver = "N/A"
+ pmstatus = "ErrNoRecipeData"
+ elif pmver == pupver:
+ pmstatus = "MATCH"
else:
- if pmver == pcurver:
- pmstatus = "MATCH"
- else:
- pmstatus = "UPDATE"
+ pmstatus = "UPDATE"
- psrcuri = psrcuri.split()[0]
pdepends = "".join(pdepends.split("\t"))
pdesc = "".join(pdesc.split("\t"))
no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True)
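The rewrite above drops roughly 240 lines of ad-hoc wget/HTML scraping and delegates upstream version discovery to the fetcher's latest_versionstring() (plus latest_revision() for git). What remains is a simple status decision, sketched here standalone (the real code obtains pupver from bb.fetch2; the strings below are illustrative):

    # Status decision once the fetcher has reported the upstream version.
    def check_status(pupver, pversion):
        if not pupver:
            return "N/A", "ErrUnknown"   # nothing usable found upstream
        return pupver, "MATCH" if pupver == pversion else "UPDATE"

    print(check_status("1.2.3", "1.2.3"))  # ('1.2.3', 'MATCH')
    print(check_status("1.3.0", "1.2.3"))  # ('1.3.0', 'UPDATE')
    print(check_status(None,    "1.2.3"))  # ('N/A', 'ErrUnknown')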
diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass
index 12f26036fa..6ed7ecc99f 100644
--- a/meta/classes/distutils.bbclass
+++ b/meta/classes/distutils.bbclass
@@ -42,12 +42,8 @@ distutils_do_install() {
bbfatal "${PYTHON_PN} setup.py install execution failed."
# support filenames with *spaces*
- find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \
- # only modify file if it contains path to avoid recompilation on the target
- if grep -q "${D}" "$i"; then
- sed -i -e s:${D}::g "$i"
- fi
- done
+ # only modify file if it contains path to avoid recompilation on the target
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
if test -e ${D}${bindir} ; then
for i in ${D}${bindir}/* ; do \
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
index bbd645cc63..e909ef41b6 100644
--- a/meta/classes/distutils3.bbclass
+++ b/meta/classes/distutils3.bbclass
@@ -64,9 +64,7 @@ distutils3_do_install() {
bbfatal "${PYTHON_PN} setup.py install execution failed."
# support filenames with *spaces*
- find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \
- sed -i -e s:${D}::g "$i"
- done
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
if test -e ${D}${bindir} ; then
for i in ${D}${bindir}/* ; do \
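Both distutils classes above replace a shell while-read loop with a single find -exec pipeline that still rewrites only the .py files actually embedding the ${D} staging path. A rough Python equivalent of that pipeline, for illustration only (destdir plays the role of ${D}):

    import os

    def scrub_destdir(destdir):
        # Strip the staging path from installed .py files so they are not
        # recompiled on the target; untouched files keep their timestamps.
        for root, _, files in os.walk(destdir):
            for name in files:
                if not name.endswith('.py'):
                    continue
                path = os.path.join(root, name)
                with open(path) as f:
                    text = f.read()
                if destdir in text:
                    with open(path, 'w') as f:
                        f.write(text.replace(destdir, ''))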
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index 2ac62747a2..75bdb7a14d 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -35,7 +35,17 @@ python () {
d.setVar('B', externalsrcbuild)
else:
d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
- d.setVar('SRC_URI', '')
+
+ srcuri = (d.getVar('SRC_URI', True) or '').split()
+ local_srcuri = []
+ for uri in srcuri:
+ if uri.startswith('file://'):
+ local_srcuri.append(uri)
+ d.setVar('SRC_URI', ' '.join(local_srcuri))
+
+ if '{SRCPV}' in d.getVar('PV', False):
+ # Dummy value because the default function can't be called with blank SRC_URI
+ d.setVar('SRCPV', '999')
tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
@@ -47,7 +57,36 @@ python () {
# Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
d.appendVarFlag(task, "lockfiles", "${S}/singletask.lock")
+ # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
+ cleandirs = d.getVarFlag(task, 'cleandirs', False)
+ if cleandirs:
+ cleandirs = cleandirs.split()
+ setvalue = False
+ if '${S}' in cleandirs:
+ cleandirs.remove('${S}')
+ setvalue = True
+ if externalsrcbuild == externalsrc and '${B}' in cleandirs:
+ cleandirs.remove('${B}')
+ setvalue = True
+ if setvalue:
+ d.setVarFlag(task, 'cleandirs', ' '.join(cleandirs))
+
+ fetch_tasks = ['do_fetch', 'do_unpack']
+ # If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
+ d.appendVarFlag('do_configure', 'deps', ['do_unpack'])
+
for task in d.getVar("SRCTREECOVEREDTASKS", True).split():
+ if local_srcuri and task in fetch_tasks:
+ continue
bb.build.deltask(task, d)
+
+ d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
+
+ # Ensure compilation happens every time
+ d.setVarFlag('do_compile', 'nostamp', '1')
}
+python externalsrc_compile_prefunc() {
+ # Make it obvious that this is happening, since forgetting about it could lead to much confusion
+ bb.warn('Compiling %s from external source %s' % (d.getVar('PN', True), d.getVar('EXTERNALSRC', True)))
+}
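The externalsrc changes above stop blanking SRC_URI wholesale: file:// entries (local patches, config fragments) are kept, and the fetch/unpack tasks are only deleted from SRCTREECOVEREDTASKS when no local entries remain. A minimal sketch of that filtering, outside of BitBake (the URI strings are illustrative):

    # Keep only the local file:// entries from SRC_URI.
    src_uri = "git://example.com/repo.git file://fix.patch file://defconfig"
    local_srcuri = [u for u in src_uri.split() if u.startswith('file://')]
    print(' '.join(local_srcuri))   # file://fix.patch file://defconfig

    # With local entries present, keep do_fetch/do_unpack so the patches
    # can still be fetched and applied against the external tree.
    fetch_tasks = ['do_fetch', 'do_unpack']
    covered = ['do_fetch', 'do_unpack', 'do_patch']
    to_delete = [t for t in covered if not (local_srcuri and t in fetch_tasks)]
    print(to_delete)                # ['do_patch']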
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
index afd3fd2252..dfbdfa1e02 100644
--- a/meta/classes/fontcache.bbclass
+++ b/meta/classes/fontcache.bbclass
@@ -12,19 +12,22 @@ FONT_EXTRA_RDEPENDS ?= "fontconfig-utils"
fontcache_common() {
if [ "x$D" != "x" ] ; then
$INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} bindir=${bindir} \
- libdir=${libdir} base_libdir=${base_libdir}
+ libdir=${libdir} base_libdir=${base_libdir} localstatedir=${localstatedir}
else
fc-cache
fi
}
-python populate_packages_append() {
+python () {
font_pkgs = d.getVar('FONT_PACKAGES', True).split()
deps = d.getVar("FONT_EXTRA_RDEPENDS", True)
for pkg in font_pkgs:
if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
+}
+python add_fontcache_postinsts() {
+ for pkg in d.getVar('FONT_PACKAGES', True).split():
bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
if not postinst:
@@ -38,3 +41,5 @@ python populate_packages_append() {
postrm += d.getVar('fontcache_common', True)
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
+
+PACKAGEFUNCS =+ "add_fontcache_postinsts"
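The fontcache split above separates the two jobs the old populate_packages_append() did: RDEPENDS must be known at parse time (hence the anonymous python function), while the postinst/postrm scripts are attached during packaging via PACKAGEFUNCS. A dict-backed sketch of the two phases (all names illustrative):

    d = {'FONT_PACKAGES': 'ttf-dejavu',
         'FONT_EXTRA_RDEPENDS': 'fontconfig-utils'}

    def parse_time(d):       # anonymous python: runs on every recipe parse
        for pkg in d['FONT_PACKAGES'].split():
            d['RDEPENDS_' + pkg] = (d.get('RDEPENDS_' + pkg, '')
                                    + ' ' + d['FONT_EXTRA_RDEPENDS']).strip()

    def packaging_time(d):   # PACKAGEFUNCS: runs only during do_package
        for pkg in d['FONT_PACKAGES'].split():
            d['pkg_postinst_' + pkg] = '#!/bin/sh\nfc-cache\n'

    parse_time(d)
    packaging_time(d)
    print(d['RDEPENDS_ttf-dejavu'])  # fontconfig-utils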
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
index 739bf60649..6ca13cb1e0 100644
--- a/meta/classes/gnomebase.bbclass
+++ b/meta/classes/gnomebase.bbclass
@@ -9,13 +9,13 @@ SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}
DEPENDS += "gnome-common-native"
FILES_${PN} += "${datadir}/application-registry \
- ${datadir}/mime-info \
- ${datadir}/mime/packages \
- ${datadir}/mime/application \
- ${datadir}/gnome-2.0 \
- ${datadir}/polkit* \
- ${datadir}/GConf \
- ${datadir}/glib-2.0/schemas \
+ ${datadir}/mime-info \
+ ${datadir}/mime/packages \
+ ${datadir}/mime/application \
+ ${datadir}/gnome-2.0 \
+ ${datadir}/polkit* \
+ ${datadir}/GConf \
+ ${datadir}/glib-2.0/schemas \
"
FILES_${PN}-doc += "${datadir}/devhelp"
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
index 5c80c177de..47bd35e049 100644
--- a/meta/classes/grub-efi.bbclass
+++ b/meta/classes/grub-efi.bbclass
@@ -7,7 +7,7 @@
# Provide grub-efi specific functions for building bootable images.
# External variables
-# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
+# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
# ${LABELS} - a list of targets for the automatic config
@@ -15,8 +15,8 @@
# ${GRUB_OPTS} - additional options to add to the config, ';' delimited # (optional)
# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
-do_bootimg[depends] += "grub-efi:do_deploy"
-do_bootdirectdisk[depends] += "grub-efi:do_deploy"
+do_bootimg[depends] += "${MLPREFIX}grub-efi:do_deploy"
+do_bootdirectdisk[depends] += "${MLPREFIX}grub-efi:do_deploy"
GRUB_SERIAL ?= "console=ttyS0,115200"
GRUBCFG = "${S}/grub.cfg"
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
index fb7863e99b..e32f98dcfc 100644
--- a/meta/classes/gtk-doc.bbclass
+++ b/meta/classes/gtk-doc.bbclass
@@ -21,3 +21,5 @@ EXTRA_OECONF_append = "\
do_configure_prepend () {
( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} )
}
+
+inherit pkgconfig
diff --git a/meta/classes/gummiboot.bbclass b/meta/classes/gummiboot.bbclass
index 021465201f..dae19775c3 100644
--- a/meta/classes/gummiboot.bbclass
+++ b/meta/classes/gummiboot.bbclass
@@ -6,8 +6,8 @@
# Set EFI_PROVIDER = "gummiboot" to use gummiboot on your live images instead of grub-efi
# (images built by bootimage.bbclass or boot-directdisk.bbclass)
-do_bootimg[depends] += "gummiboot:do_deploy"
-do_bootdirectdisk[depends] += "gummiboot:do_deploy"
+do_bootimg[depends] += "${MLPREFIX}gummiboot:do_deploy"
+do_bootdirectdisk[depends] += "${MLPREFIX}gummiboot:do_deploy"
EFIDIR = "/EFI/BOOT"
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
index 3ec8c0667d..2f9e3cf8ef 100644
--- a/meta/classes/icecc.bbclass
+++ b/meta/classes/icecc.bbclass
@@ -91,6 +91,10 @@ def create_path(compilers, bb, d):
return staging
def use_icc(bb,d):
+ if d.getVar('ICECC_DISABLED') == "1":
+ # don't even try when explicitly disabled
+ return "no"
+
# allarch recipes don't use compiler
if icc_is_allarch(bb, d):
return "no"
@@ -133,8 +137,7 @@ def use_icc(bb,d):
return "yes"
def icc_is_allarch(bb, d):
- return \
- bb.data.inherits_class("allarch", d);
+ return d.getVar("PACKAGE_ARCH") == "all"
def icc_is_kernel(bb, d):
return \
@@ -148,10 +151,6 @@ def icc_is_native(bb, d):
# Don't pollute allarch signatures with TARGET_FPU
icc_version[vardepsexclude] += "TARGET_FPU"
def icc_version(bb, d):
- if d.getVar('ICECC_DISABLED') == "1":
- # don't even try it, when explicitly disabled
- return ""
-
if use_icc(bb, d) == "no":
return ""
@@ -179,7 +178,7 @@ def icc_version(bb, d):
return tar_file
def icc_path(bb,d):
- if d.getVar('ICECC_DISABLED') == "1":
+ if use_icc(bb, d) == "no":
# don't create unnecessary directories when icecc is disabled
return
@@ -246,7 +245,7 @@ def set_icecc_env():
return
set_icecc_env() {
- if [ "${ICECC_DISABLED}" = "1" ]
+ if [ "${@use_icc(bb, d)}" = "no" ]
then
return
fi
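The icecc changes above centralize every disabled/unsupported check in use_icc(), so ICECC_DISABLED is honoured consistently by icc_version(), icc_path() and the shell-side set_icecc_env(). The guard-first shape, sketched standalone (the dict stands in for the datastore; the two checks shown are placeholders for the fuller recipe checks in the class):

    def use_icc(d):
        if d.get('ICECC_DISABLED') == '1':
            return 'no'   # explicit opt-out wins over everything else
        if d.get('PACKAGE_ARCH') == 'all':
            return 'no'   # allarch recipes build no target code
        return 'yes'

    print(use_icc({'ICECC_DISABLED': '1'}))   # no
    print(use_icc({'PACKAGE_ARCH': 'all'}))   # no
    print(use_icc({}))                        # yes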
diff --git a/meta/classes/image-buildinfo.bbclass b/meta/classes/image-buildinfo.bbclass
new file mode 100644
index 0000000000..aa17cc8f9e
--- /dev/null
+++ b/meta/classes/image-buildinfo.bbclass
@@ -0,0 +1,69 @@
+#
+# Writes build information to the target filesystem in /etc/build
+#
+# Copyright (C) 2014 Intel Corporation
+# Author: Alejandro Enedino Hernandez Samaniego <alejandro.hernandez@intel.com>
+#
+# Licensed under the MIT license, see COPYING.MIT for details
+#
+# Usage: add INHERIT += "image-buildinfo" to your conf file
+#
+
+# Desired variables to display
+IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
+
+# From buildhistory.bbclass
+def image_buildinfo_outputvars(vars, listvars, d):
+ vars = vars.split()
+ listvars = listvars.split()
+ ret = ""
+ for var in vars:
+ value = d.getVar(var, True) or ""
+ if (d.getVarFlag(var, 'type') == "list"):
+ value = oe.utils.squashspaces(value)
+ ret += "%s = %s\n" % (var, value)
+ return ret.rstrip('\n')
+
+# Gets git branch's status (clean or dirty)
+def get_layer_git_status(path):
+ f = os.popen("cd %s; git diff --stat 2>&1 | tail -n 1" % path)
+ data = f.read()
+ if f.close() is None:
+ if len(data) != 0:
+ return "-- modified"
+ return ""
+
+# Returns layer revisions along with their respective status
+def get_layer_revs(d):
+ layers = (d.getVar("BBLAYERS", True) or "").split()
+ metadata_revs = ["%-17s = %s:%s %s" % (os.path.basename(i), \
+ base_get_metadata_git_branch(i, None).strip(), \
+ base_get_metadata_git_revision(i, None), \
+ get_layer_git_status(i)) \
+ for i in layers]
+ return '\n'.join(metadata_revs)
+
+def buildinfo_target(d):
+ # Get context
+ if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ return ""
+ # Single and list variables to be read
+ vars = (d.getVar("IMAGE_BUILDINFO_VARS", True) or "")
+ listvars = (d.getVar("IMAGE_BUILDINFO_LVARS", True) or "")
+ return image_buildinfo_outputvars(vars, listvars, d)
+
+# Write build information to target filesystem
+buildinfo () {
+cat > ${IMAGE_ROOTFS}${sysconfdir}/build << END
+-----------------------
+Build Configuration: |
+-----------------------
+${@buildinfo_target(d)}
+-----------------------
+Layer Revisions: |
+-----------------------
+${@get_layer_revs(d)}
+END
+}
+
+IMAGE_PREPROCESS_COMMAND += "buildinfo;"
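The new class is opt-in via INHERIT += "image-buildinfo" and appends an /etc/build file to every image. A hedged standalone equivalent of its get_layer_git_status() helper, using subprocess instead of os.popen (path may be any git checkout):

    import subprocess

    def layer_git_status(path):
        # Report '-- modified' when the checkout has uncommitted changes.
        try:
            out = subprocess.check_output(['git', 'diff', '--stat'],
                                          cwd=path, stderr=subprocess.STDOUT)
        except (OSError, subprocess.CalledProcessError):
            return ''
        return '-- modified' if out.strip() else ''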
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
index c7e6937fa9..7b770fb353 100644
--- a/meta/classes/image-live.bbclass
+++ b/meta/classes/image-live.bbclass
@@ -2,7 +2,7 @@
AUTO_SYSLINUXCFG = "1"
INITRD_IMAGE ?= "core-image-minimal-initramfs"
INITRD ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz"
-SYSLINUX_ROOT = "root=/dev/ram0 "
+SYSLINUX_ROOT = "root=/dev/ram0"
SYSLINUX_TIMEOUT ?= "10"
SYSLINUX_LABELS ?= "boot install"
LABELS_append = " ${SYSLINUX_LABELS} "
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
index 11f082b373..c455a8e2d4 100644
--- a/meta/classes/image-mklibs.bbclass
+++ b/meta/classes/image-mklibs.bbclass
@@ -16,7 +16,7 @@ mklibs_optimize_image_doit() {
> ${WORKDIR}/mklibs/executables.list
case ${TARGET_ARCH} in
- powerpc | mips | microblaze )
+ powerpc | mips | mipsel | microblaze )
dynamic_loader="${base_libdir}/ld.so.1"
;;
powerpc64)
diff --git a/meta/classes/image-swab.bbclass b/meta/classes/image-swab.bbclass
index 124a090605..89318560db 100644
--- a/meta/classes/image-swab.bbclass
+++ b/meta/classes/image-swab.bbclass
@@ -11,7 +11,7 @@ BB_DEFAULT_TASK = "generate_swabber_report"
# Ideally these should be fixed but as a temporary measure disable parallel
# builds for troublesome recipes
PARALLEL_MAKE_pn-openssl = ""
-PARALLEL_MAKE_pn-eglibc = ""
+PARALLEL_MAKE_pn-glibc = ""
PARALLEL_MAKE_pn-glib-2.0 = ""
PARALLEL_MAKE_pn-libxml2 = ""
PARALLEL_MAKE_pn-readline = ""
diff --git a/meta/classes/image-vmdk.bbclass b/meta/classes/image-vmdk.bbclass
index 703c00ea3c..77b7facd41 100644
--- a/meta/classes/image-vmdk.bbclass
+++ b/meta/classes/image-vmdk.bbclass
@@ -1,7 +1,7 @@
#NOISO = "1"
-SYSLINUX_ROOT = "root=/dev/hda2 "
+SYSLINUX_ROOT ?= "root=/dev/sda2"
SYSLINUX_PROMPT ?= "0"
SYSLINUX_TIMEOUT ?= "10"
SYSLINUX_LABELS = "boot"
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index a03b880402..89eb5f378e 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -1,6 +1,6 @@
inherit rootfs_${IMAGE_PKGTYPE}
-inherit populate_sdk_base
+inherit populate_sdk_ext
TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
@@ -22,7 +22,7 @@ inherit ${TESTIMAGECLASS}
# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs empty-root-password allow-empty-password post-install-logging"
# rootfs bootstrap install
ROOTFS_BOOTSTRAP_INSTALL = "${@bb.utils.contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
@@ -52,7 +52,10 @@ def check_image_features(d):
features = set(oe.data.typed_value('IMAGE_FEATURES', d))
for feature in features:
if feature not in valid_features:
- bb.fatal("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
+ if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d):
+ raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
+ else:
+ raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
IMAGE_INSTALL ?= ""
IMAGE_INSTALL[type] = "list"
@@ -63,6 +66,7 @@ PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
EXCLUDE_FROM_WORLD = "1"
USE_DEVFS ?= "1"
+USE_DEPMOD ?= "1"
PID = "${@os.getpid()}"
@@ -72,10 +76,37 @@ LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
LDCONFIGDEPEND_libc-uclibc = ""
LDCONFIGDEPEND_libc-musl = ""
-do_rootfs[depends] += "makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND}"
-do_rootfs[depends] += "virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot"
+do_rootfs[depends] += " \
+ makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \
+ virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \
+ virtual/kernel:do_packagedata"
do_rootfs[recrdeptask] += "do_packagedata"
-do_rootfs[vardeps] += "BAD_RECOMMENDATIONS NO_RECOMMENDATIONS"
+
+def command_variables(d):
+ return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
+ 'IMAGE_PREPROCESS_COMMAND','ROOTFS_POSTPROCESS_COMMAND','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS',
+ 'RPM_POSTPROCESS_COMMANDS']
+
+python () {
+ variables = command_variables(d)
+ for var in variables:
+ if d.getVar(var):
+ d.setVarFlag(var, 'func', '1')
+}
+
+def rootfs_variables(d):
+ from oe.rootfs import variable_depends
+ variables = ['IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPEDEP_','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','RM_OLD_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS','SDK_OS',
+ 'SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT','SDKTARGETSYSROOT','MULTILIBRE_ALLOW_REP',
+ 'MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
+ 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
+ 'COMPRESSIONTYPES']
+ variables.extend(command_variables(d))
+ variables.extend(variable_depends(d))
+ return " ".join(variables)
+
+do_rootfs[vardeps] += "${@rootfs_variables(d)}"
do_build[depends] += "virtual/kernel:do_deploy"
@@ -120,17 +151,6 @@ python () {
d.setVar('IMAGE_FEATURES', ' '.join(list(remain_features)))
- # Ensure we have the vendor list for complementary package handling
- ml_vendor_list = ""
- multilibs = d.getVar('MULTILIBS', True) or ""
- for ext in multilibs.split():
- eext = ext.split(':')
- if len(eext) > 1 and eext[0] == 'multilib':
- localdata = bb.data.createCopy(d)
- vendor = localdata.getVar("TARGET_VENDOR_virtclass-multilib-" + eext[1], False)
- ml_vendor_list += " " + vendor
- d.setVar('MULTILIB_VENDORS', ml_vendor_list)
-
check_image_features(d)
initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or ""
if initramfs_image != "":
@@ -138,18 +158,23 @@ python () {
d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_rootfs" % initramfs_image)
}
-IMAGE_CLASSES ?= "image_types"
+IMAGE_CLASSES += "image_types"
inherit ${IMAGE_CLASSES}
IMAGE_POSTPROCESS_COMMAND ?= ""
-MACHINE_POSTPROCESS_COMMAND ?= ""
+
+# Zap the root password if debug-tweaks feature is not enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+
# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "ssh_allow_empty_password; ", "",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
+
# Enable postinst logging if debug-tweaks is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "postinst_enable_logging; ", "",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
+
# Write manifest
IMAGE_MANIFEST = "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest"
-ROOTFS_POSTPROCESS_COMMAND =+ "write_image_manifest ; "
+ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
# Set default postinst log file
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
# Set default target for systemd images
@@ -161,7 +186,9 @@ IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', True).split()))}"
-PSEUDO_PASSWD = "${IMAGE_ROOTFS}"
+# Prefer image, but use the fallback files for lookups if the image ones
+# aren't yet available.
+PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
do_rootfs[dirs] = "${TOPDIR}"
do_rootfs[lockfiles] += "${IMAGE_ROOTFS}.lock"
@@ -172,17 +199,17 @@ do_rootfs[cleandirs] += "${S}"
do_rootfs[umask] = "022"
# A hook function to support read-only-rootfs IMAGE_FEATURES
-# Currently, it only supports sysvinit system.
read_only_rootfs_hook () {
+ # Tweak the mount option and fs_passno for rootfs in fstab
+ sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
+
if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
- # Tweak the mount option and fs_passno for rootfs in fstab
- sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
- # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
+ # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
fi
- # Run populate-volatile.sh at rootfs time to set up basic files
- # and directories to support read-only rootfs.
+ # Run populate-volatile.sh at rootfs time to set up basic files
+ # and directories to support read-only rootfs.
if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
fi
@@ -199,6 +226,27 @@ read_only_rootfs_hook () {
fi
fi
fi
+
+ if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
+ # Update user database files so that services don't fail for a read-only systemd system
+ for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
+ [ -e $conffile ] || continue
+ grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
+ if [ "$type" = "u" ]; then
+ useradd_params=""
+ [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
+ [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
+ useradd_params="$useradd_params --system $name"
+ eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
+ elif [ "$type" = "g" ]; then
+ groupadd_params=""
+ [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
+ groupadd_params="$groupadd_params --system $name"
+ eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
+ fi
+ done
+ done
+ fi
}
PACKAGE_EXCLUDE ??= ""
@@ -350,16 +398,10 @@ python write_image_manifest () {
image_manifest.write(image_list_installed_packages(d, 'ver'))
}
-# Make login manager(s) enable automatic login.
-# Useful for devices where we do not want to log in at all (e.g. phones)
-set_image_autologin () {
- sed -i 's%^AUTOLOGIN=\"false"%AUTOLOGIN="true"%g' ${IMAGE_ROOTFS}/etc/sysconfig/gpelogin
-}
-
# Can be use to create /etc/timestamp during image construction to give a reasonably
# sane default time setting
rootfs_update_timestamp () {
- date -u +%4Y%2m%2d%2H%2M >${IMAGE_ROOTFS}/etc/timestamp
+ date -u +%4Y%2m%2d%2H%2M%2S >${IMAGE_ROOTFS}/etc/timestamp
}
# Prevent X from being started
@@ -393,6 +435,7 @@ do_compile[noexec] = "1"
do_install[noexec] = "1"
do_populate_sysroot[noexec] = "1"
do_package[noexec] = "1"
+do_package_qa[noexec] = "1"
do_packagedata[noexec] = "1"
do_package_write_ipk[noexec] = "1"
do_package_write_deb[noexec] = "1"
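The image.bbclass hunk above replaces the short hand-maintained do_rootfs[vardeps] list with an exhaustive, generated one, so the task checksum changes whenever any listed variable does. A stand-in sketch of the assembly (helper names mirror the class; the shortened lists here are illustrative, and duplicates in the source lists are presumably harmless since signature dependencies are de-duplicated):

    def command_variables():
        return ['ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_PREPROCESS_COMMAND']

    def rootfs_variables():
        variables = ['IMAGE_FSTYPES', 'IMAGE_NAME', 'IMAGE_ROOTFS_SIZE']
        variables.extend(command_variables())
        return ' '.join(variables)

    print(rootfs_variables())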
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index 99a07da0fc..98a08bf107 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -13,7 +13,7 @@ def imagetypes_getdepends(d):
deps = []
ctypes = d.getVar('COMPRESSIONTYPES', True).split()
for type in (d.getVar('IMAGE_FSTYPES', True) or "").split():
- if type == "vmdk" or type == "live" or type == "iso" or type == "hddimg":
+ if type in ["vmdk", "live", "iso", "hddimg"]:
type = "ext3"
basetype = type
for ctype in ctypes:
@@ -21,6 +21,8 @@ def imagetypes_getdepends(d):
basetype = type[:-len("." + ctype)]
adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
break
+ for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype, True) or "").split():
+ adddep(d.getVar('IMAGE_DEPENDS_%s' % typedepends, True) , deps)
adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
depstr = ""
@@ -56,20 +58,30 @@ IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
+MIN_BTRFS_SIZE ?= "16384"
IMAGE_CMD_btrfs () {
- touch ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
- mkfs.btrfs -b `expr ${ROOTFS_SIZE} \* 1024` ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
+ if [ ${ROOTFS_SIZE} -gt ${MIN_BTRFS_SIZE} ]; then
+ dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs count=${ROOTFS_SIZE} bs=1024
+ mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
+ else
+ bbfatal "Rootfs is too small for BTRFS (Rootfs Actual Size: ${ROOTFS_SIZE}, BTRFS Minimum Size: ${MIN_BTRFS_SIZE})"
+ fi
}
IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${EXTRA_IMAGECMD} -noappend"
IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
+IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
IMAGE_CMD_tar = "tar -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar -C ${IMAGE_ROOTFS} ."
+do_rootfs[cleandirs] += "${WORKDIR}/cpio_append"
IMAGE_CMD_cpio () {
(cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
- if [ ! -e ${IMAGE_ROOTFS}/init ]; then
- mkdir -p ${WORKDIR}/cpio_append
- touch ${WORKDIR}/cpio_append/init
+ if [ ! -L ${IMAGE_ROOTFS}/init -a ! -e ${IMAGE_ROOTFS}/init ]; then
+ if [ -L ${IMAGE_ROOTFS}/sbin/init -o -e ${IMAGE_ROOTFS}/sbin/init ]; then
+ ln -sf /sbin/init ${WORKDIR}/cpio_append/init
+ else
+ touch ${WORKDIR}/cpio_append/init
+ fi
(cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
fi
}
@@ -96,6 +108,8 @@ IMAGE_CMD_ubi () {
mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}
ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubi ${UBINIZE_ARGS} ubinize.cfg
}
+IMAGE_TYPEDEP_ubi = "ubifs"
+
IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}"
EXTRA_IMAGECMD = ""
@@ -121,12 +135,27 @@ IMAGE_DEPENDS_ext4 = "e2fsprogs-native"
IMAGE_DEPENDS_btrfs = "btrfs-tools-native"
IMAGE_DEPENDS_squashfs = "squashfs-tools-native"
IMAGE_DEPENDS_squashfs-xz = "squashfs-tools-native"
+IMAGE_DEPENDS_squashfs-lzo = "squashfs-tools-native"
IMAGE_DEPENDS_elf = "virtual/kernel mkelfimage-native"
IMAGE_DEPENDS_ubi = "mtd-utils-native"
IMAGE_DEPENDS_ubifs = "mtd-utils-native"
# This variable is available to request which values are suitable for IMAGE_FSTYPES
-IMAGE_TYPES = "jffs2 jffs2.sum cramfs ext2 ext2.gz ext2.bz2 ext3 ext3.gz ext2.lzma btrfs iso hddimg squashfs squashfs-xz ubi ubifs tar tar.gz tar.bz2 tar.xz tar.lz4 cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 vmdk elf"
+IMAGE_TYPES = " \
+ jffs2 jffs2.sum \
+ cramfs \
+ ext2 ext2.gz ext2.bz2 ext2.lzma \
+ ext3 ext3.gz \
+ btrfs \
+ iso \
+ hddimg \
+ squashfs squashfs-xz squashfs-lzo \
+ ubi ubifs \
+ tar tar.gz tar.bz2 tar.xz tar.lz4 \
+ cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
+ vmdk \
+ elf \
+"
COMPRESSIONTYPES = "gz bz2 lzma xz lz4 sum"
COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}.rootfs.${type}"
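The new IMAGE_TYPEDEP_ubi = "ubifs" line lets imagetypes_getdepends() pull in the build dependencies of any image type another type is layered on. A dict-based sketch of that one-level expansion (variable contents are illustrative):

    typedep = {'ubi': 'ubifs'}
    depends = {'ubi': 'mtd-utils-native', 'ubifs': 'mtd-utils-native'}

    def deps_for(basetype):
        deps = []
        for td in typedep.get(basetype, '').split():
            deps.extend(depends.get(td, '').split())
        deps.extend(depends.get(basetype, '').split())
        return deps

    # The real adddep() skips entries already present, so the duplicate
    # below would be collapsed.
    print(deps_for('ubi'))   # ['mtd-utils-native', 'mtd-utils-native']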
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index c8fa7116b3..061ce5b620 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -17,9 +17,6 @@
# files under exec_prefix
-PACKAGE_DEPENDS += "${QADEPENDS}"
-PACKAGEFUNCS += " do_package_qa "
-
# unsafe-references-in-binaries requires prelink-rtld from
# prelink-native, but we don't want this DEPENDS for -native builds
QADEPENDS = "prelink-native"
@@ -32,12 +29,13 @@ QA_SANE = "True"
WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
textrel already-stripped incompatible-license files-invalid \
installed-vs-shipped compile-host-path install-host-path \
- pn-overrides infodir \
+ pn-overrides infodir build-deps file-rdeps \
+ unknown-configure-option \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
split-strip packages-list pkgv-undefined var-undefined \
- version-going-backwards \
+ version-going-backwards expanded-d \
"
ALL_QA = "${WARN_QA} ${ERROR_QA}"
@@ -154,24 +152,24 @@ def package_qa_clean_path(path,d):
""" Remove the common prefix from the path. In this case it is the TMPDIR"""
return path.replace(d.getVar('TMPDIR',True),"")
-def package_qa_write_error(error, d):
+def package_qa_write_error(type, error, d):
logfile = d.getVar('QA_LOGFILE', True)
if logfile:
p = d.getVar('P', True)
f = file( logfile, "a+")
- print >> f, "%s: %s" % (p, error)
+ print >> f, "%s: %s [%s]" % (p, error, type)
f.close()
def package_qa_handle_error(error_class, error_msg, d):
- package_qa_write_error(error_msg, d)
+ package_qa_write_error(error_class, error_msg, d)
if error_class in (d.getVar("ERROR_QA", True) or "").split():
- bb.error("QA Issue: %s" % error_msg)
+ bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
d.setVar("QA_SANE", False)
return False
elif error_class in (d.getVar("WARN_QA", True) or "").split():
- bb.warn("QA Issue: %s" % error_msg)
+ bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
else:
- bb.note("QA Issue: %s" % error_msg)
+ bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
return True
QAPATHTEST[libexec] = "package_qa_check_libexec"
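From this hunk on, the per-file QA checks fill a dict keyed by test name instead of appending to a list, so every message can be routed through package_qa_handle_error() and promoted or demoted via WARN_QA/ERROR_QA membership. A side effect worth noting: each test now reports at most one message per package, since later dict writes overwrite earlier ones. A standalone sketch of the routing (sets stand in for the WARN_QA/ERROR_QA variables):

    WARN_QA, ERROR_QA = {'libexec'}, {'arch'}

    def handle_error(error_class, msg):
        # Severity is decided by membership, not hardcoded in the check.
        if error_class in ERROR_QA:
            level = 'ERROR'
        elif error_class in WARN_QA:
            level = 'WARNING'
        else:
            level = 'NOTE'
        print("%s: QA Issue: %s [%s]" % (level, msg, error_class))

    messages = {}
    messages["libexec"] = "pkg: /usr/libexec/foo is using libexec"
    for w in messages:
        handle_error(w, messages[w])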
@@ -183,7 +181,7 @@ def package_qa_check_libexec(path,name, d, elf, messages):
return True
if 'libexec' in path.split(os.path.sep):
- messages.append("%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
+ messages["libexec"] = "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec)
return False
return True
@@ -211,7 +209,7 @@ def package_qa_check_rpath(file,name, d, elf, messages):
rpath = m.group(1)
for dir in bad_dirs:
if dir in rpath:
- messages.append("package %s contains bad RPATH %s in file %s" % (name, rpath, file))
+ messages["rpaths"] = "package %s contains bad RPATH %s in file %s" % (name, rpath, file)
QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
def package_qa_check_useless_rpaths(file, name, d, elf, messages):
@@ -241,7 +239,7 @@ def package_qa_check_useless_rpaths(file, name, d, elf, messages):
if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
# The dynamic linker searches both these places anyway. There is no point in
# looking there again.
- messages.append("%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
+ messages["useless-rpaths"] = "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath)
QAPATHTEST[dev-so] = "package_qa_check_dev"
def package_qa_check_dev(path, name, d, elf, messages):
@@ -250,8 +248,8 @@ def package_qa_check_dev(path, name, d, elf, messages):
"""
if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
- messages.append("non -dev/-dbg/-nativesdk package contains symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ messages["dev-so"] = "non -dev/-dbg/-nativesdk package contains symlink .so: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d))
QAPATHTEST[staticdev] = "package_qa_check_staticdev"
def package_qa_check_staticdev(path, name, d, elf, messages):
@@ -263,8 +261,8 @@ def package_qa_check_staticdev(path, name, d, elf, messages):
"""
if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a"):
- messages.append("non -staticdev package contains static .a library: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ messages["staticdev"] = "non -staticdev package contains static .a library: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d))
def package_qa_check_libdir(d):
"""
@@ -318,8 +316,8 @@ def package_qa_check_dbg(path, name, d, elf, messages):
if not "-dbg" in name and not "-ptest" in name:
if '.debug' in path.split(os.path.sep):
- messages.append("non debug package contains .debug directory: %s path %s" % \
- (name, package_qa_clean_path(path,d)))
+ messages["debug-files"] = "non debug package contains .debug directory: %s path %s" % \
+ (name, package_qa_clean_path(path,d))
QAPATHTEST[perms] = "package_qa_check_perm"
def package_qa_check_perm(path,name,d, elf, messages):
@@ -464,16 +462,16 @@ def package_qa_check_arch(path,name,d, elf, messages):
# Check the architecture and endiannes of the binary
if not ((machine == elf.machine()) or \
- ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32"))):
- messages.append("Architecture did not match (%d to %d) on %s" % \
- (machine, elf.machine(), package_qa_clean_path(path,d)))
+ ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
+ messages["arch"] = "Architecture did not match (%d to %d) on %s" % \
+ (machine, elf.machine(), package_qa_clean_path(path,d))
elif not ((bits == elf.abiSize()) or \
- ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32"))):
- messages.append("Bit size did not match (%d to %d) %s on %s" % \
- (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
+ ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
+ messages["arch"] = "Bit size did not match (%d to %d) %s on %s" % \
+ (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d))
elif not littleendian == elf.isLittleEndian():
- messages.append("Endiannes did not match (%d to %d) on %s" % \
- (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
+ messages["arch"] = "Endiannes did not match (%d to %d) on %s" % \
+ (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d))
QAPATHTEST[desktop] = "package_qa_check_desktop"
def package_qa_check_desktop(path, name, d, elf, messages):
@@ -485,7 +483,7 @@ def package_qa_check_desktop(path, name, d, elf, messages):
output = os.popen("%s %s" % (desktop_file_validate, path))
# This only produces output on errors
for l in output:
- messages.append("Desktop file issue: " + l.strip())
+ messages["desktop"] = "Desktop file issue: " + l.strip()
QAPATHTEST[textrel] = "package_qa_textrel"
def package_qa_textrel(path, name, d, elf, messages):
@@ -509,7 +507,7 @@ def package_qa_textrel(path, name, d, elf, messages):
sane = False
if not sane:
- messages.append("ELF binary '%s' has relocations in .text" % path)
+ messages["textrel"] = "ELF binary '%s' has relocations in .text" % path
QAPATHTEST[ldflags] = "package_qa_hash_style"
def package_qa_hash_style(path, name, d, elf, messages):
@@ -544,7 +542,7 @@ def package_qa_hash_style(path, name, d, elf, messages):
sane = True
if has_syms and not sane:
- messages.append("No GNU_HASH in the elf binary: '%s'" % path)
+ messages["ldflags"] = "No GNU_HASH in the elf binary: '%s'" % path
QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
@@ -564,7 +562,7 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
with open(path) as f:
file_content = f.read()
if tmpdir in file_content:
- messages.append("File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
+ messages["buildpaths"] = "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d)
QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
@@ -583,7 +581,7 @@ def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name, True) or ""):
if rdep.startswith("%sxorg-abi-" % mlprefix):
return
- messages.append("Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
+ messages["xorg-driver-abi"] = "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path))
QAPATHTEST[infodir] = "package_qa_check_infodir"
def package_qa_check_infodir(path, name, d, elf, messages):
@@ -593,7 +591,7 @@ def package_qa_check_infodir(path, name, d, elf, messages):
infodir = d.expand("${infodir}/dir")
if infodir in path:
- messages.append("The /usr/share/info/dir file is not meant to be shipped in a particular package.")
+ messages["infodir"] = "The /usr/share/info/dir file is not meant to be shipped in a particular package."
QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
@@ -606,7 +604,7 @@ def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
tmpdir = d.getVar('TMPDIR', True)
if target.startswith(tmpdir):
trimmed = path.replace(os.path.join (d.getVar("PKGDEST", True), name), "")
- messages.append("Symlink %s in %s points to TMPDIR" % (trimmed, name))
+ messages["symlink-to-sysroot"] = "Symlink %s in %s points to TMPDIR" % (trimmed, name)
def package_qa_check_license(workdir, d):
"""
@@ -738,8 +736,8 @@ def package_qa_walk(path, warnfuncs, errorfuncs, skip, package, d):
target_os = d.getVar('TARGET_OS', True)
target_arch = d.getVar('TARGET_ARCH', True)
- warnings = []
- errors = []
+ warnings = {}
+ errors = {}
for path in pkgfiles[package]:
elf = oe.qa.ELFFile(path)
try:
@@ -752,15 +750,13 @@ def package_qa_walk(path, warnfuncs, errorfuncs, skip, package, d):
func(path, package, d, elf, errors)
for w in warnings:
- bb.warn("QA Issue: %s" % w)
- package_qa_write_error(w, d)
+ package_qa_handle_error(w, warnings[w], d)
for e in errors:
- bb.error("QA Issue: %s" % e)
- package_qa_write_error(e, d)
+ package_qa_handle_error(e, errors[e], d)
return len(errors) == 0
-def package_qa_check_rdepends(pkg, pkgdest, skip, d):
+def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# Don't do this check for kernel/module recipes, there aren't too many debug/development
# packages and you can get false positives e.g. on kernel-module-lirc-dev
if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
@@ -776,13 +772,102 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, d):
rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
# Now do the sanity check!!!
- for rdepend in rdepends:
- if "-dbg" in rdepend and "debug-deps" not in skip:
- error_msg = "%s rdepends on %s" % (pkg,rdepend)
- sane = package_qa_handle_error("debug-deps", error_msg, d)
- if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
- error_msg = "%s rdepends on %s" % (pkg, rdepend)
- sane = package_qa_handle_error("dev-deps", error_msg, d)
+ if "build-deps" not in skip:
+ for rdepend in rdepends:
+ if "-dbg" in rdepend and "debug-deps" not in skip:
+ error_msg = "%s rdepends on %s" % (pkg,rdepend)
+ sane = package_qa_handle_error("debug-deps", error_msg, d)
+ if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
+ error_msg = "%s rdepends on %s" % (pkg, rdepend)
+ sane = package_qa_handle_error("dev-deps", error_msg, d)
+ if rdepend not in packages:
+ rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
+ if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
+ continue
+ if not rdep_data or not 'PN' in rdep_data:
+ pkgdata_dir = d.getVar("PKGDATA_DIR", True)
+ try:
+ possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
+ except OSError:
+ possibles = []
+ for p in possibles:
+ rdep_data = oe.packagedata.read_subpkgdata(p, d)
+ if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
+ break
+ if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
+ continue
+ error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
+ sane = package_qa_handle_error("build-deps", error_msg, d)
+
+ if "file-rdeps" not in skip:
+ ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
+ if bb.data.inherits_class('nativesdk', d):
+ ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl'])
+ # Collect the FILERDEPENDS for this package
+ filerdepends = set()
+ rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
+ for key in rdep_data:
+ if key.startswith("FILERDEPENDS_"):
+ for subkey in rdep_data[key].split():
+ filerdepends.add(subkey)
+ filerdepends -= ignored_file_rdeps
+
+ if filerdepends:
+ next = rdepends
+ done = rdepends[:]
+ # Find all the rdepends on the dependency chain
+ while next:
+ new = []
+ for rdep in next:
+ rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
+ sub_rdeps = rdep_data.get("RDEPENDS_" + rdep)
+ if not sub_rdeps:
+ continue
+ for sub_rdep in sub_rdeps.split():
+ if sub_rdep in done:
+ continue
+ if not sub_rdep.startswith('(') and \
+ oe.packagedata.has_subpkgdata(sub_rdep, d):
+ # It's a new rdep
+ done.append(sub_rdep)
+ new.append(sub_rdep)
+ next = new
+
+ # Add the rprovides of itself
+ if pkg not in done:
+ done.insert(0, pkg)
+
+ # The python is not a package, but python-core provides it, so
+ # skip checking /usr/bin/python if python is in the rdeps, in
+ # case there is a RDEPENDS_pkg = "python" in the recipe.
+ for py in [ d.getVar('MLPREFIX', True) + "python", "python" ]:
+ if py in done:
+ filerdepends.discard("/usr/bin/python")
+ done.remove(py)
+ for rdep in done:
+ # Collect this dependency's FILERPROVIDES, RPROVIDES and FILES_INFO
+ rdep_rprovides = set()
+ rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
+ for key in rdep_data:
+ if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
+ for subkey in rdep_data[key].split():
+ rdep_rprovides.add(subkey)
+ # Add the files list to the rprovides
+ if key == "FILES_INFO":
+ # Use eval() to make it as a dict
+ for subkey in eval(rdep_data[key]):
+ rdep_rprovides.add(subkey)
+ filerdepends -= rdep_rprovides
+ if not filerdepends:
+ # Break if all the file rdepends are met
+ break
+ else:
+ # Clear it for the next loop
+ rdep_rprovides.clear()
+ if filerdepends:
+ error_msg = "%s requires %s, but no providers in its RDEPENDS" % \
+ (pkg, ', '.join(str(e) for e in filerdepends))
+ sane = package_qa_handle_error("file-rdeps", error_msg, d)
return sane
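The file-rdeps check above first computes the transitive closure of the package's RDEPENDS (a breadth-first walk over pkgdata), then subtracts each dependency's provided files and rprovides from the per-file dependencies; whatever is left has no provider. A standalone sketch with plain dicts in place of pkgdata (all names illustrative):

    rdepends_map = {'app': ['libfoo'], 'libfoo': ['libbar'], 'libbar': []}
    provides_map = {'libfoo': {'/usr/lib/libfoo.so.1'},
                    'libbar': {'/usr/lib/libbar.so.1'}}

    def unmet_file_rdeps(pkg, filerdeps):
        frontier = list(rdepends_map.get(pkg, []))
        done = frontier[:]
        while frontier:                      # breadth-first RDEPENDS closure
            new = []
            for rdep in frontier:
                for sub in rdepends_map.get(rdep, []):
                    if sub not in done:
                        done.append(sub)
                        new.append(sub)
            frontier = new
        if pkg not in done:
            done.insert(0, pkg)              # a package provides to itself
        remaining = set(filerdeps)
        for rdep in done:                    # subtract what each dep ships
            remaining -= provides_map.get(rdep, set())
            if not remaining:
                break
        return remaining

    print(unmet_file_rdeps('app', {'/usr/lib/libbar.so.1'}))  # set()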
@@ -822,12 +907,42 @@ def package_qa_check_deps(pkg, pkgdest, skip, d):
return sane
+QAPATHTEST[expanded-d] = "package_qa_check_expanded_d"
+def package_qa_check_expanded_d(path,name,d,elf,messages):
+ """
+ Check for the expanded D (${D}) value in pkg_* and FILES
+ variables and warn the user to use it correctly.
+ """
+
+ sane = True
+ expanded_d = d.getVar('D',True)
+
+ # Get packages for current recipe and iterate
+ packages = d.getVar('PACKAGES', True).split(" ")
+ for pak in packages:
+ # Go through all variables and check if the expanded D is found; warn the user accordingly
+ for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
+ bbvar = d.getVar(var + "_" + pak)
+ if bbvar:
+ # Bitbake expands ${D} within bbvar during the previous step, so we check for its expanded value
+ if expanded_d in bbvar:
+ if var == 'FILES':
+ messages["expanded-d"] = "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % pak
+ sane = False
+ else:
+ messages["expanded-d"] = "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, pak)
+ sane = False
+ return sane
+
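For reference, QAPATHTEST checks like the one above are plain functions looked up by name and run against the files of a package; on failure they record a message under their test name and return False. A minimal sketch of the registration pattern (test name and condition hypothetical):

    QAPATHTEST[hypothetical-check] = "package_qa_check_hypothetical"
    def package_qa_check_hypothetical(path, name, d, elf, messages):
        # Illustrative only: flag stray editor backup files in a package
        if path.endswith(".orig"):
            messages["hypothetical-check"] = "%s: stray file %s" % (name, path)
            return False
        return True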
# The PACKAGE FUNC to scan each package
python do_package_qa () {
import subprocess
+ import oe.packagedata
bb.note("DO PACKAGE QA")
+ bb.build.exec_func("read_subpackage_metadata", d)
+
logdir = d.getVar('T', True)
pkg = d.getVar('PN', True)
@@ -855,6 +970,15 @@ python do_package_qa () {
pkgdest = d.getVar('PKGDEST', True)
packages = d.getVar('PACKAGES', True)
+ cpath = oe.cachedpath.CachedPath()
+ global pkgfiles
+ pkgfiles = {}
+ for pkg in (packages or "").split():
+ pkgfiles[pkg] = []
+ for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
+ for file in files:
+ pkgfiles[pkg].append(walkroot + os.sep + file)
+
# no packages should be scanned
if not packages:
return
@@ -864,6 +988,11 @@ python do_package_qa () {
# The package name matches the [a-z0-9.+-]+ regular expression
pkgname_pattern = re.compile("^[a-z0-9.+-]+$")
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ taskdeps = set()
+ for dep in taskdepdata:
+ taskdeps.add(taskdepdata[dep][0])
+
g = globals()
walk_sane = True
rdepends_sane = True
@@ -894,7 +1023,7 @@ python do_package_qa () {
path = "%s/%s" % (pkgdest, package)
if not package_qa_walk(path, warnchecks, errorchecks, skip, package, d):
walk_sane = False
- if not package_qa_check_rdepends(package, pkgdest, skip, d):
+ if not package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d):
rdepends_sane = False
if not package_qa_check_deps(package, pkgdest, skip, d):
deps_sane = False
@@ -909,6 +1038,16 @@ python do_package_qa () {
bb.note("DONE with PACKAGE QA")
}
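The taskdeps set assembled above comes from BB_TASKDEPDATA, where each entry maps a task dependency to a metadata list whose first element is the providing recipe's PN; collecting those first elements yields the set of recipes this task was actually built against. In isolation (the entry key below is illustrative only):

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    taskdeps = set(taskdepdata[dep][0] for dep in taskdepdata)
    # e.g. taskdepdata['.../m4-native_1.4.17.bb.do_populate_sysroot']
    #          -> ['m4-native', 'do_populate_sysroot', ...]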
+do_package_qa[rdeptask] = "do_packagedata"
+addtask do_package_qa after do_packagedata do_package before do_build
+
+SSTATETASKS += "do_package_qa"
+do_package_qa[sstate-inputdirs] = ""
+do_package_qa[sstate-outputdirs] = ""
+python do_package_qa_setscene () {
+ sstate_setscene(d)
+}
+addtask do_package_qa_setscene
python do_qa_staging() {
bb.note("QA checking staging")
@@ -1001,12 +1140,25 @@ do_configure[postfuncs] += "do_qa_configure "
python () {
tests = d.getVar('ALL_QA', True).split()
if "desktop" in tests:
- d.appendVar("PACKAGE_DEPENDS", "desktop-file-utils-native")
+ d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
###########################################################################
# Check various variables
###########################################################################
+ # Checking ${FILESEXTRAPATHS}
+ extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
+ if '__default' not in extrapaths.split(":"):
+ msg = "FILESEXTRAPATHS-variable, must always use _prepend (or _append)\n"
+ msg += "type of assignment, and don't forget the colon.\n"
+ msg += "Please assign it with the format of:\n"
+ msg += " FILESEXTRAPATHS_append := \":${THISDIR}/Your_Files_Path\" or\n"
+ msg += " FILESEXTRAPATHS_prepend := \"${THISDIR}/Your_Files_Path:\"\n"
+ msg += "in your bbappend file\n\n"
+ msg += "Your incorrect assignment is:\n"
+ msg += "%s\n" % extrapaths
+ bb.warn(msg)
+
if d.getVar('do_stage', True) is not None:
bb.fatal("Legacy staging found for %s as it has a do_stage function. This will need conversion to a do_install or often simply removal to work with OE-core" % d.getVar("FILE", True))
@@ -1018,9 +1170,13 @@ python () {
issues = []
if (d.getVar('PACKAGES', True) or "").split():
+ for dep in (d.getVar('QADEPENDS', True) or "").split():
+ d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
if d.getVar(var):
issues.append(var)
+ else:
+ d.setVarFlag('do_package_qa', 'rdeptask', '')
for i in issues:
package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE", True), i), d)
}
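The pkgvarcheck error above fires when one of the listed variables is set without a package suffix, since such a value silently applies to every package the recipe generates. An illustration of the wrong and right forms (dependency name hypothetical):

    # Flagged by pkgvarcheck: unscoped, applies to all packages of the recipe
    RDEPENDS = "libfoo"

    # Correct: scope the runtime dependency to a specific package
    RDEPENDS_${PN} = "libfoo"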
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
index bbcfa15b84..6a6ad91866 100644
--- a/meta/classes/kernel-arch.bbclass
+++ b/meta/classes/kernel-arch.bbclass
@@ -40,7 +40,6 @@ def map_uboot_arch(a, d):
if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
elif re.match('i.86$', a): return 'x86'
- elif re.match('arm64$', a): return 'arm'
return a
export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}"
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
index 0c045c91e9..32b80856e0 100644
--- a/meta/classes/kernel-module-split.bbclass
+++ b/meta/classes/kernel-module-split.bbclass
@@ -70,12 +70,12 @@ python split_kernel_module_packages () {
m = kerverrexp.match(kernelver)
if m:
kernelver_stripped = m.group(1)
- staging_kernel_dir = d.getVar("STAGING_KERNEL_DIR", True)
+ staging_kernel_dir = d.getVar("STAGING_KERNEL_BUILDDIR", True)
system_map_file = "%s/boot/System.map-%s" % (dvar, kernelver)
if not os.path.exists(system_map_file):
system_map_file = "%s/System.map-%s" % (staging_kernel_dir, kernelver)
if not os.path.exists(system_map_file):
- bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_DIR '%s'" % (kernelver, dvar, staging_kernel_dir))
+ bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_BUILDDIR '%s'" % (kernelver, dvar, staging_kernel_dir))
cmd = "depmod -n -a -b %s -F %s %s" % (dvar, system_map_file, kernelver_stripped)
f = os.popen(cmd, 'r')
@@ -132,12 +132,18 @@ python split_kernel_module_packages () {
# appropriate modprobe commands to the postinst
autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD", True) or "").split()
autoload = d.getVar('module_autoload_%s' % basename, True)
- if autoload:
- bb.error("KERNEL_MODULE_AUTOLOAD has replaced module_autoload_%s, please replace it!" % basename)
+ if autoload and autoload == basename:
+ bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
+ if autoload and basename not in autoloadlist:
+ bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
if basename in autoloadlist:
name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
f = open(name, 'w')
- f.write('%s\n' % basename)
+ if autoload:
+ for m in autoload.split():
+ f.write('%s\n' % m)
+ else:
+ f.write('%s\n' % basename)
f.close()
postinst = d.getVar('pkg_postinst_%s' % pkg, True)
if not postinst:
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index 6010dc94e0..8db489964e 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -1,7 +1,5 @@
-S = "${WORKDIR}/linux"
-
# remove tasks that modify the source tree in case externalsrc is inherited
-SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_patch"
+SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_shared_workdir do_fetch do_unpack do_patch"
# returns local (absolute) path names for all valid patches in the
# src_uri
@@ -71,6 +69,11 @@ do_patch() {
fi
machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+ machine_srcrev="${SRCREV_machine}"
+ if [ -z "${machine_srcrev}" ]; then
+ # fall back to SRCREV if a non-machine_meta tree is being built
+ machine_srcrev="${SRCREV}"
+ fi
# if we have a defined/set meta branch we should not be generating
# any meta data. The passed branch has what we need.
@@ -80,8 +83,7 @@ do_patch() {
createme ${createme_flags} ${ARCH} ${machine_branch}
if [ $? -ne 0 ]; then
- echo "ERROR. Could not create ${machine_branch}"
- exit 1
+ bbfatal "Could not create ${machine_branch}"
fi
sccs="${@" ".join(find_sccs(d))}"
@@ -110,36 +112,27 @@ do_patch() {
done
fi
- if [ "${machine_branch}" != "${KBRANCH_DEFAULT}" ]; then
- updateme_flags="--branch ${machine_branch}"
- fi
-
# updates or generates the target description
updateme ${updateme_flags} -DKDESC=${KMACHINE}:${LINUX_KERNEL_TYPE} \
${includes} ${addon_features} ${ARCH} ${KMACHINE} ${sccs} ${patches}
if [ $? -ne 0 ]; then
- echo "ERROR. Could not update ${machine_branch}"
- exit 1
+ bbfatal "Could not update ${machine_branch}"
fi
# executes and modifies the source tree as required
patchme ${KMACHINE}
if [ $? -ne 0 ]; then
- echo "ERROR. Could not apply patches for ${KMACHINE}."
- echo " Patch failures can be resolved in the devshell (bitbake -c devshell ${PN})"
- exit 1
+ bberror "Could not apply patches for ${KMACHINE}."
+ bbfatal "Patch failures can be resolved in the devshell (bitbake -c devshell ${PN})"
fi
- # Perform a final check. If something other than the default kernel
- # branch was requested, and that's not where we ended up, then we
- # should thrown an error, since we aren't building what was expected
- final_branch="$(git symbolic-ref HEAD 2>/dev/null)"
- final_branch=${final_branch##refs/heads/}
- if [ "${machine_branch}" != "${KBRANCH_DEFAULT}" ] &&
- [ "${final_branch}" != "${machine_branch}" ]; then
- echo "ERROR: branch ${machine_branch} was requested, but was not properly"
- echo " configured to be built. The current branch is ${final_branch}"
- exit 1
+ # check to see if the specified SRCREV is reachable from the final branch.
+ # if it isn't, something has gone wrong and we should error.
+ if [ "${machine_srcrev}" != "AUTOINC" ]; then
+ if ! [ "$(git rev-parse --verify ${machine_srcrev}~0)" = "$(git merge-base ${machine_srcrev} HEAD)" ]; then
+ bberror "SRCREV ${machine_srcrev} was specified, but is not reachable"
+ bbfatal "Check the BSP description for incorrect branch selection, or other errors."
+ fi
fi
}
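The new reachability test relies on a standard git identity: a commit R is an ancestor of HEAD exactly when the merge base of R and HEAD is R itself; the ~0 suffix simply forces the name to resolve to a commit object. The same test in isolation (revision value is a placeholder):

    rev="0123abc"   # placeholder for the configured SRCREV
    if [ "$(git rev-parse --verify ${rev}~0)" = "$(git merge-base ${rev} HEAD)" ]; then
        echo "${rev} is reachable from HEAD"
    else
        echo "${rev} is NOT reachable from HEAD"
    fi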
@@ -183,36 +176,17 @@ do_kernel_checkout() {
cd ${S}
if [ ! -f "Makefile" ]; then
- echo "[ERROR]: S is not set to the linux source directory. Check "
- echo " the recipe and set S to the proper extracted subdirectory"
- exit 1
+ bberror "S is not set to the linux source directory. Check "
+ bbfatal "the recipe and set S to the proper extracted subdirectory"
fi
+ rm -f .gitignore
git init
git add .
git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
+ git clean -d -f
fi
# end debare
- # If KMETA is defined, the branch must exist, but a machine branch
- # can be missing since it may be created later by the tools.
- if [ -n "${KMETA}" ]; then
- git branch -a --no-color | grep -q ${KMETA}
- if [ $? -ne 0 ]; then
- echo "ERROR. The branch '${KMETA}' is required and was not"
- echo "found. Ensure that the SRC_URI points to a valid linux-yocto"
- echo "kernel repository"
- exit 1
- fi
- fi
-
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
-
- if [ "${KBRANCH}" != "${machine_branch}" ]; then
- echo "WARNING: The SRC_URI machine branch and KBRANCH are not the same."
- echo " KBRANCH will be adjusted to match, but this typically is a"
- echo " misconfiguration and should be checked."
- fi
-
# convert any remote branches to local tracking ones
for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
@@ -222,15 +196,27 @@ do_kernel_checkout() {
fi
done
+ # If KMETA is defined, the branch must exist, but a machine branch
+ # can be missing since it may be created later by the tools.
+ if [ -n "${KMETA}" ]; then
+ git show-ref --quiet --verify -- "refs/heads/${KMETA}"
+ if [ $? -eq 1 ]; then
+ bberror "The branch '${KMETA}' is required and was not found"
+ bberror "Ensure that the SRC_URI points to a valid linux-yocto"
+ bbfatal "kernel repository"
+ fi
+ fi
+
+
# Create a working tree copy of the kernel by checking out a branch
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
- if [ $? -eq 0 ]; then
- # checkout and clobber any unimportant files
- git checkout -f ${machine_branch}
- else
- echo "Not checking out ${machine_branch}, it will be created later"
- git checkout -f master
+ if [ $? -ne 0 ]; then
+ machine_branch="master"
fi
+
+ # checkout and clobber any unimportant files
+ git checkout -f ${machine_branch}
}
do_kernel_checkout[dirs] = "${S}"
@@ -238,7 +224,7 @@ addtask kernel_checkout before do_patch after do_unpack
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
- echo "[INFO] doing kernel configme"
+ bbnote "kernel configme"
export KMETA=${KMETA}
if [ -n "${KCONFIG_MODE}" ]; then
@@ -255,8 +241,7 @@ do_kernel_configme() {
PATH=${PATH}:${S}/scripts/util
configme ${configmeflags} --reconfig --output ${B} ${LINUX_KERNEL_TYPE} ${KMACHINE}
if [ $? -ne 0 ]; then
- echo "ERROR. Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
- exit 1
+ bbfatal "Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
fi
echo "# Global settings from linux recipe" >> ${B}/.config
@@ -268,8 +253,6 @@ addtask kernel_configme after do_patch
python do_kernel_configcheck() {
import re, string, sys
- bb.plain("NOTE: validating kernel config, see log.do_kernel_configcheck for details")
-
# if KMETA isn't set globally by a recipe using this routine, we need to
# set the default to 'meta'. Otherwise, kconf_check is not passed a valid
# meta-series for processing
@@ -281,115 +264,99 @@ python do_kernel_configcheck() {
cmd = d.expand("cd ${S}; kconf_check -config- %s/meta-series ${S} ${B}" % kmeta)
ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
- config_check_visibility = d.getVar( "KCONF_AUDIT_LEVEL", True ) or 1
- if config_check_visibility == 1:
- bb.debug( 1, "%s" % result )
- else:
- bb.note( "%s" % result )
+ config_check_visibility = int(d.getVar( "KCONF_AUDIT_LEVEL", True ) or 0)
+ bsp_check_visibility = int(d.getVar( "KCONF_BSP_AUDIT_LEVEL", True ) or 0)
+
+ # if config check visibility is non-zero, report dropped configuration values
+ mismatch_file = d.expand("${S}/") + kmeta + "/mismatch.cfg"
+ if os.path.exists(mismatch_file):
+ if config_check_visibility:
+ with open (mismatch_file, "r") as myfile:
+ results = myfile.read()
+ bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
+
+ # if config check visibility is level 2 or higher, report non-hardware options
+ nonhw_file = d.expand("${S}/") + kmeta + "/nonhw_report.cfg"
+ if os.path.exists(nonhw_file):
+ if config_check_visibility > 1:
+ with open (nonhw_file, "r") as myfile:
+ results = myfile.read()
+ bb.warn( "[kernel config]: BSP specified non-hw configuration:\n\n%s" % results)
+
+ bsp_desc = d.expand("${S}/") + kmeta + "/top_tgt"
+ if os.path.exists(bsp_desc) and bsp_check_visibility > 1:
+ with open (bsp_desc, "r") as myfile:
+ bsp_tgt = myfile.read()
+ m = re.match("^(.*)scratch.obj(.*)$", bsp_tgt)
+ if m is not None:
+ bb.warn( "[kernel]: An auto generated BSP description was used, this normally indicates a misconfiguration.\n" +
+ "Check that your machine (%s) has an associated kernel description." % "${MACHINE}" )
}
# Ensure that the branches (BSP and meta) are on the locations specified by
# their SRCREV values. If they are NOT on the right commits, the branches
# are corrected to the proper commit.
do_validate_branches() {
+ set +e
cd ${S}
export KMETA=${KMETA}
machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+ machine_srcrev="${SRCREV_machine}"
- set +e
# if SRCREV is AUTOREV it shows up as AUTOINC; there's nothing to
# check and we can exit early
- if [ "${SRCREV_machine}" = "AUTOINC" ] || [ "${SRCREV_machine}" = "INVALID" ] ||
- [ "${SRCREV_machine}" = "" ]; then
- return
- fi
-
- # If something other than the default branch was requested, it must
- # exist in the tree, and it's a hard error if it wasn't
- git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
- if [ $? -eq 1 ]; then
- if [ -n "${KBRANCH_DEFAULT}" ] &&
- [ "${machine_branch}" != "${KBRANCH_DEFAULT}" ]; then
- echo "ERROR: branch ${machine_branch} was set for kernel compilation, "
- echo " but it does not exist in the kernel repository."
- echo " Check the value of KBRANCH and ensure that it describes"
- echo " a valid banch in the source kernel repository"
- exit 1
+ if [ "${machine_srcrev}" = "AUTOINC" ]; then
+ bbnote "SRCREV validation is not required for AUTOREV"
+ elif [ "${machine_srcrev}" = "" ]; then
+ if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
+ # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
+ # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
+ # this case, we need to reset to the given SRCREV before heading to patching
+ bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
+ force_srcrev="${SRCREV}"
fi
- fi
-
- if [ -z "${SRCREV_machine}" ]; then
- target_branch_head="${SRCREV}"
else
- target_branch_head="${SRCREV_machine}"
- fi
-
- # $SRCREV could have also been AUTOINC, so check again
- if [ "${target_branch_head}" = "AUTOINC" ]; then
- return
- fi
-
- ref=`git show ${target_branch_head} 2>&1 | head -n1 || true`
- if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
- echo "ERROR ${target_branch_head} is not a valid commit ID."
- echo "The kernel source tree may be out of sync"
- exit 1
+ git cat-file -t ${machine_srcrev} > /dev/null
+ if [ $? -ne 0 ]; then
+ bberror "${machine_srcrev} is not a valid commit ID."
+ bbfatal "The kernel source tree may be out of sync"
+ fi
+ force_srcrev=${machine_srcrev}
fi
- containing_branches=`git branch --contains $target_branch_head | sed 's/^..//'`
- if [ -z "$containing_branches" ]; then
- echo "ERROR: SRCREV was set to \"$target_branch_head\", but no branches"
- echo " contain this commit"
- exit 1
- fi
+ ## KMETA branch validation.
+ target_meta_head="${SRCREV_meta}"
+ if [ "${target_meta_head}" = "AUTOINC" ] || [ "${target_meta_head}" = "" ]; then
+ bbnote "SRCREV validation skipped for AUTOREV or empty meta branch"
+ else
+ meta_head=`git show-ref -s --heads ${KMETA}`
- # force the SRCREV in each branch that contains the specified
- # SRCREV (if it isn't the current HEAD of that branch)
- git checkout -q master
- for b in $containing_branches; do
- branch_head=`git show-ref -s --heads ${b}`
- if [ "$branch_head" != "$target_branch_head" ]; then
- echo "[INFO] Setting branch $b to ${target_branch_head}"
- if [ "$b" = "master" ]; then
- git reset --hard $target_branch_head > /dev/null
- else
- git branch -D $b > /dev/null
- git branch $b $target_branch_head > /dev/null
- fi
+ git cat-file -t ${target_meta_head} > /dev/null
+ if [ $? -ne 0 ]; then
+ bberror "${target_meta_head} is not a valid commit ID"
+ bbfatal "The kernel source tree may be out of sync"
fi
- done
-
- ## KMETA branch validation.
- ## We do validation if the meta branch exists, and AUTOREV hasn't been set
- meta_head=`git show-ref -s --heads ${KMETA}`
- target_meta_head="${SRCREV_meta}"
- git show-ref --quiet --verify -- "refs/heads/${KMETA}"
- if [ $? -eq 0 ] && [ "${target_meta_head}" != "AUTOINC" ]; then
if [ "$meta_head" != "$target_meta_head" ]; then
- ref=`git show ${target_meta_head} 2>&1 | head -n1 || true`
- if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
- echo "ERROR ${target_meta_head} is not a valid commit ID"
- echo "The kernel source tree may be out of sync"
- exit 1
- else
- echo "[INFO] Setting branch ${KMETA} to ${target_meta_head}"
- git branch -m ${KMETA} ${KMETA}-orig
- git checkout -q -b ${KMETA} ${target_meta_head}
- if [ $? -ne 0 ];then
- echo "ERROR: could not checkout ${KMETA} branch from known hash ${target_meta_head}"
- exit 1
- fi
+ bbnote "Setting branch ${KMETA} to ${target_meta_head}"
+ git branch -m ${KMETA} ${KMETA}-orig
+ git checkout -q -b ${KMETA} ${target_meta_head}
+ if [ $? -ne 0 ];then
+ bbfatal "Could not checkout ${KMETA} branch from known hash ${target_meta_head}"
fi
fi
fi
- git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
- if [ $? -eq 0 ]; then
- # restore the branch for builds
- git checkout -q -f ${machine_branch}
- else
- git checkout -q master
+ git checkout -q -f ${machine_branch}
+ if [ -n "${force_srcrev}" ]; then
+ # see if the branch we are about to patch has been properly reset to the defined
+ # SRCREV .. if not, we reset it.
+ branch_head=`git rev-parse HEAD`
+ if [ "${force_srcrev}" != "${branch_head}" ]; then
+ current_branch=`git rev-parse --abbrev-ref HEAD`
+ git branch "$current_branch-orig"
+ git reset --hard ${force_srcrev}
+ fi
fi
}
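When the checked-out branch does not sit at the configured SRCREV, the logic above first preserves the old head on an -orig branch and then pins the working branch, so nothing becomes unreachable. Schematically (branch and revision names hypothetical):

    # current branch 'standard/base' is not at the configured SRCREV
    git branch standard/base-orig     # keep the old head reachable
    git reset --hard <force_srcrev>   # pin the branch to the configured revision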
@@ -405,8 +372,7 @@ do_kernel_link_vmlinux() {
ln -sf ../../../vmlinux
}
-OE_TERMINAL_EXPORTS += "GUILT_BASE KBUILD_OUTPUT"
-GUILT_BASE = "meta"
+OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
KBUILD_OUTPUT = "${B}"
python () {
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 6ed1cb73c8..125ed88406 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -1,7 +1,12 @@
inherit linux-kernel-base kernel-module-split
PROVIDES += "virtual/kernel"
-DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross"
+DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross bc-native"
+
+S = "${STAGING_KERNEL_DIR}"
+B = "${WORKDIR}/build"
+KBUILD_OUTPUT = "${B}"
+OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
# we include gcc above, we dont need virtual/libc
INHIBIT_DEFAULT_DEPS = "1"
@@ -12,7 +17,7 @@ INITRAMFS_TASK ?= ""
INITRAMFS_IMAGE_BUNDLE ?= ""
python __anonymous () {
- kerneltype = d.getVar('KERNEL_IMAGETYPE', True) or ''
+ kerneltype = d.getVar('KERNEL_IMAGETYPE', True)
if kerneltype == 'uImage':
depends = d.getVar("DEPENDS", True)
depends = "%s u-boot-mkimage-native" % depends
@@ -31,6 +36,26 @@ python __anonymous () {
d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
}
+# Old style kernels may set ${S} = ${WORKDIR}/git for example
+# We need to move these over to STAGING_KERNEL_DIR. We can't just
+# create the symlink in advance as the git fetcher can't cope with
+# the symlink.
+do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
+do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
+base_do_unpack_append () {
+ s = d.getVar("S", True)
+ if s[-1] == '/':
+ # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
+ s=s[:-1]
+ kernsrc = d.getVar("STAGING_KERNEL_DIR", True)
+ if s != kernsrc:
+ bb.utils.mkdirhier(kernsrc)
+ bb.utils.remove(kernsrc, recurse=True)
+ import subprocess
+ subprocess.call(d.expand("mv ${S} ${STAGING_KERNEL_DIR}"), shell=True)
+ os.symlink(kernsrc, s)
+}
+
inherit kernel-arch deploy
PACKAGES_DYNAMIC += "^kernel-module-.*"
@@ -55,7 +80,7 @@ KERNEL_IMAGEDEST = "boot"
#
export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}"
-KERNEL_VERSION = "${@get_kernelversion('${B}')}"
+KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
KERNEL_LOCALVERSION ?= ""
@@ -206,114 +231,70 @@ kernel_do_install() {
[ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
install -d ${D}${sysconfdir}/modules-load.d
install -d ${D}${sysconfdir}/modprobe.d
+}
+do_install[prefuncs] += "package_get_auto_pr"
- #
- # Support for external module building - create a minimal copy of the
- # kernel source tree.
- #
- kerneldir=${D}${KERNEL_SRC_PATH}
- install -d $kerneldir
-
- #
- # Store the kernel version in sysroots for module-base.bbclass
- #
-
- echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
-
- #
- # Store kernel image name to allow use during image generation
- #
+addtask shared_workdir after do_compile before do_install
- echo "${KERNEL_IMAGE_BASE_NAME}" >$kerneldir/kernel-image-name
+emit_depmod_pkgdata() {
+ # Stash data for depmod
+ install -d ${PKGDESTWORK}/kernel-depmod/
+ echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/kernel-depmod/kernel-abiversion
+ cp System.map ${PKGDESTWORK}/kernel-depmod/System.map-${KERNEL_VERSION}
+}
- #
- # Copy the entire source tree. In case an external build directory is
- # used, copy the build directory over first, then copy over the source
- # dir. This ensures the original Makefiles are used and not the
- # redirecting Makefiles in the build directory.
- #
- find . -depth -not -name "*.cmd" -not -name "*.o" -not -path "./Documentation*" -not -path "./source*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir
- cp .config $kerneldir
- if [ "${S}" != "${B}" ]; then
- pwd="$PWD"
- cd "${S}"
- find . -depth -not -path "./Documentation*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir
- cd "$pwd"
- fi
+PACKAGEFUNCS += "emit_depmod_pkgdata"
- # Test to ensure that the output file and image type are not actually
- # the same file. If hardlinking is used, they will be the same, and there's
- # no need to install.
- ! [ ${KERNEL_OUTPUT} -ef $kerneldir/${KERNEL_IMAGETYPE} ] && install -m 0644 ${KERNEL_OUTPUT} $kerneldir/${KERNEL_IMAGETYPE}
- install -m 0644 System.map $kerneldir/System.map-${KERNEL_VERSION}
+do_shared_workdir () {
+ cd ${B}
- # Dummy Makefile so the clean below works
- mkdir $kerneldir/Documentation
- touch $kerneldir/Documentation/Makefile
+ kerneldir=${STAGING_KERNEL_BUILDDIR}
+ install -d $kerneldir
#
- # Clean and remove files not needed for building modules.
- # Some distributions go through a lot more trouble to strip out
- # unecessary headers, for now, we just prune the obvious bits.
- #
- # We don't want to leave host-arch binaries in /sysroots, so
- # we clean the scripts dir while leaving the generated config
- # and include files.
+ # Store the kernel version in sysroots for module-base.bbclass
#
- oe_runmake -C $kerneldir CC="${KERNEL_CC}" LD="${KERNEL_LD}" clean _mrproper_scripts
-
- # hide directories that shouldn't have their .c, s and S files deleted
- for d in tools scripts lib; do
- mv $kerneldir/$d $kerneldir/.$d
- done
- # delete .c, .s and .S files, unless we hid a directory as .<dir>. This technique is
- # much faster than find -prune and -exec
- find $kerneldir -not -path '*/\.*' -type f -name "*.[csS]" -delete
+ echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
- # put the hidden dirs back
- for d in tools scripts lib; do
- mv $kerneldir/.$d $kerneldir/$d
- done
+ # Copy files required for module builds
+ cp System.map $kerneldir/System.map-${KERNEL_VERSION}
+ cp Module.symvers $kerneldir/
+ cp .config $kerneldir/
+ mkdir -p $kerneldir/include/config
+ cp include/config/kernel.release $kerneldir/include/config/kernel.release
+
+ # We can also copy over all the generated files and avoid special cases
+ # like version.h, but we've opted to keep this small until file creep starts
+ # to happen
+ if [ -e include/linux/version.h ]; then
+ mkdir -p $kerneldir/include/linux
+ cp include/linux/version.h $kerneldir/include/linux/version.h
+ fi
# As of Linux kernel version 3.0.1, the clean target removes
# arch/powerpc/lib/crtsavres.o which is present in
# KBUILD_LDFLAGS_MODULE, making it required to build external modules.
if [ ${ARCH} = "powerpc" ]; then
- cp -l arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
+ mkdir -p $kerneldir/arch/powerpc/lib/
+ cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
fi
- # Necessary for building modules like compat-wireless.
- if [ -f include/generated/bounds.h ]; then
- cp -l include/generated/bounds.h $kerneldir/include/generated/bounds.h
- fi
+ mkdir -p $kerneldir/include/generated/
+ cp -fR include/generated/* $kerneldir/include/generated/
+
if [ -d arch/${ARCH}/include/generated ]; then
mkdir -p $kerneldir/arch/${ARCH}/include/generated/
- cp -flR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
- fi
-
- # Remove the following binaries which cause strip or arch QA errors
- # during do_package for cross-compiled platforms
- bin_files="arch/powerpc/boot/addnote arch/powerpc/boot/hack-coff \
- arch/powerpc/boot/mktree scripts/kconfig/zconf.tab.o \
- scripts/kconfig/conf.o scripts/kconfig/kxgettext.o"
- for entry in $bin_files; do
- rm -f $kerneldir/$entry
- done
-
- # kernels <2.6.30 don't have $kerneldir/tools directory so we check if it exists before calling sed
- if [ -f $kerneldir/tools/perf/Makefile ]; then
- # Fix SLANG_INC for slang.h
- sed -i 's#-I/usr/include/slang#-I=/usr/include/slang#g' $kerneldir/tools/perf/Makefile
+ cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
fi
}
-do_install[prefuncs] += "package_get_auto_pr"
-python sysroot_stage_all () {
- oe.path.copyhardlinktree(d.expand("${D}${KERNEL_SRC_PATH}"), d.expand("${SYSROOT_DESTDIR}${KERNEL_SRC_PATH}"))
+# We don't need to stage anything here, not even the modules/firmware, since those would clash with linux-firmware
+sysroot_stage_all () {
+ :
}
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call oldnoconfig || yes '' | oe_runmake oldconfig"
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} oldnoconfig || yes '' | oe_runmake -C ${S} O=${B} oldconfig"
kernel_do_configure() {
# fixes extra + in /lib/modules/2.6.37+
@@ -322,6 +303,10 @@ kernel_do_configure() {
# $ make kernelrelease => 2.6.37+
touch ${B}/.scmversion ${S}/.scmversion
+ if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
+ mv "${S}/.config" "${B}/.config"
+ fi
+
# Copy defconfig to .config if .config does not exist. This allows
# recipes to manage the .config themselves in do_configure_prepend().
if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
@@ -346,7 +331,7 @@ PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-mod
FILES_${PN} = ""
FILES_kernel-base = "/lib/modules/${KERNEL_VERSION}/modules.order /lib/modules/${KERNEL_VERSION}/modules.builtin"
FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*"
-FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH}"
+FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} /lib/modules/${KERNEL_VERSION}/build"
FILES_kernel-vmlinux = "/boot/vmlinux*"
FILES_kernel-modules = ""
RDEPENDS_kernel = "kernel-base"
@@ -374,7 +359,7 @@ pkg_postinst_kernel-base () {
}
pkg_postinst_kernel-image () {
- update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true
+ update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true
}
pkg_postrm_kernel-image () {
@@ -384,14 +369,12 @@ pkg_postrm_kernel-image () {
PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
python split_kernel_packages () {
- do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.bin$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
- do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.fw$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
- do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.cis$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+ do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.(bin|fw|cis|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
}
do_strip() {
if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
- if [[ "${KERNEL_IMAGETYPE}" != "vmlinux" ]]; then
+ if [ "${KERNEL_IMAGETYPE}" != "vmlinux" ]; then
bbwarn "image type will not be stripped (not supported): ${KERNEL_IMAGETYPE}"
return
fi
@@ -403,7 +386,7 @@ do_strip() {
gawk '{print $1}'`
for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
- if [[ "$headers" != *"$str"* ]]; then
+ if [ "$headers" != *"$str"* ]; then
bbwarn "Section not found: $str";
fi
@@ -422,10 +405,13 @@ addtask do_strip before do_sizecheck after do_kernel_link_vmlinux
# with a fixed length or there is a limit in transferring the kernel to memory
do_sizecheck() {
if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
- cd ${B}
- size=`ls -lL ${KERNEL_OUTPUT} | awk '{ print $5}'`
+ invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
+ if [ -n "$invalid" ]; then
+ die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integerx (The unit is Kbytes)"
+ fi
+ size=`du -ks ${B}/${KERNEL_OUTPUT} | awk '{ print $1}'`
if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
- die "This kernel (size=$size > ${KERNEL_IMAGE_MAXSIZE}) is too big for your device. Please reduce the size of the kernel by making more of it modular."
+ die "This kernel (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device. Please reduce the size of the kernel by making more of it modular."
fi
fi
}
@@ -496,7 +482,7 @@ kernel_do_deploy() {
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
do_deploy[prefuncs] += "package_get_auto_pr"
-addtask deploy before do_build after do_install
+addtask deploy after do_populate_sysroot
EXPORT_FUNCTIONS do_deploy
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
new file mode 100644
index 0000000000..9efd46a92d
--- /dev/null
+++ b/meta/classes/kernelsrc.bbclass
@@ -0,0 +1,10 @@
+S = "${STAGING_KERNEL_DIR}"
+do_fetch[noexec] = "1"
+do_unpack[depends] += "virtual/kernel:do_patch"
+do_unpack[noexec] = "1"
+do_patch[noexec] = "1"
+do_package[depends] += "virtual/kernel:do_populate_sysroot"
+KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
+
+inherit linux-kernel-base
+
diff --git a/meta/classes/libc-common.bbclass b/meta/classes/libc-common.bbclass
index daf499d3eb..bbc80167dd 100644
--- a/meta/classes/libc-common.bbclass
+++ b/meta/classes/libc-common.bbclass
@@ -25,12 +25,19 @@ def get_libc_fpu_setting(bb, d):
python populate_packages_prepend () {
if d.getVar('DEBIAN_NAMES', True):
+ pkgs = d.getVar('PACKAGES', True).split()
bpn = d.getVar('BPN', True)
- d.setVar('PKG_'+bpn, 'libc6')
- d.setVar('PKG_'+bpn+'-dev', 'libc6-dev')
- d.setVar('PKG_'+bpn+'-dbg', 'libc6-dbg')
+ prefix = d.getVar('MLPREFIX', True) or ""
+ # Set the base package...
+ d.setVar('PKG_' + prefix + bpn, prefix + 'libc6')
+ libcprefix = prefix + bpn + '-'
+ for p in pkgs:
+ # And all the subpackages.
+ if p.startswith(libcprefix):
+ renamed = p.replace(bpn, 'libc6', 1)
+ d.setVar('PKG_' + p, renamed)
# For backward compatibility with old -dbg package
- d.appendVar('RPROVIDES_' + bpn + '-dbg', ' libc-dbg')
- d.appendVar('RCONFLICTS_' + bpn + '-dbg', ' libc-dbg')
- d.appendVar('RREPLACES_' + bpn + '-dbg', ' libc-dbg')
+ d.appendVar('RPROVIDES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
+ d.appendVar('RCONFLICTS_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
+ d.appendVar('RREPLACES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
}
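A worked illustration of the renaming above (prefix and package names hypothetical): with MLPREFIX = "lib32-" and BPN = "glibc", the replace call keeps the multilib prefix intact:

    # PKG_lib32-glibc       -> "lib32-libc6"
    # PKG_lib32-glibc-dev   -> "lib32-libc6-dev"
    # PKG_lib32-glibc-dbg   -> "lib32-libc6-dbg"
    #
    # e.g. 'lib32-glibc-dev'.replace('glibc', 'libc6', 1) == 'lib32-libc6-dev'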
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index c1bc399c18..793936e10b 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -268,6 +268,7 @@ python package_do_split_gconvs () {
locale_arch_options = { \
"arm": " --uint32-align=4 --little-endian ", \
"armeb": " --uint32-align=4 --big-endian ", \
+ "aarch64": " --uint32-align=4 --little-endian ", \
"aarch64_be": " --uint32-align=4 --big-endian ", \
"sh4": " --uint32-align=4 --big-endian ", \
"powerpc": " --uint32-align=4 --big-endian ", \
@@ -298,9 +299,7 @@ python package_do_split_gconvs () {
--inputfile=%s/i18n/locales/%s --charmap=%s %s" \
% (treedir, datadir, locale, encoding, name)
- qemu_options = d.getVar("QEMU_OPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True)
- if not qemu_options:
- qemu_options = d.getVar('QEMU_OPTIONS', True)
+ qemu_options = d.getVar('QEMU_OPTIONS', True)
cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
-E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index 08f0665e7d..95e01213c0 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -17,12 +17,12 @@ do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
python write_package_manifest() {
- # Get list of installed packages
- license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
- bb.utils.mkdirhier(license_image_dir)
- from oe.rootfs import image_list_installed_packages
- open(os.path.join(license_image_dir, 'package.manifest'),
- 'w+').write(image_list_installed_packages(d))
+ # Get list of installed packages
+ license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
+ bb.utils.mkdirhier(license_image_dir)
+ from oe.rootfs import image_list_installed_packages
+ open(os.path.join(license_image_dir, 'package.manifest'),
+ 'w+').write(image_list_installed_packages(d))
}
license_create_manifest() {
@@ -49,24 +49,25 @@ license_create_manifest() {
pkged_pv="$(sed -n 's/^PV: //p' ${filename})"
pkged_name="$(basename $(readlink ${filename}))"
- pkged_lic="$(sed -n "/^LICENSE_${pkged_name}: /{ s/^LICENSE_${pkged_name}: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
- if [ -z ${pkged_lic} ]; then
+ pkged_lic="$(sed -n "/^LICENSE_${pkged_name}: /{ s/^LICENSE_${pkged_name}: //; p }" ${filename})"
+ if [ -z "${pkged_lic}" ]; then
# fallback checking value of LICENSE
- pkged_lic="$(sed -n "/^LICENSE: /{ s/^LICENSE: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
+ pkged_lic="$(sed -n "/^LICENSE: /{ s/^LICENSE: //; p }" ${filename})"
fi
echo "PACKAGE NAME:" ${pkg} >> ${LICENSE_MANIFEST}
echo "PACKAGE VERSION:" ${pkged_pv} >> ${LICENSE_MANIFEST}
echo "RECIPE NAME:" ${pkged_pn} >> ${LICENSE_MANIFEST}
- printf "LICENSE:" >> ${LICENSE_MANIFEST}
- for lic in ${pkged_lic}; do
+ echo "LICENSE:" ${pkged_lic} >> ${LICENSE_MANIFEST}
+ echo "" >> ${LICENSE_MANIFEST}
+
+ lics="$(echo ${pkged_lic} | sed "s/[|&()*]/ /g" | sed "s/ */ /g" )"
+ for lic in ${lics}; do
# to reference a license file trim trailing + symbol
if ! [ -e "${LICENSE_DIRECTORY}/${pkged_pn}/generic_${lic%+}" ]; then
bbwarn "The license listed ${lic} was not in the licenses collected for ${pkged_pn}"
fi
- printf " ${lic}" >> ${LICENSE_MANIFEST}
done
- printf "\n\n" >> ${LICENSE_MANIFEST}
done
# Two options here:
@@ -79,7 +80,7 @@ license_create_manifest() {
if [ "${COPY_LIC_DIRS}" = "1" ]; then
for pkg in ${INSTALLED_PKGS}; do
mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}
- pkged_pn="$(oe-pkgdata-util lookup-recipe ${PKGDATA_DIR} ${pkg})"
+ pkged_pn="$(oe-pkgdata-util -p ${PKGDATA_DIR} lookup-recipe ${pkg})"
for lic in `ls ${LICENSE_DIRECTORY}/${pkged_pn}`; do
# Really don't need to copy the generics as they're
# represented in the manifest and in the actual pkg licenses
@@ -145,9 +146,16 @@ def copy_license_files(lic_files_paths, destdir):
bb.utils.mkdirhier(destdir)
for (basename, path) in lic_files_paths:
try:
- ret = shutil.copyfile(path, os.path.join(destdir, basename))
+ src = path
+ dst = os.path.join(destdir, basename)
+ if os.path.exists(dst):
+ os.remove(dst)
+ if os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev):
+ os.link(src, dst)
+ else:
+ shutil.copyfile(src, dst)
except Exception as e:
- bb.warn("Could not copy license file %s: %s" % (basename, e))
+ bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
def find_license_files(d):
"""
@@ -264,10 +272,50 @@ def return_spdx(d, license):
"""
return d.getVarFlag('SPDXLICENSEMAP', license, True)
+def canonical_license(d, license):
+ """
+ Return the canonical (SPDX) form of the license if available (so GPLv3
+ becomes GPL-3.0); for a license named 'X+', return the canonical form of
+ 'X', if available, with the trailing '+' re-attached (so GPLv3+ becomes
+ GPL-3.0+), or the passed license if there is no canonical form.
+ """
+ lic = d.getVarFlag('SPDXLICENSEMAP', license, True) or ""
+ if not lic and license.endswith('+'):
+ lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'), True)
+ if lic:
+ lic += '+'
+ return lic or license
+
+def expand_wildcard_licenses(d, wildcard_licenses):
+ """
+ Return actual spdx format license names if wildcard used. We expand
+ wildcards from SPDXLICENSEMAP flags and SRC_DISTRIBUTE_LICENSES values.
+ """
+ import fnmatch
+ licenses = []
+ spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
+ for wld_lic in wildcard_licenses:
+ spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
+ licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
+
+ spdx_lics = (d.getVar('SRC_DISTRIBUTE_LICENSES') or '').split()
+ for wld_lic in wildcard_licenses:
+ licenses += fnmatch.filter(spdx_lics, wld_lic)
+
+ licenses = list(set(licenses))
+ return licenses
+
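A hedged usage sketch of the two helpers above; the exact results depend on the distro's SPDXLICENSEMAP flags and SRC_DISTRIBUTE_LICENSES, the mappings shown being the common defaults:

    # canonical_license(d, 'GPLv3')  -> 'GPL-3.0'   (direct SPDXLICENSEMAP hit)
    # canonical_license(d, 'GPLv3+') -> 'GPL-3.0+'  ('+' re-attached after mapping)
    # canonical_license(d, 'MyLic')  -> 'MyLic'     (no canonical form, passed through)
    # expand_wildcard_licenses(d, ['GPL-3.0*'])
    #     might return ['GPL-3.0', 'GPL-3.0+'] with the default license data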
+def incompatible_license_contains(license, truevalue, falsevalue, d):
+ license = canonical_license(d, license)
+ bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
+ bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+ return truevalue if license in bad_licenses else falsevalue
+
def incompatible_license(d, dont_want_licenses, package=None):
"""
- This function checks if a recipe has only incompatible licenses. It also take into consideration 'or'
- operand.
+ This function checks if a recipe has only incompatible licenses. It also
+ takes the 'or' operator into consideration. dont_want_licenses should be
+ passed as canonical (SPDX) names.
"""
import re
import oe.license
@@ -298,7 +346,7 @@ def incompatible_license(d, dont_want_licenses, package=None):
licenses = oe.license.flattened_licenses(license, choose_lic_set)
except oe.license.LicenseError as exc:
bb.fatal('%s: %s' % (d.getVar('P', True), exc))
- return any(not license_ok(l) for l in licenses)
+ return any(not license_ok(canonical_license(d, l)) for l in licenses)
def check_license_flags(d):
"""
@@ -361,12 +409,37 @@ def check_license_flags(d):
return unmatched_flag
return None
+def check_license_format(d):
+ """
+ This function checks that LICENSE is well formed: it validates the
+ operators between license names and warns when licenses are separated
+ by spaces alone.
+ """
+ pn = d.getVar('PN', True)
+ licenses = d.getVar('LICENSE', True)
+ from oe.license import license_operator, license_operator_chars, license_pattern
+
+ elements = filter(lambda x: x.strip(), license_operator.split(licenses))
+ for pos, element in enumerate(elements):
+ if license_pattern.match(element):
+ if pos > 0 and license_pattern.match(elements[pos - 1]):
+ bb.warn('%s: LICENSE value "%s" has an invalid format - license names ' \
+ 'must be separated by the following characters to indicate ' \
+ 'the license selection: %s' %
+ (pn, licenses, license_operator_chars))
+ elif not license_operator.match(element):
+ bb.warn('%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
+ 'in the valid list of separators (%s)' %
+ (pn, licenses, element, license_operator_chars))
+
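For illustration, assuming the oe.license operators '&' (and) and '|' (or), check_license_format reacts to LICENSE values as follows:

    # LICENSE = "GPLv2 & MIT"           - valid, operator between names
    # LICENSE = "GPLv2 | BSD-3-Clause"  - valid, alternative licensing
    # LICENSE = "GPLv2 MIT"             - warned, names separated by a space only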
SSTATETASKS += "do_populate_lic"
do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
+do_populate_lic_setscene[dirs] = "${LICSSTATEDIR}/${PN}"
+do_populate_lic_setscene[cleandirs] = "${LICSSTATEDIR}"
python do_populate_lic_setscene () {
sstate_setscene(d)
}
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
index 4f2b0a4a98..89ce71605c 100644
--- a/meta/classes/linux-kernel-base.bbclass
+++ b/meta/classes/linux-kernel-base.bbclass
@@ -1,5 +1,5 @@
# parse kernel ABI version out of <linux/version.h>
-def get_kernelversion(p):
+def get_kernelversion_headers(p):
import re
fn = p + '/include/linux/utsrelease.h'
@@ -9,7 +9,6 @@ def get_kernelversion(p):
if not os.path.isfile(fn):
fn = p + '/include/linux/version.h'
- import re
try:
f = open(fn, 'r')
except IOError:
@@ -24,6 +23,16 @@ def get_kernelversion(p):
return m.group(1)
return None
+
+def get_kernelversion_file(p):
+ fn = p + '/kernel-abiversion'
+
+ try:
+ with open(fn, 'r') as f:
+ return f.readlines()[0].strip()
+ except IOError:
+ return None
+
def linux_module_packages(s, d):
suffix = ""
return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass
index d35c40bccd..5e6890238b 100644
--- a/meta/classes/meta.bbclass
+++ b/meta/classes/meta.bbclass
@@ -1,4 +1,4 @@
PACKAGES = ""
-do_build[recrdeptask] = "do_build" \ No newline at end of file
+do_build[recrdeptask] = "do_build"
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
index ba0edf9486..237e61821d 100644
--- a/meta/classes/metadata_scm.bbclass
+++ b/meta/classes/metadata_scm.bbclass
@@ -60,7 +60,7 @@ def base_get_metadata_svn_revision(path, d):
try:
with open("%s/.svn/entries" % path) as f:
revision = f.readlines()[3].strip()
- except IOError, IndexError:
+ except (IOError, IndexError):
pass
return revision
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
index 9537ba9f43..3eb2e9226e 100644
--- a/meta/classes/module-base.bbclass
+++ b/meta/classes/module-base.bbclass
@@ -3,16 +3,24 @@ inherit kernel-arch
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
-export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}"
+# This points to the build artefacts from the main kernel build
+# such as .config and System.map
+# Confusingly, it is not the module build output (which is ${B}), but
+# we didn't pick the name.
+export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
+
+export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
KERNEL_OBJECT_SUFFIX = ".ko"
# kernel modules are generally machine specific
PACKAGE_ARCH = "${MACHINE_ARCH}"
+do_configure[depends] += "virtual/kernel:do_shared_workdir"
+
# Function to ensure the kernel scripts are created. Expected to
# be called before do_compile. See module.bbclass for an example.
do_make_scripts() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
- -C ${STAGING_KERNEL_DIR} scripts
+ -C ${STAGING_KERNEL_DIR} O=${STAGING_KERNEL_BUILDDIR} scripts
}
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
index ad6f7af1bb..a03cc74de4 100644
--- a/meta/classes/module.bbclass
+++ b/meta/classes/module.bbclass
@@ -6,21 +6,23 @@ addtask make_scripts after do_patch before do_compile
do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
do_make_scripts[deptask] = "do_populate_sysroot"
+EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
+
module_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
- KERNEL_SRC=${STAGING_KERNEL_DIR} \
KERNEL_VERSION=${KERNEL_VERSION} \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
AR="${KERNEL_AR}" \
+ O=${STAGING_KERNEL_BUILDDIR} \
${MAKE_TARGETS}
}
module_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
- KERNEL_SRC=${STAGING_KERNEL_DIR} \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
+ O=${STAGING_KERNEL_BUILDDIR} \
modules_install
}
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index 9a1cb1d916..eea2fd59a1 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -21,6 +21,9 @@ python multilib_virtclass_handler () {
if bb.data.inherits_class('image', e.data):
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
+ target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
+ if target_vendor:
+ e.data.setVar("TARGET_VENDOR", target_vendor)
return
if bb.data.inherits_class('cross-canadian', e.data):
@@ -50,7 +53,7 @@ python multilib_virtclass_handler () {
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
# Expand the WHITELISTs with multilib prefix
- for whitelist in ["HOSTTOOLS_WHITELIST_GPLv3", "WHITELIST_GPLv3", "LGPLv2_WHITELIST_GPLv3"]:
+ for whitelist in ["HOSTTOOLS_WHITELIST_GPL-3.0", "WHITELIST_GPL-3.0", "LGPLv2_WHITELIST_GPL-3.0"]:
pkgs = e.data.getVar(whitelist, True)
for pkg in pkgs.split():
pkgs += " " + variant + "-" + pkg
@@ -60,6 +63,7 @@ python multilib_virtclass_handler () {
newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
if newtune:
e.data.setVar("DEFAULTTUNE", newtune)
+ e.data.setVar('DEFAULTTUNE_ML_%s' % variant, newtune)
}
addhandler multilib_virtclass_handler
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
index 3315ba9327..8ea2a5a4b8 100644
--- a/meta/classes/multilib_global.bbclass
+++ b/meta/classes/multilib_global.bbclass
@@ -1,11 +1,122 @@
-python multilib_virtclass_handler_global () {
- if not e.data:
+def preferred_ml_updates(d):
+ # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
+ # we need to mirror these variables in the multilib case;
+ multilibs = d.getVar('MULTILIBS', True) or ""
+ if not multilibs:
return
- if isinstance(e, bb.event.RecipePreFinalise):
+ prefixes = []
+ for ext in multilibs.split():
+ eext = ext.split(':')
+ if len(eext) > 1 and eext[0] == 'multilib':
+ prefixes.append(eext[1])
+
+ versions = []
+ providers = []
+ for v in d.keys():
+ if v.startswith("PREFERRED_VERSION_"):
+ versions.append(v)
+ if v.startswith("PREFERRED_PROVIDER_"):
+ providers.append(v)
+
+ for v in versions:
+ val = d.getVar(v, False)
+ pkg = v.replace("PREFERRED_VERSION_", "")
+ if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
+ continue
+ if '-cross-' in pkg and '${' in pkg:
+ for p in prefixes:
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ bb.data.update_data(localdata)
+ newname = localdata.expand(v).replace("PREFERRED_VERSION_", "PREFERRED_VERSION_" + p + '-')
+ if newname != v:
+ newval = localdata.expand(val)
+ d.setVar(newname, newval)
+ # Avoid future variable key expansion
+ vexp = d.expand(v)
+ if v != vexp and d.getVar(v, False):
+ d.renameVar(v, vexp)
+ continue
+ for p in prefixes:
+ newname = "PREFERRED_VERSION_" + p + "-" + pkg
+ if not d.getVar(newname, False):
+ d.setVar(newname, val)
+
+ for prov in providers:
+ val = d.getVar(prov, False)
+ pkg = prov.replace("PREFERRED_PROVIDER_", "")
+ if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
+ continue
+ if 'cross-canadian' in pkg:
+ for p in prefixes:
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ bb.data.update_data(localdata)
+ newname = localdata.expand(prov)
+ if newname != prov:
+ newval = localdata.expand(val)
+ d.setVar(newname, newval)
+ # Avoid future variable key expansion
+ provexp = d.expand(prov)
+ if prov != provexp and d.getVar(prov, False):
+ d.renameVar(prov, provexp)
+ continue
+ virt = ""
+ if pkg.startswith("virtual/"):
+ pkg = pkg.replace("virtual/", "")
+ virt = "virtual/"
+ for p in prefixes:
+ if pkg != "kernel":
+ newval = p + "-" + val
+
+ # implement variable keys
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ bb.data.update_data(localdata)
+ newname = localdata.expand(prov)
+ if newname != prov and not d.getVar(newname, False):
+ d.setVar(newname, localdata.expand(newval))
+
+ # implement alternative multilib name
+ newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
+ if not d.getVar(newname, False):
+ d.setVar(newname, newval)
+ # Avoid future variable key expansion
+ provexp = d.expand(prov)
+ if prov != provexp and d.getVar(prov, False):
+ d.renameVar(prov, provexp)
+
+
+ mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+ extramp = []
+ for p in mp:
+ if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
+ continue
+ virt = ""
+ if p.startswith("virtual/"):
+ p = p.replace("virtual/", "")
+ virt = "virtual/"
+ for pref in prefixes:
+ extramp.append(virt + pref + "-" + p)
+ d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
+
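A worked example of the mirroring above (recipe names and values hypothetical): with MULTILIBS = "multilib:lib32" and the base configuration

    PREFERRED_VERSION_foo = "1.2"
    PREFERRED_PROVIDER_virtual/libx = "bar"

preferred_ml_updates() additionally sets

    PREFERRED_VERSION_lib32-foo = "1.2"
    PREFERRED_PROVIDER_virtual/lib32-libx = "lib32-bar"

while -native, -crosssdk and nativesdk- entries are skipped, since those never acquire a multilib prefix.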
+python multilib_virtclass_handler_vendor () {
+ if isinstance(e, bb.event.ConfigParsed):
for v in e.data.getVar("MULTILIB_VARIANTS", True).split():
if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
+ preferred_ml_updates(e.data)
+}
+addhandler multilib_virtclass_handler_vendor
+multilib_virtclass_handler_vendor[eventmask] = "bb.event.ConfigParsed"
+
+python multilib_virtclass_handler_global () {
+ if not e.data:
+ return
variant = e.data.getVar("BBEXTENDVARIANT", True)
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index 9dec318aa8..5ca5c95b4d 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -61,16 +61,17 @@ PTEST_ENABLED = "0"
export CONFIG_SITE = "${COREBASE}/meta/site/native"
# set the compiler as well. It could have been set to something else
-export CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}"
-export CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}"
-export FC = "${CCACHE}${HOST_PREFIX}gfortran ${HOST_CC_ARCH}"
-export CPP = "${HOST_PREFIX}gcc ${HOST_CC_ARCH} -E"
-export LD = "${HOST_PREFIX}ld ${HOST_LD_ARCH} "
-export CCLD = "${CC}"
-export AR = "${HOST_PREFIX}ar"
-export AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}"
-export RANLIB = "${HOST_PREFIX}ranlib"
-export STRIP = "${HOST_PREFIX}strip"
+export CC = "${BUILD_CC}"
+export CXX = "${BUILD_CXX}"
+export FC = "${BUILD_FC}"
+export CPP = "${BUILD_CPP}"
+export LD = "${BUILD_LD}"
+export CCLD = "${BUILD_CCLD}"
+export AR = "${BUILD_AR}"
+export AS = "${BUILD_AS}"
+export RANLIB = "${BUILD_RANLIB}"
+export STRIP = "${BUILD_STRIP}"
+export NM = "${BUILD_NM}"
# Path prefixes
base_prefix = "${STAGING_DIR_NATIVE}"
@@ -109,6 +110,7 @@ PKG_CONFIG_SYSROOT_DIR = ""
# we dont want libc-uclibc or libc-glibc to kick in for native recipes
LIBCOVERRIDE = ""
CLASSOVERRIDE = "class-native"
+MACHINEOVERRIDES = ""
PATH_prepend = "${COREBASE}/scripts/native-intercept:"
@@ -130,7 +132,9 @@ python native_virtclass_handler () {
deps = bb.utils.explode_deps(deps)
newdeps = []
for dep in deps:
- if "-cross-" in dep:
+ if dep == pn:
+ continue
+ elif "-cross-" in dep:
newdeps.append(dep.replace("-cross", "-native"))
elif not dep.endswith("-native"):
newdeps.append(dep + "-native")
@@ -162,6 +166,7 @@ native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
deltask package
deltask packagedata
+deltask package_qa
deltask package_write_ipk
deltask package_write_deb
deltask package_write_rpm
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index 7f94258bf1..30bcdfeb44 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -8,6 +8,7 @@ STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${S
NATIVESDKLIBC ?= "libc-glibc"
LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-nativesdk"
+MACHINEOVERRIDES = ""
#
# Update PACKAGE_ARCH and PACKAGE_ARCHS
@@ -24,6 +25,7 @@ EXTRANATIVEPATH += "chrpath-native"
STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
+PKGDATA_DIR = "${STAGING_DIR_HOST}/pkgdata"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
@@ -42,7 +44,7 @@ TARGET_CC_ARCH = "${SDK_CC_ARCH}"
TARGET_LD_ARCH = "${SDK_LD_ARCH}"
TARGET_AS_ARCH = "${SDK_AS_ARCH}"
TARGET_FPU = ""
-EXTRA_OECONF_FPU = ""
+EXTRA_OECONF_GCC_FLOAT = ""
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
index f2e7540dcf..d00f468d9a 100644
--- a/meta/classes/oelint.bbclass
+++ b/meta/classes/oelint.bbclass
@@ -1,174 +1,85 @@
addtask lint before do_fetch
do_lint[nostamp] = "1"
python do_lint() {
- def testVar(var, explain=None):
- try:
- s = d[var]
- return s["content"]
- except KeyError:
- bb.error("%s is not set" % var)
- if explain: bb.note(explain)
- return None
-
-
- ##############################
- # Test that DESCRIPTION exists
- #
- testVar("DESCRIPTION")
-
-
- ##############################
- # Test that HOMEPAGE exists
- #
- s = testVar("HOMEPAGE")
- if s=="unknown":
- bb.error("HOMEPAGE is not set")
- elif not s.startswith("http://"):
- bb.error("HOMEPAGE doesn't start with http://")
-
-
-
- ##############################
- # Test for valid LICENSE
- #
- valid_licenses = {
- "GPL-2" : "GPLv2",
- "GPL LGPL FDL" : True,
- "GPL PSF" : True,
- "GPL/QPL" : True,
- "GPL" : True,
- "GPLv2" : True,
- "IBM" : True,
- "LGPL GPL" : True,
- "LGPL" : True,
- "MIT" : True,
- "OSL" : True,
- "Perl" : True,
- "Public Domain" : True,
- "QPL" : "GPL/QPL",
- }
- s = testVar("LICENSE")
- if s=="unknown":
- bb.error("LICENSE is not set")
- elif s.startswith("Vendor"):
- pass
- else:
- try:
- newlic = valid_licenses[s]
- if newlic == False:
- bb.note("LICENSE '%s' is not recommended" % s)
- elif newlic != True:
- bb.note("LICENSE '%s' is not recommended, better use '%s'" % (s, newsect))
- except:
- bb.note("LICENSE '%s' is not recommended" % s)
-
-
- ##############################
- # Test for valid MAINTAINER
- #
- s = testVar("MAINTAINER")
- if s=="OpenEmbedded Team <openembedded-devel@openembedded.org>":
- bb.error("explicit MAINTAINER is missing, using default")
- elif s and s.find("@") == -1:
- bb.error("You forgot to put an e-mail address into MAINTAINER")
-
-
- ##############################
- # Test for valid SECTION
- #
- # if Correct section: True section name is valid
- # False section name is invalid, no suggestion
- # string section name is invalid, better name suggested
- #
- valid_sections = {
- # Current Section Correct section
- "apps" : True,
- "audio" : True,
- "base" : True,
- "console/games" : True,
- "console/net" : "console/network",
- "console/network" : True,
- "console/utils" : True,
- "devel" : True,
- "developing" : "devel",
- "devel/python" : True,
- "fonts" : True,
- "games" : True,
- "games/libs" : True,
- "gnome/base" : True,
- "gnome/libs" : True,
- "gpe" : True,
- "gpe/libs" : True,
- "gui" : False,
- "libc" : "libs",
- "libs" : True,
- "libs/net" : True,
- "multimedia" : True,
- "net" : "network",
- "NET" : "network",
- "network" : True,
- "opie/applets" : True,
- "opie/applications" : True,
- "opie/base" : True,
- "opie/codecs" : True,
- "opie/decorations" : True,
- "opie/fontfactories" : True,
- "opie/fonts" : True,
- "opie/games" : True,
- "opie/help" : True,
- "opie/inputmethods" : True,
- "opie/libs" : True,
- "opie/multimedia" : True,
- "opie/pim" : True,
- "opie/setting" : "opie/settings",
- "opie/settings" : True,
- "opie/Shell" : False,
- "opie/styles" : True,
- "opie/today" : True,
- "scientific" : True,
- "utils" : True,
- "x11" : True,
- "x11/libs" : True,
- "x11/wm" : True,
- }
- s = testVar("SECTION")
- if s:
- try:
- newsect = valid_sections[s]
- if newsect == False:
- bb.note("SECTION '%s' is not recommended" % s)
- elif newsect != True:
- bb.note("SECTION '%s' is not recommended, better use '%s'" % (s, newsect))
- except:
- bb.note("SECTION '%s' is not recommended" % s)
-
- if not s.islower():
- bb.error("SECTION should only use lower case")
-
-
-
-
- ##############################
- # Test for valid PRIORITY
- #
- valid_priorities = {
- "standard" : True,
- "required" : True,
- "optional" : True,
- "extra" : True,
- }
- s = testVar("PRIORITY")
- if s:
- try:
- newprio = valid_priorities[s]
- if newprio == False:
- bb.note("PRIORITY '%s' is not recommended" % s)
- elif newprio != True:
- bb.note("PRIORITY '%s' is not recommended, better use '%s'" % (s, newprio))
- except:
- bb.note("PRIORITY '%s' is not recommended" % s)
-
- if not s.islower():
- bb.error("PRIORITY should only use lower case")
-
+ pkgname = d.getVar("PN", True)
+
+ ##############################
+ # Test that DESCRIPTION exists
+ #
+ description = d.getVar("DESCRIPTION")
+ if description[1:10] == '{SUMMARY}':
+ bb.warn("%s: DESCRIPTION is not set" % pkgname)
+
+
+ ##############################
+ # Test that HOMEPAGE exists
+ #
+ homepage = d.getVar("HOMEPAGE")
+ if homepage == '':
+ bb.warn("%s: HOMEPAGE is not set" % pkgname)
+ elif not homepage.startswith("http://") and not homepage.startswith("https://"):
+ bb.warn("%s: HOMEPAGE doesn't start with http:// or https://" % pkgname)
+
+
+ ##############################
+ # Test for valid SECTION
+ #
+ section = d.getVar("SECTION")
+ if section == '':
+ bb.warn("%s: SECTION is not set" % pkgname)
+ elif not section.islower():
+ bb.warn("%s: SECTION should only use lower case" % pkgname)
+
+
+ ##############################
+ # Check that all patches have Signed-off-by and Upstream-Status
+ #
+ srcuri = d.getVar("SRC_URI").split()
+ fpaths = (d.getVar('FILESPATH', True) or '').split(':')
+
+ def findPatch(patchname):
+ for dir in fpaths:
+ patchpath = dir + patchname
+ if os.path.exists(patchpath):
+ return patchpath
+
+ def findKey(path, key):
+ ret = True
+ f = file('%s' % path, mode = 'r')
+ line = f.readline()
+ while line:
+ if line.find(key) != -1:
+ ret = False
+ line = f.readline()
+ f.close()
+ return ret
+
+ length = len("file://")
+ for item in srcuri:
+ if item.startswith("file://"):
+ item = item[length:]
+ if item.endswith(".patch") or item.endswith(".diff"):
+ path = findPatch(item)
+ if findKey(path, "Signed-off-by"):
+ bb.warn("%s: %s doesn't have Signed-off-by" % (pkgname, item))
+ if findKey(path, "Upstream-Status"):
+ bb.warn("%s: %s doesn't have Upstream-Status" % (pkgname, item))
+
+
+ ##############################
+ # Check for ${PN} or ${P} usage in SRC_URI or S
+ # Should use ${BPN} or ${BP} instead to avoid breaking multilib
+ #
+ for s in srcuri:
+ if not s.startswith("file://"):
+ if not s.find("{PN}") == -1:
+ bb.warn("%s: should use BPN instead of PN in SRC_URI" % pkgname)
+ if not s.find("{P}") == -1:
+ bb.warn("%s: should use BP instead of P in SRC_URI" % pkgname)
+
+ srcpath = d.getVar("S")
+ if not srcpath.find("{PN}") == -1:
+ bb.warn("%s: should use BPN instead of PN in S" % pkgname)
+ if not srcpath.find("{P}") == -1:
+ bb.warn("%s: should use BP instead of P in S" % pkgname)
}
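
One subtlety in the rewritten do_lint above: findKey returns True when the key is absent, so a truthy result is what triggers the warning. A standalone equivalent of the patch-header check (the patch path is hypothetical):

# Returns True when the key is missing, matching findKey's inverted sense.
def key_missing(path, key):
    with open(path) as f:
        return all(key not in line for line in f)

patchpath = "files/0001-fix-build.patch"  # hypothetical
for key in ("Signed-off-by", "Upstream-Status"):
    if key_missing(patchpath, key):
        print("%s doesn't have %s" % (patchpath, key))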
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
index 8a6feaf4d5..77bf0c1c14 100644
--- a/meta/classes/own-mirrors.bbclass
+++ b/meta/classes/own-mirrors.bbclass
@@ -2,6 +2,7 @@ PREMIRRORS() {
cvs://.*/.* ${SOURCE_MIRROR_URL}
svn://.*/.* ${SOURCE_MIRROR_URL}
git://.*/.* ${SOURCE_MIRROR_URL}
+gitsm://.*/.* ${SOURCE_MIRROR_URL}
hg://.*/.* ${SOURCE_MIRROR_URL}
bzr://.*/.* ${SOURCE_MIRROR_URL}
svk://.*/.* ${SOURCE_MIRROR_URL}
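
With gitsm:// now covered, the usual activation in local.conf routes git submodule fetches through the same mirror (the URL is a placeholder):

INHERIT += "own-mirrors"
SOURCE_MIRROR_URL = "http://example.com/my-source-mirror"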
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index ea7591855e..9f64ed77e5 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -208,16 +208,18 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
else:
the_files.append(aux_files_pattern_verbatim % m.group(1))
d.setVar('FILES_' + pkg, " ".join(the_files))
- if extra_depends != '':
- d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
- d.setVar('DESCRIPTION_' + pkg, description % on)
- d.setVar('SUMMARY_' + pkg, summary % on)
- if postinst:
- d.setVar('pkg_postinst_' + pkg, postinst)
- if postrm:
- d.setVar('pkg_postrm_' + pkg, postrm)
else:
d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
+ if extra_depends != '':
+ d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
+ if not d.getVar('DESCRIPTION_' + pkg, True):
+ d.setVar('DESCRIPTION_' + pkg, description % on)
+ if not d.getVar('SUMMARY_' + pkg, True):
+ d.setVar('SUMMARY_' + pkg, summary % on)
+ if postinst:
+ d.setVar('pkg_postinst_' + pkg, postinst)
+ if postrm:
+ d.setVar('pkg_postrm_' + pkg, postrm)
if callable(hook):
hook(f, pkg, file_regex, output_pattern, m.group(1))
@@ -237,6 +239,66 @@ python () {
d.appendVarFlag('do_package', 'deptask', " do_packagedata")
}
+# Get a list of files from file variables by searching for files under the
+# current working directory. The list contains symlinks, directories and normal files.
+def files_from_filevars(filevars):
+ import os,glob
+ cpath = oe.cachedpath.CachedPath()
+ files = []
+ for f in filevars:
+ if os.path.isabs(f):
+ f = '.' + f
+ if not f.startswith("./"):
+ f = './' + f
+ globbed = glob.glob(f)
+ if globbed:
+ if [ f ] != globbed:
+ files += globbed
+ continue
+ files.append(f)
+
+ for f in files:
+ if not cpath.islink(f):
+ if cpath.isdir(f):
+ newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
+ if newfiles:
+ files += newfiles
+
+ return files
+
+# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
+def get_conffiles(pkg, d):
+ pkgdest = d.getVar('PKGDEST', True)
+ root = os.path.join(pkgdest, pkg)
+ cwd = os.getcwd()
+ os.chdir(root)
+
+ conffiles = d.getVar('CONFFILES_%s' % pkg, True);
+ if conffiles == None:
+ conffiles = d.getVar('CONFFILES', True)
+ if conffiles == None:
+ conffiles = ""
+ conffiles = conffiles.split()
+ conf_orig_list = files_from_filevars(conffiles)
+
+ # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
+ conf_list = []
+ for f in conf_orig_list:
+ if os.path.isdir(f):
+ continue
+ if os.path.islink(f):
+ continue
+ if not os.path.exists(f):
+ continue
+ conf_list.append(f)
+
+ # Remove the leading './'
+ for i in range(0, len(conf_list)):
+ conf_list[i] = conf_list[i][1:]
+
+ os.chdir(cwd)
+ return conf_list
+
def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
# Function to split a single file into two components, one is the stripped
# target system binary, the other contains any debugging information. The
@@ -374,6 +436,9 @@ def get_package_additional_metadata (pkg_type, d):
def runtime_mapping_rename (varname, pkg, d):
#bb.note("%s before: %s" % (varname, d.getVar(varname, True)))
+ if bb.data.inherits_class('packagegroup', d):
+ return
+
new_depends = {}
deps = bb.utils.explode_dep_versions2(d.getVar(varname, True) or "")
for depend in deps:
@@ -389,28 +454,53 @@ def runtime_mapping_rename (varname, pkg, d):
#
python package_get_auto_pr() {
- # per recipe PRSERV_HOST
+ import oe.prservice
+ import re
+
+ # Support per recipe PRSERV_HOST
pn = d.getVar('PN', True)
host = d.getVar("PRSERV_HOST_" + pn, True)
if not (host is None):
d.setVar("PRSERV_HOST", host)
- if d.getVar('PRSERV_HOST', True):
- try:
- auto_pr=prserv_get_pr_auto(d)
- except Exception as e:
- bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
- if auto_pr is None:
- if d.getVar('PRSERV_LOCKDOWN', True):
- bb.fatal("Can NOT get PRAUTO from lockdown exported file")
- else:
- bb.fatal("Can NOT get PRAUTO from remote PR service")
- return
- d.setVar('PRAUTO',str(auto_pr))
- else:
- pkgv = d.getVar("PKGV", True)
+ pkgv = d.getVar("PKGV", True)
+
+ # PR Server not active, handle AUTOINC
+ if not d.getVar('PRSERV_HOST', True):
if 'AUTOINC' in pkgv:
d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
+ return
+
+ auto_pr = None
+ pv = d.getVar("PV", True)
+ version = d.getVar("PRAUTOINX", True)
+ pkgarch = d.getVar("PACKAGE_ARCH", True)
+ checksum = d.getVar("BB_TASKHASH", True)
+
+ if d.getVar('PRSERV_LOCKDOWN', True):
+ auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
+ if auto_pr is None:
+ bb.fatal("Can NOT get PRAUTO from lockdown exported file")
+ d.setVar('PRAUTO',str(auto_pr))
+ return
+
+ try:
+ conn = d.getVar("__PRSERV_CONN", True)
+ if conn is None:
+ conn = oe.prservice.prserv_make_conn(d)
+ if conn is not None:
+ if "AUTOINC" in pkgv:
+ srcpv = bb.fetch2.get_srcrev(d)
+ base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
+ value = conn.getPR(base_ver, pkgarch, srcpv)
+ d.setVar("PKGV", pkgv.replace("AUTOINC", str(value)))
+
+ auto_pr = conn.getPR(version, pkgarch, checksum)
+ except Exception as e:
+ bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
+ if auto_pr is None:
+ bb.fatal("Can NOT get PRAUTO from remote PR service")
+ d.setVar('PRAUTO',str(auto_pr))
}
LOCALEBASEPN ??= "${PN}"
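
The restructured package_get_auto_pr handles the no-PR-server case first: the AUTOINC placeholder in PKGV simply collapses to 0. A tiny illustration with an invented PKGV:

pkgv = "1.0+gitAUTOINC+1234abcd"  # hypothetical PKGV
if 'AUTOINC' in pkgv:
    pkgv = pkgv.replace("AUTOINC", "0")
print(pkgv)  # -> 1.0+git0+1234abcd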
@@ -928,13 +1018,7 @@ python split_and_strip_files () {
for f in kernmods:
sfiles.append((f, 16, strip))
-
- import multiprocessing
- nproc = multiprocessing.cpu_count()
- pool = bb.utils.multiprocessingpool(nproc)
- processed = list(pool.imap(oe.package.runstrip, sfiles))
- pool.close()
- pool.join()
+ oe.utils.multiprocess_exec(sfiles, oe.package.runstrip)
#
# End of strip
@@ -985,26 +1069,9 @@ python populate_packages () {
filesvar.replace("//", "/")
origfiles = filesvar.split()
- files = []
- for file in origfiles:
- if os.path.isabs(file):
- file = '.' + file
- if not file.startswith("./"):
- file = './' + file
- globbed = glob.glob(file)
- if globbed:
- if [ file ] != globbed:
- files += globbed
- continue
- files.append(file)
+ files = files_from_filevars(origfiles)
for file in files:
- if not cpath.islink(file):
- if cpath.isdir(file):
- newfiles = [ os.path.join(file,x) for x in os.listdir(file) ]
- if newfiles:
- files += newfiles
- continue
if (not cpath.islink(file)) and (not cpath.exists(file)):
continue
if file in seen:
@@ -1119,6 +1186,18 @@ python package_fixsymlinks () {
d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
+
+python package_package_name_hook() {
+ """
+ A package_name_hook function can be used to rewrite the package names by
+ changing PKG. For an example, see debian.bbclass.
+ """
+ pass
+}
+
+EXPORT_FUNCTIONS package_name_hook
+
+
PKGDESTWORK = "${WORKDIR}/pkgdata"
python emit_pkgdata() {
@@ -1134,11 +1213,11 @@ python emit_pkgdata() {
val = d.getVar('%s_%s' % (var, pkg), True)
if val:
f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
- return
+ return val
val = d.getVar('%s' % (var), True)
if val:
f.write('%s: %s\n' % (var, encode(val)))
- return
+ return val
def write_extra_pkgs(variants, pn, packages, pkgdatadir):
for variant in variants:
@@ -1207,7 +1286,7 @@ python emit_pkgdata() {
write_if_exists(sf, pkg, 'DESCRIPTION')
write_if_exists(sf, pkg, 'SUMMARY')
write_if_exists(sf, pkg, 'RDEPENDS')
- write_if_exists(sf, pkg, 'RPROVIDES')
+ rprov = write_if_exists(sf, pkg, 'RPROVIDES')
write_if_exists(sf, pkg, 'RRECOMMENDS')
write_if_exists(sf, pkg, 'RSUGGESTS')
write_if_exists(sf, pkg, 'RREPLACES')
@@ -1232,9 +1311,12 @@ python emit_pkgdata() {
sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
sf.close()
- # Symlinks needed for reverse lookups (from the final package name)
- subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
- oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
+ # Symlinks needed for rprovides lookup
+ if rprov:
+ for p in rprov.strip().split():
+ subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
+ bb.utils.mkdirhier(os.path.dirname(subdata_sym))
+ oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg, True)
if not allow_empty:
@@ -1243,6 +1325,10 @@ python emit_pkgdata() {
os.chdir(root)
g = glob('*')
if g or allow_empty == "1":
+ # Symlinks needed for reverse lookups (from the final package name)
+ subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
+ oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
+
packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
open(packagedfile, 'w').close()
@@ -1254,7 +1340,7 @@ python emit_pkgdata() {
bb.utils.unlockfile(lf)
}
-emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse"
+emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
ldconfig_postinst_fragment() {
if [ x"$D" = "x" ]; then
@@ -1292,12 +1378,7 @@ python package_do_filedeps() {
for files in chunks(pkgfiles[pkg], 100):
pkglist.append((pkg, files, rpmdeps, pkgdest))
- import multiprocessing
- nproc = multiprocessing.cpu_count()
- pool = bb.utils.multiprocessingpool(nproc)
- processed = list(pool.imap(oe.package.filedeprunner, pkglist))
- pool.close()
- pool.join()
+ processed = oe.utils.multiprocess_exec( pkglist, oe.package.filedeprunner)
provides_files = {}
requires_files = {}
@@ -1326,11 +1407,12 @@ python package_do_filedeps() {
d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
}
-SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs"
-SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs"
+SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs2"
+SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
python package_do_shlibs() {
import re, pipes
+ import subprocess as sub
exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', 0)
if exclude_shlibs:
@@ -1353,65 +1435,48 @@ python package_do_shlibs() {
pkgdest = d.getVar('PKGDEST', True)
- shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
# Take shared lock since we're only reading, not writing
lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
- def read_shlib_providers():
- list_re = re.compile('^(.*)\.list$')
- # Go from least to most specific since the last one found wins
- for dir in reversed(shlibs_dirs):
- bb.debug(2, "Reading shlib providers in %s" % (dir))
- if not os.path.exists(dir):
- continue
- for file in os.listdir(dir):
- m = list_re.match(file)
- if m:
- dep_pkg = m.group(1)
- fd = open(os.path.join(dir, file))
- lines = fd.readlines()
- fd.close()
- ver_file = os.path.join(dir, dep_pkg + '.ver')
- lib_ver = None
- if os.path.exists(ver_file):
- fd = open(ver_file)
- lib_ver = fd.readline().rstrip()
- fd.close()
- for l in lines:
- shlib_provider[l.rstrip()] = (dep_pkg, lib_ver)
-
- def linux_so(file):
+ def linux_so(file, needed, sonames, renames, pkgver):
needs_ldconfig = False
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
cmd = d.getVar('OBJDUMP', True) + " -p " + pipes.quote(file) + " 2>/dev/null"
fd = os.popen(cmd)
lines = fd.readlines()
fd.close()
+ rpath = []
+ for l in lines:
+ m = re.match("\s+RPATH\s+([^\s]*)", l)
+ if m:
+ rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
+ rpath = map(os.path.normpath, rpaths)
for l in lines:
m = re.match("\s+NEEDED\s+([^\s]*)", l)
if m:
- if m.group(1) not in needed[pkg]:
- needed[pkg].append(m.group(1))
- if m.group(1) not in needed_from:
- needed_from[m.group(1)] = []
- needed_from[m.group(1)].append(file)
+ dep = m.group(1)
+ if dep not in needed[pkg]:
+ needed[pkg].append((dep, file, rpath))
m = re.match("\s+SONAME\s+([^\s]*)", l)
if m:
this_soname = m.group(1)
- if not this_soname in sonames:
+ prov = (this_soname, ldir, pkgver)
+ if not prov in sonames:
# if library is private (only used by package) then do not build shlib for it
if not private_libs or this_soname not in private_libs:
- sonames.append(this_soname)
+ sonames.append(prov)
if libdir_re.match(os.path.dirname(file)):
needs_ldconfig = True
if snap_symlinks and (os.path.basename(file) != this_soname):
renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
return needs_ldconfig
- def darwin_so(file):
+ def darwin_so(file, needed, sonames, renames, pkgver):
if not os.path.exists(file):
return
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
def get_combinations(base):
#
@@ -1433,46 +1498,32 @@ python package_do_shlibs() {
combos = get_combinations(name)
for combo in combos:
if not combo in sonames:
- sonames.append(combo)
+ prov = (combo, ldir, pkgver)
+ sonames.append(prov)
if file.endswith('.dylib') or file.endswith('.so'):
- lafile = file.replace(os.path.join(pkgdest, pkg), d.getVar('PKGD', True))
- # Drop suffix
- lafile = lafile.rsplit(".",1)[0]
- lapath = os.path.dirname(lafile)
- lafile = os.path.basename(lafile)
- # Find all combinations
- combos = get_combinations(lafile)
- for combo in combos:
- if os.path.exists(lapath + '/' + combo + '.la'):
- break
- lafile = lapath + '/' + combo + '.la'
-
- #bb.note("Foo2: %s" % lafile)
- #bb.note("Foo %s" % file)
- if os.path.exists(lafile):
- fd = open(lafile, 'r')
- lines = fd.readlines()
- fd.close()
- for l in lines:
- m = re.match("\s*dependency_libs=\s*'(.*)'", l)
- if m:
- deps = m.group(1).split(" ")
- for dep in deps:
- #bb.note("Trying %s for %s" % (dep, pkg))
- name = None
- if dep.endswith(".la"):
- name = os.path.basename(dep).replace(".la", "")
- elif dep.startswith("-l"):
- name = dep.replace("-l", "lib")
- if pkg not in needed:
- needed[pkg] = []
- if name and name not in needed[pkg]:
- needed[pkg].append(name)
- if name not in needed_from:
- needed_from[name] = []
- if lafile and lafile not in needed_from[name]:
- needed_from[name].append(lafile)
- #bb.note("Adding %s for %s" % (name, pkg))
+ rpath = []
+ p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file],stdout=sub.PIPE,stderr=sub.PIPE)
+ err, out = p.communicate()
+ # If the command returned successfully, process its output for results
+ if p.returncode == 0:
+ for l in err.split("\n"):
+ l = l.strip()
+ if l.startswith('path '):
+ rpath.append(l.split()[1])
+
+ p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file],stdout=sub.PIPE,stderr=sub.PIPE)
+ err, out = p.communicate()
+ # If the command returned successfully, process its output for results
+ if p.returncode == 0:
+ for l in err.split("\n"):
+ l = l.strip()
+ if not l or l.endswith(":"):
+ continue
+ if "is not an object file" in l:
+ continue
+ name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
+ if name and name not in needed[pkg]:
+ needed[pkg].append((name, file, []))
if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS', True) == "1":
snap_symlinks = True
@@ -1485,9 +1536,7 @@ python package_do_shlibs() {
use_ldconfig = False
needed = {}
- needed_from = {}
- shlib_provider = {}
- read_shlib_providers()
+ shlib_provider = oe.package.read_shlib_providers(d)
for pkg in packages.split():
private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True) or ""
@@ -1509,9 +1558,9 @@ python package_do_shlibs() {
if cpath.islink(file):
continue
if targetos == "darwin" or targetos == "darwin8":
- darwin_so(file)
+ darwin_so(file, needed, sonames, renames, pkgver)
elif os.access(file, os.X_OK) or lib_re.match(file):
- ldconfig = linux_so(file)
+ ldconfig = linux_so(file, needed, sonames, renames, pkgver)
needs_ldconfig = needs_ldconfig or ldconfig
for (old, new) in renames:
bb.note("Renaming %s to %s" % (old, new))
@@ -1519,20 +1568,18 @@ python package_do_shlibs() {
pkgfiles[pkg].remove(old)
shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
- shver_file = os.path.join(shlibswork_dir, pkg + ".ver")
if len(sonames):
fd = open(shlibs_file, 'w')
for s in sonames:
- if s in shlib_provider:
- (old_pkg, old_pkgver) = shlib_provider[s]
+ if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
+ (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
if old_pkg != pkg:
- bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s, pkg, pkgver))
- bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s))
- fd.write(s + '\n')
- shlib_provider[s] = (pkg, pkgver)
- fd.close()
- fd = open(shver_file, 'w')
- fd.write(pkgver + '\n')
+ bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
+ bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
+ fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (pkg, pkgver)
fd.close()
if needs_ldconfig and use_ldconfig:
bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
@@ -1547,6 +1594,7 @@ python package_do_shlibs() {
assumed_libs = d.getVar('ASSUME_SHLIBS', True)
if assumed_libs:
+ libdir = d.getVar("libdir", True)
for e in assumed_libs.split():
l, dep_pkg = e.split(":")
lib_ver = None
@@ -1554,7 +1602,11 @@ python package_do_shlibs() {
if len(dep_pkg) == 2:
lib_ver = dep_pkg[1]
dep_pkg = dep_pkg[0]
- shlib_provider[l] = (dep_pkg, lib_ver)
+ if l not in shlib_provider:
+ shlib_provider[l] = {}
+ shlib_provider[l][libdir] = (dep_pkg, lib_ver)
+
+ libsearchpath = [d.getVar('libdir', True), d.getVar('base_libdir', True)]
for pkg in packages.split():
bb.debug(2, "calculating shlib requirements for %s" % pkg)
@@ -1566,25 +1618,34 @@ python package_do_shlibs() {
# /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on system library libfoo.so.1
# but skipping it is still better alternative than providing own
# version and then adding runtime dependency for the same system library
- if private_libs and n in private_libs:
- bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n))
+ if private_libs and n[0] in private_libs:
+ bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
continue
- if n in shlib_provider.keys():
- (dep_pkg, ver_needed) = shlib_provider[n]
+ if n[0] in shlib_provider.keys():
+ shlib_provider_path = list()
+ for k in shlib_provider[n[0]].keys():
+ shlib_provider_path.append(k)
+ match = None
+ for p in n[2] + shlib_provider_path + libsearchpath:
+ if p in shlib_provider[n[0]]:
+ match = p
+ break
+ if match:
+ (dep_pkg, ver_needed) = shlib_provider[n[0]][match]
- bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n, dep_pkg, needed_from[n]))
+ bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
- if dep_pkg == pkg:
- continue
+ if dep_pkg == pkg:
+ continue
- if ver_needed:
- dep = "%s (>= %s)" % (dep_pkg, ver_needed)
- else:
- dep = dep_pkg
- if not dep in deps:
- deps.append(dep)
- else:
- bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n, needed_from[n]))
+ if ver_needed:
+ dep = "%s (>= %s)" % (dep_pkg, ver_needed)
+ else:
+ dep = dep_pkg
+ if not dep in deps:
+ deps.append(dep)
+ continue
+ bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
if os.path.exists(deps_file):
@@ -1922,9 +1983,9 @@ python do_package () {
# Optimisations
###########################################################################
- # Contunually rexpanding complex expressions is inefficient, particularly when
- # we write to the datastore and invalidate the expansion cache. This code
- # pre-expands some frequently used variables
+ # Continually expanding complex expressions is inefficient, particularly
+ # when we write to the datastore and invalidate the expansion cache. This
+ # code pre-expands some frequently used variables
def expandVar(x, d):
d.setVar(x, d.getVar(x, True))
@@ -2013,7 +2074,4 @@ def mapping_rename_hook(d):
runtime_mapping_rename("RDEPENDS", pkg, d)
runtime_mapping_rename("RRECOMMENDS", pkg, d)
runtime_mapping_rename("RSUGGESTS", pkg, d)
- runtime_mapping_rename("RPROVIDES", pkg, d)
- runtime_mapping_rename("RREPLACES", pkg, d)
- runtime_mapping_rename("RCONFLICTS", pkg, d)
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index 7bc29df165..9d7c59ba53 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -239,17 +239,30 @@ python do_package_deb () {
scriptvar = localdata.getVar('pkg_%s' % script, True)
if not scriptvar:
continue
+ scriptvar = scriptvar.strip()
try:
scriptfile = open(os.path.join(controldir, script), 'w')
except OSError:
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
- scriptfile.write("#!/bin/sh\n")
- scriptfile.write(scriptvar)
+
+ if scriptvar.startswith("#!"):
+ pos = scriptvar.find("\n") + 1
+ scriptfile.write(scriptvar[:pos])
+ else:
+ pos = 0
+ scriptfile.write("#!/bin/sh\n")
+
+ # Prevent the prerm/postrm scripts from being run during an upgrade
+ if script in ('prerm', 'postrm'):
+ scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')
+
+ scriptfile.write(scriptvar[pos:])
+ scriptfile.write('\n')
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0755)
- conffiles_str = localdata.getVar("CONFFILES", True)
+ conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
try:
conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
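
The maintainer-script emission above now honours a recipe-supplied shebang and guards prerm/postrm against running on upgrade. A standalone sketch with an invented pkg_postrm value:

scriptvar = "#!/bin/sh\nrm -f /var/cache/foo.db".strip()  # hypothetical
if scriptvar.startswith("#!"):
    pos = scriptvar.find("\n") + 1
else:
    pos = 0
script = scriptvar[:pos] or "#!/bin/sh\n"
script += '[ "$1" != "upgrade" ] || exit 0\n'  # skip on upgrade
script += scriptvar[pos:] + '\n'
print(script)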
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index 2949d1d2e0..dba68042ac 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -63,7 +63,32 @@ python do_package_ipk () {
bb.data.update_data(localdata)
basedir = os.path.join(os.path.dirname(root))
arch = localdata.getVar('PACKAGE_ARCH', True)
- pkgoutdir = "%s/%s" % (outdir, arch)
+
+ if localdata.getVar('IPK_HIERARCHICAL_FEED') == "1":
+ # Spread packages across subdirectories so each isn't too crowded
+ if pkgname.startswith('lib'):
+ pkg_prefix = 'lib' + pkgname[3]
+ else:
+ pkg_prefix = pkgname[0]
+
+ # Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
+ # together. These package suffixes are taken from the definitions of
+ # PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
+ if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
+ pkg_subdir = pkgname[:-4]
+ elif pkgname.endswith('-staticdev'):
+ pkg_subdir = pkgname[:-10]
+ elif pkgname.endswith('-locale'):
+ pkg_subdir = pkgname[:-7]
+ elif '-locale-' in pkgname:
+ pkg_subdir = pkgname[:pkgname.find('-locale-')]
+ else:
+ pkg_subdir = pkgname
+
+ pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
+ else:
+ pkgoutdir = "%s/%s" % (outdir, arch)
+
bb.utils.mkdirhier(pkgoutdir)
os.chdir(root)
cleanupcontrol(root)
@@ -182,7 +207,7 @@ python do_package_ipk () {
ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
if rconflicts:
ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
- src_uri = localdata.getVar("SRC_URI", True) or "None"
+ src_uri = localdata.getVar("SRC_URI", True).strip() or "None"
if src_uri:
src_uri = re.sub("\s+", " ", src_uri)
ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
@@ -201,7 +226,7 @@ python do_package_ipk () {
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0755)
- conffiles_str = localdata.getVar("CONFFILES", True)
+ conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
try:
conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
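
The hierarchical feed layout above spreads packages by name prefix and keeps suffix variants next to their base package. The same computation as a standalone sketch (package names invented):

def feed_subdir(pkgname):
    if pkgname.startswith('lib'):
        pkg_prefix = 'lib' + pkgname[3]
    else:
        pkg_prefix = pkgname[0]
    if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
        pkg_subdir = pkgname[:-4]
    elif pkgname.endswith('-staticdev'):
        pkg_subdir = pkgname[:-10]
    elif pkgname.endswith('-locale'):
        pkg_subdir = pkgname[:-7]
    elif '-locale-' in pkgname:
        pkg_subdir = pkgname[:pkgname.find('-locale-')]
    else:
        pkg_subdir = pkgname
    return "%s/%s" % (pkg_prefix, pkg_subdir)

for p in ("libpng", "libpng-dev", "busybox-locale-fr"):
    print("%s -> %s" % (p, feed_subdir(p)))
# libpng -> libp/libpng, libpng-dev -> libp/libpng, busybox-locale-fr -> b/busybox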
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index 1ff2b36434..e305e8b4ab 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -185,7 +185,7 @@ python write_specfile () {
if not len(depends_dict[dep]):
array.append("%s: %s" % (tag, dep))
- def walk_files(walkpath, target, conffiles):
+ def walk_files(walkpath, target, conffiles, dirfiles):
# We can race against the ipk/deb backends which create CONTROL or DEBIAN directories
# when packaging. We just ignore these files which are created in
# packages-split/ and not package/
@@ -196,11 +196,34 @@ python write_specfile () {
path = rootpath.replace(walkpath, "")
if path.endswith("DEBIAN") or path.endswith("CONTROL"):
continue
- for dir in dirs:
- if dir == "CONTROL" or dir == "DEBIAN":
- continue
- # All packages own the directories their files are in...
- target.append('%dir "' + path + '/' + dir + '"')
+
+ # Treat all symlinks to directories as normal files.
+ # os.walk() lists them as directories.
+ def move_to_files(dir):
+ if os.path.islink(os.path.join(rootpath, dir)):
+ files.append(dir)
+ return True
+ else:
+ return False
+ dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
+
+ # Directory handling can happen in two ways: either DIRFILES is not set at all,
+ # in which case we fall back to the older behaviour of packages owning all
+ # their directories
+ if dirfiles is None:
+ for dir in dirs:
+ if dir == "CONTROL" or dir == "DEBIAN":
+ continue
+ # All packages own the directories their files are in...
+ target.append('%dir "' + path + '/' + dir + '"')
+ else:
+ # Packages own only empty directories or explicitly listed ones.
+ # This prevents overlapping security permissions.
+ if path and not files and not dirs:
+ target.append('%dir "' + path + '"')
+ elif path and path in dirfiles:
+ target.append('%dir "' + path + '"')
+
for file in files:
if file == "CONTROL" or file == "DEBIAN":
continue
@@ -293,6 +316,7 @@ python write_specfile () {
spec_files_bottom = []
perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
+ extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA", True) or "0") == "1"
for pkg in packages.split():
localdata = bb.data.createCopy(d)
@@ -310,7 +334,10 @@ python write_specfile () {
bb.data.update_data(localdata)
- conffiles = (localdata.getVar('CONFFILES', True) or "").split()
+ conffiles = get_conffiles(pkg, d)
+ dirfiles = localdata.getVar('DIRFILES', True)
+ if dirfiles is not None:
+ dirfiles = dirfiles.split()
splitname = strip_multilib(pkgname, d)
@@ -367,12 +394,14 @@ python write_specfile () {
srcrpostrm = splitrpostrm
file_list = []
- walk_files(root, file_list, conffiles)
+ walk_files(root, file_list, conffiles, dirfiles)
if not file_list and localdata.getVar('ALLOW_EMPTY') != "1":
bb.note("Not creating empty RPM package for %s" % splitname)
else:
bb.note("Creating RPM package for %s" % splitname)
spec_files_top.append('%files')
+ if extra_pkgdata:
+ package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
spec_files_top.append('%defattr(-,-,-,-)')
if file_list:
bb.note("Creating RPM package for %s" % splitname)
@@ -474,11 +503,13 @@ python write_specfile () {
# Now process files
file_list = []
- walk_files(root, file_list, conffiles)
+ walk_files(root, file_list, conffiles, dirfiles)
if not file_list and localdata.getVar('ALLOW_EMPTY') != "1":
bb.note("Not creating empty RPM package for %s" % splitname)
else:
spec_files_bottom.append('%%files -n %s' % splitname)
+ if extra_pkgdata:
+ package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
spec_files_bottom.append('%defattr(-,-,-,-)')
if file_list:
bb.note("Creating RPM package for %s" % splitname)
@@ -652,15 +683,16 @@ python do_package_rpm () {
# Setup the rpmbuild arguments...
rpmbuild = d.getVar('RPMBUILD', True)
targetsys = d.getVar('TARGET_SYS', True)
- targetvendor = d.getVar('TARGET_VENDOR', True)
+ targetvendor = d.getVar('HOST_VENDOR', True)
package_arch = (d.getVar('PACKAGE_ARCH', True) or "").replace("-", "_")
- if package_arch not in "all any noarch".split() and not package_arch.endswith("_nativesdk"):
+ sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX', True) or "nativesdk").replace("-", "_")
+ if package_arch not in "all any noarch".split() and not package_arch.endswith(sdkpkgsuffix):
ml_prefix = (d.getVar('MLPREFIX', True) or "").replace("-", "_")
d.setVar('PACKAGE_ARCH_EXTEND', ml_prefix + package_arch)
else:
d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
- pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${TARGET_VENDOR}-${TARGET_OS}')
+ pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-${HOST_OS}')
magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc')
bb.utils.mkdirhier(pkgwritedir)
os.chmod(pkgwritedir, 0755)
@@ -668,6 +700,7 @@ python do_package_rpm () {
cmd = rpmbuild
cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
+ cmd = cmd + " --define '_builddir " + d.getVar('S', True) + "'"
cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
if perfiledeps:
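
The DIRFILES handling above changes which directories a package claims in the spec file. A condensed sketch of the rule (paths are invented):

def dir_entries(path, files, dirs, dirfiles):
    if dirfiles is None:
        # legacy behaviour: packages own the directories their files are in
        return ['%dir "' + path + '/' + dn + '"' for dn in dirs
                if dn not in ("CONTROL", "DEBIAN")]
    # packages own only empty or explicitly listed directories
    if path and ((not files and not dirs) or path in dirfiles):
        return ['%dir "' + path + '"']
    return []

print(dir_entries("/etc/foo", [], [], []))        # -> ['%dir "/etc/foo"']
print(dir_entries("/etc", ["foo.conf"], [], []))  # -> []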
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
index 6606bc6f71..56cfead82a 100644
--- a/meta/classes/packagegroup.bbclass
+++ b/meta/classes/packagegroup.bbclass
@@ -9,7 +9,12 @@ PACKAGES = "${PN}"
# By default, packagegroup packages do not depend on a certain architecture.
# Only if dependencies are modified by MACHINE_FEATURES, packages
# need to be set to MACHINE_ARCH after inheriting packagegroup.bbclass
-inherit allarch
+PACKAGE_ARCH ?= "all"
+
+# Fully expanded - so it applies the overrides as well
+PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
+
+inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED', True) == 'all', 'allarch', '')}
# This automatically adds -dbg and -dev flavours of all PACKAGES
# to the list. Their dependencies (RRECOMMENDS) are handled as usual
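
Because PACKAGE_ARCH_EXPANDED is captured with := before the conditional inherit, a packagegroup opts out of allarch simply by setting PACKAGE_ARCH first, e.g. in a hypothetical machine-specific group:

PACKAGE_ARCH = "${MACHINE_ARCH}"
inherit packagegroup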
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index 86c65b3b8d..1e2aab0418 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -177,6 +177,7 @@ python patch_do_patch() {
bb.fatal(str(e))
bb.utils.remove(process_tmpdir, True)
+ del os.environ['TMPDIR']
}
patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
index b8d75bd38c..9e6ecc8a53 100644
--- a/meta/classes/pixbufcache.bbclass
+++ b/meta/classes/pixbufcache.bbclass
@@ -67,6 +67,11 @@ pixbufcache_sstate_postinst() {
# Packages that use this class should extend this variable with their runtime
# dependencies.
PIXBUFCACHE_SYSROOT_DEPS = ""
-PIXBUFCACHE_SYSROOT_DEPS_class-native = "${@['gdk-pixbuf-native:do_populate_sysroot_setscene', '']['${BPN}' == 'gdk-pixbuf']} glib-2.0-native:do_populate_sysroot_setscene libffi-native:do_populate_sysroot_setscene libpng-native:do_populate_sysroot_setscene zlib-native:do_populate_sysroot_setscene"
+PIXBUFCACHE_SYSROOT_DEPS_class-native = "\
+ ${@['gdk-pixbuf-native:do_populate_sysroot_setscene', '']['${BPN}' == 'gdk-pixbuf']} \
+ glib-2.0-native:do_populate_sysroot_setscene libffi-native:do_populate_sysroot_setscene \
+ libpng-native:do_populate_sysroot_setscene zlib-native:do_populate_sysroot_setscene \
+ harfbuzz-native:do_populate_sysroot_setscene \
+ "
do_populate_sysroot_setscene[depends] += "${PIXBUFCACHE_SYSROOT_DEPS}"
do_populate_sysroot[depends] += "${@d.getVar('PIXBUFCACHE_SYSROOT_DEPS', True).replace('_setscene','')}"
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index 35d837d5df..5c0769373a 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -20,7 +20,7 @@ def complementary_globs(featurevar, d):
SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs"
SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
-inherit populate_sdk_${IMAGE_PKGTYPE}
+inherit rootfs_${IMAGE_PKGTYPE}
SDK_DIR = "${WORKDIR}/sdk"
SDK_OUTPUT = "${SDK_DIR}/image"
@@ -32,12 +32,15 @@ SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
-TOOLCHAIN_TARGET_TASK ?= "packagegroup-core-standalone-sdk-target packagegroup-core-standalone-sdk-target-dbg"
+TOOLCHAIN_TARGET_TASK ?= " \
+ ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \
+ ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target-dbg')} \
+ "
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
-SDK_DEPENDS = "virtual/fakeroot-native sed-native"
+SDK_DEPENDS = "virtual/fakeroot-native"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
# could be set to the MACHINE_ARCH
@@ -48,6 +51,20 @@ PID = "${@os.getpid()}"
EXCLUDE_FROM_WORLD = "1"
SDK_PACKAGING_FUNC ?= "create_shar"
+SDK_POST_INSTALL_COMMAND ?= ""
+SDK_RELOCATE_AFTER_INSTALL ?= "1"
+
+SDK_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.manifest"
+python write_target_sdk_manifest () {
+ from oe.sdk import sdk_list_installed_packages
+ sdkmanifestdir = os.path.dirname(d.getVar("SDK_MANIFEST", True))
+ if not os.path.exists(sdkmanifestdir):
+ bb.utils.mkdirhier(sdkmanifestdir)
+ with open(d.getVar('SDK_MANIFEST', True), 'w') as output:
+ output.write(sdk_list_installed_packages(d, True, 'ver'))
+}
+
+POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; "
fakeroot python do_populate_sdk() {
from oe.sdk import populate_sdk
@@ -55,7 +72,15 @@ fakeroot python do_populate_sdk() {
pn = d.getVar('PN', True)
runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
-
+ runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
+
+ ld = bb.data.createCopy(d)
+ ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
+ runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
+ runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
+ d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK", True))
+ d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", True))
+
# create target/host SDK manifests
create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
@@ -64,25 +89,8 @@ fakeroot python do_populate_sdk() {
populate_sdk(d)
- # Handle multilibs in the SDK environment, siteconfig, etc files...
- localdata = bb.data.createCopy(d)
-
- # make sure we only use the WORKDIR value from 'd', or it can change
- localdata.setVar('WORKDIR', d.getVar('WORKDIR', True))
-
- # make sure we only use the SDKTARGETSYSROOT value from 'd'
- localdata.setVar('SDKTARGETSYSROOT', d.getVar('SDKTARGETSYSROOT', True))
-
# Process DEFAULTTUNE
- bb.build.exec_func("create_sdk_files", localdata)
-
- variants = d.getVar("MULTILIB_VARIANTS", True) or ""
- for item in variants.split():
- # Load overrides from 'd' to avoid having to reset the value...
- overrides = d.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
- localdata.setVar("OVERRIDES", overrides)
- bb.data.update_data(localdata)
- bb.build.exec_func("create_sdk_files", localdata)
+ bb.build.exec_func("create_sdk_files", d)
bb.build.exec_func("tar_sdk", d)
@@ -90,14 +98,6 @@ fakeroot python do_populate_sdk() {
}
fakeroot create_sdk_files() {
- # Setup site file for external use
- toolchain_create_sdk_siteconfig ${SDK_OUTPUT}/${SDKPATH}/site-config-${REAL_MULTIMACH_TARGET_SYS}
-
- toolchain_create_sdk_env_script ${SDK_OUTPUT}/${SDKPATH}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
-
- # Add version information
- toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${REAL_MULTIMACH_TARGET_SYS}
-
cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
# Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern.
@@ -116,193 +116,27 @@ fakeroot tar_sdk() {
}
fakeroot create_shar() {
- cat << "EOF" > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
-#!/bin/bash
+ # copy in the template shar extractor script
+ cp ${COREBASE}/meta/files/toolchain-shar-extract.sh ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
-INST_ARCH=$(uname -m | sed -e "s/i[3-6]86/ix86/" -e "s/x86[-_]64/x86_64/")
-SDK_ARCH=$(echo ${SDK_ARCH} | sed -e "s/i[3-6]86/ix86/" -e "s/x86[-_]64/x86_64/")
+ rm -f ${T}/post_install_command
-if [ "$INST_ARCH" != "$SDK_ARCH" ]; then
- # Allow for installation of ix86 SDK on x86_64 host
- if [ "$INST_ARCH" != x86_64 -o "$SDK_ARCH" != ix86 ]; then
- echo "Error: Installation machine not supported!"
- exit 1
- fi
-fi
-
-DEFAULT_INSTALL_DIR="${SDKPATH}"
-SUDO_EXEC=""
-target_sdk_dir=""
-answer=""
-relocate=1
-savescripts=0
-verbose=0
-while getopts ":yd:DRS" OPT; do
- case $OPT in
- y)
- answer="Y"
- [ "$target_sdk_dir" = "" ] && target_sdk_dir=$DEFAULT_INSTALL_DIR
- ;;
- d)
- target_sdk_dir=$OPTARG
- ;;
- D)
- verbose=1
- ;;
- R)
- relocate=0
- savescripts=1
- ;;
- S)
- savescripts=1
- ;;
- *)
- echo "Usage: $(basename $0) [-y] [-d <dir>]"
- echo " -y Automatic yes to all prompts"
- echo " -d <dir> Install the SDK to <dir>"
- echo "======== Advanced DEBUGGING ONLY OPTIONS ========"
- echo " -S Save relocation scripts"
- echo " -R Do not relocate executables"
- echo " -D use set -x to see what is going on"
- exit 1
- ;;
- esac
-done
-
-if [ $verbose = 1 ] ; then
- set -x
-fi
-
-printf "Enter target directory for SDK (default: $DEFAULT_INSTALL_DIR): "
-if [ "$target_sdk_dir" = "" ]; then
- read target_sdk_dir
- [ "$target_sdk_dir" = "" ] && target_sdk_dir=$DEFAULT_INSTALL_DIR
-else
- echo "$target_sdk_dir"
-fi
-
-eval target_sdk_dir=$(echo "$target_sdk_dir"|sed 's/ /\\ /g')
-if [ -d "$target_sdk_dir" ]; then
- target_sdk_dir=$(cd "$target_sdk_dir"; pwd)
-else
- target_sdk_dir=$(readlink -m "$target_sdk_dir")
-fi
-
-if [ -n "$(echo $target_sdk_dir|grep ' ')" ]; then
- echo "The target directory path ($target_sdk_dir) contains spaces. Abort!"
- exit 1
-fi
-
-if [ -e "$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}" ]; then
- echo "The directory \"$target_sdk_dir\" already contains a SDK for this architecture."
- printf "If you continue, existing files will be overwritten! Proceed[y/N]?"
-
- default_answer="n"
-else
- printf "You are about to install the SDK to \"$target_sdk_dir\". Proceed[Y/n]?"
-
- default_answer="y"
-fi
-
-if [ "$answer" = "" ]; then
- read answer
- [ "$answer" = "" ] && answer="$default_answer"
-else
- echo $answer
-fi
-
-if [ "$answer" != "Y" -a "$answer" != "y" ]; then
- echo "Installation aborted!"
- exit 1
-fi
-
-# Try to create the directory (this will not succeed if user doesn't have rights)
-mkdir -p $target_sdk_dir >/dev/null 2>&1
-
-# if don't have the right to access dir, gain by sudo
-if [ ! -x $target_sdk_dir -o ! -w $target_sdk_dir -o ! -r $target_sdk_dir ]; then
- SUDO_EXEC=$(which "sudo")
- if [ -z $SUDO_EXEC ]; then
- echo "No command 'sudo' found, please install sudo first. Abort!"
- exit 1
+ if [ ${SDK_RELOCATE_AFTER_INSTALL} -eq 1 ] ; then
+ cp ${COREBASE}/meta/files/toolchain-shar-relocate.sh ${T}/post_install_command
fi
+ cat << "EOF" >> ${T}/post_install_command
+${SDK_POST_INSTALL_COMMAND}
+EOF
+ sed -i -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
- # test sudo could gain root right
- $SUDO_EXEC pwd >/dev/null 2>&1
- [ $? -ne 0 ] && echo "Sorry, you are not allowed to execute as root." && exit 1
-
- # now that we have sudo rights, create the directory
- $SUDO_EXEC mkdir -p $target_sdk_dir >/dev/null 2>&1
-fi
-
-payload_offset=$(($(grep -na -m1 "^MARKER:$" $0|cut -d':' -f1) + 1))
-
-printf "Extracting SDK..."
-tail -n +$payload_offset $0| $SUDO_EXEC tar xj -C $target_sdk_dir
-echo "done"
-
-printf "Setting it up..."
-# fix environment paths
-for env_setup_script in `ls $target_sdk_dir/environment-setup-*`; do
- $SUDO_EXEC sed -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:g" -i $env_setup_script
-done
-
-# fix dynamic loader paths in all ELF SDK binaries
-native_sysroot=$($SUDO_EXEC cat $env_setup_script |grep 'OECORE_NATIVE_SYSROOT='|cut -d'=' -f2|tr -d '"')
-dl_path=$($SUDO_EXEC find $native_sysroot/lib -name "ld-linux*")
-if [ "$dl_path" = "" ] ; then
- echo "SDK could not be set up. Relocate script unable to find ld-linux.so. Abort!"
- exit 1
-fi
-executable_files=$($SUDO_EXEC find $native_sysroot -type f -perm /111)
-
-tdir=`mktemp -d`
-if [ x$tdir = x ] ; then
- echo "SDK relocate failed, could not create a temporary directory"
- exit 1
-fi
-echo "#!/bin/bash" > $tdir/relocate_sdk.sh
-echo exec ${env_setup_script%/*}/relocate_sdk.py $target_sdk_dir $dl_path $executable_files >> $tdir/relocate_sdk.sh
-$SUDO_EXEC mv $tdir/relocate_sdk.sh ${env_setup_script%/*}/relocate_sdk.sh
-$SUDO_EXEC chmod 755 ${env_setup_script%/*}/relocate_sdk.sh
-rm -rf $tdir
-if [ $relocate = 1 ] ; then
- $SUDO_EXEC ${env_setup_script%/*}/relocate_sdk.sh
- if [ $? -ne 0 ]; then
- echo "SDK could not be set up. Relocate script failed. Abort!"
- exit 1
- fi
-fi
-
-# replace ${SDKPATH} with the new prefix in all text files: configs/scripts/etc
-$SUDO_EXEC find $native_sysroot -type f -exec file '{}' \;|grep ":.*\(ASCII\|script\|source\).*text"|cut -d':' -f1|$SUDO_EXEC xargs sed -i -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:g"
-
-# change all symlinks pointing to ${SDKPATH}
-for l in $($SUDO_EXEC find $native_sysroot -type l); do
- $SUDO_EXEC ln -sfn $(readlink $l|$SUDO_EXEC sed -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:") $l
-done
-
-# find out all perl scripts in $native_sysroot and modify them replacing the
-# host perl with SDK perl.
-for perl_script in $($SUDO_EXEC grep "^#!.*perl" -rl $native_sysroot); do
- $SUDO_EXEC sed -i -e "s:^#! */usr/bin/perl.*:#! /usr/bin/env perl:g" -e \
- "s: /usr/bin/perl: /usr/bin/env perl:g" $perl_script
-done
-
-echo done
-
-# delete the relocating script, so that user is forced to re-run the installer
-# if he/she wants another location for the sdk
-if [ $savescripts = 0 ] ; then
- $SUDO_EXEC rm ${env_setup_script%/*}/relocate_sdk.py ${env_setup_script%/*}/relocate_sdk.sh
-fi
-
-echo "SDK has been successfully set up and is ready to be used."
-
-exit 0
+ # substitute variables
+ sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
+ -e 's#@SDKPATH@#${SDKPATH}#g' \
+ -e 's#@OLDEST_KERNEL@#${OLDEST_KERNEL}#g' \
+ -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
+ -e '/@SDK_POST_INSTALL_COMMAND@/d' \
+ ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
-MARKER:
-EOF
# add execution permission
chmod +x ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
@@ -331,7 +165,7 @@ populate_sdk_log_check() {
}
do_populate_sdk[dirs] = "${TOPDIR}"
-do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])}"
+do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}"
do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
addtask populate_sdk
diff --git a/meta/classes/populate_sdk_deb.bbclass b/meta/classes/populate_sdk_deb.bbclass
deleted file mode 100644
index acb1f73983..0000000000
--- a/meta/classes/populate_sdk_deb.bbclass
+++ /dev/null
@@ -1,13 +0,0 @@
-do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
-
-DEB_SDK_ARCH = "${@[d.getVar('SDK_ARCH', True), "i386"]\
- [d.getVar('SDK_ARCH', True) in \
- ["x86", "i486", "i586", "i686", "pentium"]]}"
-
-DEB_SDK_ARCH = "${@[d.getVar('SDK_ARCH', True), "amd64"]\
- [d.getVar('SDK_ARCH', True) == "x86_64"]}"
-
-do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
-
-# This will of course only work after rootfs_deb_do_rootfs or populate_sdk_deb has been called
-DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg-query --admindir=$INSTALL_ROOTFS_DEB/var/lib/dpkg"
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
new file mode 100644
index 0000000000..ec1cff0658
--- /dev/null
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -0,0 +1,217 @@
+# Extensible SDK
+
+inherit populate_sdk_base
+
+# NOTE: normally you cannot use task overrides for this kind of thing - this
+# only works because of get_sdk_ext_rdepends()
+
+TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
+ meta-environment-extsdk-${MACHINE} \
+ "
+
+TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
+
+SDK_RDEPENDS_append_task-populate-sdk-ext = " ${SDK_TARGETS}"
+
+SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
+
+SDK_META_CONF_WHITELIST ?= "MACHINE DISTRO PACKAGE_CLASSES"
+
+SDK_TARGETS ?= "${PN}"
+OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
+
+# The files from COREBASE that you want preserved in the COREBASE copied
+# into the SDK. This allows someone's own setup scripts in COREBASE, as
+# well as untracked files, to be preserved.
+COREBASE_FILES ?= " \
+ oe-init-build-env \
+ oe-init-build-env-memres \
+ scripts \
+ LICENSE \
+ .templateconf \
+"
+
+SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
+B_task-populate-sdk-ext = "${SDK_DIR}"
+TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
+
+python copy_buildsystem () {
+ import re
+ import oe.copy_buildsystem
+
+ oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT', True)
+
+ conf_bbpath = ''
+ conf_initpath = ''
+ core_meta_subdir = ''
+
+ # Copy in all metadata layers + bitbake (as repositories)
+ buildsystem = oe.copy_buildsystem.BuildSystem(d)
+ baseoutpath = d.getVar('SDK_OUTPUT', True) + '/' + d.getVar('SDKPATH', True)
+ layers_copied = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers')
+
+ sdkbblayers = []
+ corebase = os.path.basename(d.getVar('COREBASE', True))
+ for layer in layers_copied:
+ if corebase == os.path.basename(layer):
+ conf_bbpath = os.path.join('layers', layer, 'bitbake')
+ else:
+ sdkbblayers.append(layer)
+
+ for path in os.listdir(baseoutpath + '/layers'):
+ relpath = os.path.join('layers', path, oe_init_env_script)
+ if os.path.exists(os.path.join(baseoutpath, relpath)):
+ conf_initpath = relpath
+
+ relpath = os.path.join('layers', path, 'scripts', 'devtool')
+ if os.path.exists(os.path.join(baseoutpath, relpath)):
+ scriptrelpath = os.path.dirname(relpath)
+
+ relpath = os.path.join('layers', path, 'meta')
+ if os.path.exists(os.path.join(baseoutpath, relpath, 'lib', 'oe')):
+ core_meta_subdir = relpath
+
+ d.setVar('oe_init_build_env_path', conf_initpath)
+ d.setVar('scriptrelpath', scriptrelpath)
+
+ # Write out config file for devtool
+ import ConfigParser
+ config = ConfigParser.SafeConfigParser()
+ config.add_section('General')
+ config.set('General', 'bitbake_subdir', conf_bbpath)
+ config.set('General', 'init_path', conf_initpath)
+ config.set('General', 'core_meta_subdir', core_meta_subdir)
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf'))
+ with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f:
+ config.write(f)
+
+ # Create a layer for new recipes / appends
+ bb.process.run("devtool --basepath %s create-workspace --create-only %s" % (baseoutpath, os.path.join(baseoutpath, 'workspace')))
+
+ # Create bblayers.conf
+ bb.utils.mkdirhier(baseoutpath + '/conf')
+ with open(baseoutpath + '/conf/bblayers.conf', 'w') as f:
+ f.write('LCONF_VERSION = "%s"\n\n' % d.getVar('LCONF_VERSION'))
+ f.write('BBPATH = "$' + '{TOPDIR}"\n')
+ f.write('SDKBASEMETAPATH = "$' + '{TOPDIR}"\n')
+ f.write('BBLAYERS := " \\\n')
+ for layerrelpath in sdkbblayers:
+ f.write(' $' + '{SDKBASEMETAPATH}/layers/%s \\\n' % layerrelpath)
+ f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n')
+ f.write(' "\n')
+
+ # Create local.conf
+ with open(baseoutpath + '/conf/local.conf', 'w') as f:
+ f.write('INHERIT += "%s"\n\n' % 'uninative')
+ f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION'))
+
+ # This is a bit of a hack, but we really don't want these dependencies
+ # (we're including them in the SDK as nativesdk- versions instead)
+ f.write('POKYQEMUDEPS_forcevariable = ""\n\n')
+ f.write('EXTRA_IMAGEDEPENDS_remove = "qemu-native qemu-helper-native"\n\n')
+
+ # Another hack, but we want the native part of sstate to be kept the same
+ # regardless of the host distro
+ fixedlsbstring = 'SDK-Fixed'
+ f.write('NATIVELSBSTRING_forcevariable = "%s"\n\n' % fixedlsbstring)
+
+ # Ensure locked sstate cache objects are re-used without error
+ f.write('SIGGEN_LOCKEDSIGS_CHECK_LEVEL = "warn"\n\n')
+
+ for varname in d.getVar('SDK_META_CONF_WHITELIST', True).split():
+ f.write('%s = "%s"\n' % (varname, d.getVar(varname, True)))
+ f.write('require conf/locked-sigs.inc\n')
+ f.write('require conf/work-config.inc\n')
+
+ sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
+ oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
+
+ # Filter the locked signatures file to just the sstate tasks we are interested in
+ allowed_tasks = ['do_populate_lic', 'do_populate_sysroot', 'do_packagedata', 'do_package_write_ipk', 'do_package_write_rpm', 'do_package_write_deb', 'do_package_qa', 'do_deploy']
+ excluded_targets = d.getVar('SDK_TARGETS', True)
+ lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
+ oe.copy_buildsystem.prune_lockedsigs(allowed_tasks,
+ excluded_targets,
+ sigfile,
+ lockedsigs_pruned)
+
+ sstate_out = baseoutpath + '/sstate-cache'
+ bb.utils.remove(sstate_out, True)
+ oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
+ d.getVar('SSTATE_DIR', True),
+ sstate_out, d,
+ fixedlsbstring)
+
+ # Create a dummy config file for additional settings
+ with open(baseoutpath + '/conf/work-config.inc', 'w') as f:
+ pass
+}
+
+install_tools() {
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
+ ln -sr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/devtool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/devtool
+ ln -sr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/recipetool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/recipetool
+ touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
+
+ install ${SDK_DEPLOY}/${DISTRO}-${TCLIBC}-${SDK_ARCH}-buildtools-tarball-${TUNE_PKGARCH}-buildtools-nativesdk-standalone-${DISTRO_VERSION}.sh ${SDK_OUTPUT}/${SDKPATH}
+
+ install ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 ${SDK_OUTPUT}/${SDKPATH}
+}
+
+# FIXME this preparation should be done as part of the SDK construction
+sdk_ext_postinst() {
+ printf "\nExtracting buildtools...\n"
+ cd $target_sdk_dir
+ printf "buildtools\ny" | ./*buildtools-tarball* > /dev/null
+
+ # Make sure when the user sets up the environment, they also get
+ # the buildtools-tarball tools in their path.
+ echo ". $target_sdk_dir/buildtools/environment-setup*" >> $target_sdk_dir/environment-setup*
+
+	# Allow the bitbake environment setup to be run as part of this SDK.
+ echo "export OE_SKIP_SDK_CHECK=1" >> $target_sdk_dir/environment-setup*
+
+ # A bit of another hack, but we need this in the path only for devtool
+ # so put it at the end of $PATH.
+ echo "export PATH=\$PATH:$target_sdk_dir/sysroots/${SDK_SYS}/${bindir_nativesdk}" >> $target_sdk_dir/environment-setup*
+
+ # For now this is where uninative.bbclass expects the tarball
+ mv *-nativesdk-libc.tar.* $target_sdk_dir/`dirname ${oe_init_build_env_path}`
+
+ printf "Preparing build system...\n"
+	# dash, which is /bin/sh on Ubuntu, will not preserve the current
+	# working directory when first run, nor will it set $1 when sourcing
+	# a script. That is why this has to look so ugly.
+ sh -c ". buildtools/environment-setup* > /dev/null && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir > /dev/null && bitbake ${SDK_TARGETS} > /dev/null" || { echo "SDK preparation failed" ; exit 1 ; }
+ echo done
+}
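
To see why the single sh -c invocation above chains "set" before sourcing the init script: under dash, positional parameters are not passed to sourced scripts, so $1 must be preset by the caller. A minimal demonstration of that behaviour (script name hypothetical):

    $ cat showarg.sh
    echo "arg1 is: $1"
    $ sh -c 'set -- /opt/sdk && . ./showarg.sh'
    arg1 is: /opt/sdk
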
+
+SDK_POST_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_postinst}"
+
+SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
+
+fakeroot python do_populate_sdk_ext() {
+ bb.build.exec_func("do_populate_sdk", d)
+}
+
+def get_sdk_ext_rdepends(d):
+ localdata = d.createCopy()
+ localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
+ bb.data.update_data(localdata)
+ return localdata.getVarFlag('do_populate_sdk', 'rdepends', True)
+
+do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
+do_populate_sdk_ext[depends] += "${@d.getVarFlag('do_populate_sdk', 'depends', False)}"
+do_populate_sdk_ext[rdepends] = "${@get_sdk_ext_rdepends(d)}"
+do_populate_sdk_ext[recrdeptask] += "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
+
+
+do_populate_sdk_ext[depends] += "buildtools-tarball:do_populate_sdk uninative-tarball:do_populate_sdk"
+
+do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':do_build' for x in d.getVar('SDK_TARGETS', True).split()])}"
+do_populate_sdk_ext[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy"
+
+# Make sure code changes in copy_buildsystem result in the task being rerun
+do_populate_sdk_ext[vardeps] += "copy_buildsystem"
+
+addtask populate_sdk_ext
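
For orientation, the conf files written by copy_buildsystem() come out as ordinary build configuration; the generated bblayers.conf for an SDK that captured two layers would look roughly like this (the layer names and LCONF_VERSION value are illustrative):

    LCONF_VERSION = "6"

    BBPATH = "${TOPDIR}"
    SDKBASEMETAPATH = "${TOPDIR}"
    BBLAYERS := " \
      ${SDKBASEMETAPATH}/layers/meta \
      ${SDKBASEMETAPATH}/layers/meta-mylayer \
      ${SDKBASEMETAPATH}/workspace \
      "
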
diff --git a/meta/classes/populate_sdk_ipk.bbclass b/meta/classes/populate_sdk_ipk.bbclass
deleted file mode 100644
index 8b2cb6dc48..0000000000
--- a/meta/classes/populate_sdk_ipk.bbclass
+++ /dev/null
@@ -1,3 +0,0 @@
-do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
-
-do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
diff --git a/meta/classes/populate_sdk_rpm.bbclass b/meta/classes/populate_sdk_rpm.bbclass
deleted file mode 100644
index 4954d00490..0000000000
--- a/meta/classes/populate_sdk_rpm.bbclass
+++ /dev/null
@@ -1,16 +0,0 @@
-# Smart is python based, so be sure python-native is available to us.
-EXTRANATIVEPATH += "python-native"
-
-do_populate_sdk[depends] += "rpm-native:do_populate_sysroot"
-do_populate_sdk[depends] += "rpmresolve-native:do_populate_sysroot"
-do_populate_sdk[depends] += "python-smartpm-native:do_populate_sysroot"
-
-# Needed for update-alternatives
-do_populate_sdk[depends] += "opkg-native:do_populate_sysroot"
-
-# Creating the repo info in do_rootfs
-do_populate_sdk[depends] += "createrepo-native:do_populate_sysroot"
-
-rpmlibdir = "/var/lib/rpm"
-
-do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
diff --git a/meta/classes/prserv.bbclass b/meta/classes/prserv.bbclass
index b440d863ef..139597f9cb 100644
--- a/meta/classes/prserv.bbclass
+++ b/meta/classes/prserv.bbclass
@@ -1,33 +1,2 @@
-def prserv_get_pr_auto(d):
- import oe.prservice
- import re
- pv = d.getVar("PV", True)
- if not d.getVar('PRSERV_HOST', True):
- if 'AUTOINC' in pv:
- d.setVar("PKGV", pv.replace("AUTOINC", "0"))
- bb.warn("Not using network based PR service")
- return None
- version = d.getVar("PRAUTOINX", True)
- pkgarch = d.getVar("PACKAGE_ARCH", True)
- checksum = d.getVar("BB_TASKHASH", True)
-
- conn = d.getVar("__PRSERV_CONN", True)
- if conn is None:
- conn = oe.prservice.prserv_make_conn(d)
- if conn is None:
- return None
-
- if "AUTOINC" in pv:
- srcpv = bb.fetch2.get_srcrev(d)
- base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
- value = conn.getPR(base_ver, pkgarch, srcpv)
- d.setVar("PKGV", pv.replace("AUTOINC", str(value)))
-
- if d.getVar('PRSERV_LOCKDOWN', True):
- auto_rev = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
- else:
- auto_rev = conn.getPR(version, pkgarch, checksum)
-
- return auto_rev
diff --git a/meta/classes/ptest-gnome.bbclass b/meta/classes/ptest-gnome.bbclass
new file mode 100644
index 0000000000..b2949af9bb
--- /dev/null
+++ b/meta/classes/ptest-gnome.bbclass
@@ -0,0 +1,8 @@
+inherit ptest
+
+EXTRA_OECONF_append_class-target = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
+
+FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \
+ ${datadir}/installed-tests/"
+
+RDEPENDS_${PN}-ptest += "gnome-desktop-testing"
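
Usage is a one-liner: a GNOME-style recipe just inherits the class and, assuming the usual ptest.bbclass behaviour where PTEST_ENABLED tracks the distro feature, enabling the feature flips the configure switch above (recipe context hypothetical):

    # in the recipe
    inherit ptest-gnome

    # in local.conf or the distro configuration
    DISTRO_FEATURES_append = " ptest"
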
diff --git a/meta/classes/pythonnative.bbclass b/meta/classes/pythonnative.bbclass
index fdd22bbc86..97029dc525 100644
--- a/meta/classes/pythonnative.bbclass
+++ b/meta/classes/pythonnative.bbclass
@@ -2,5 +2,7 @@
inherit python-dir
PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
+# PYTHON_EXECUTABLE is used by cmake
+PYTHON_EXECUTABLE="${PYTHON}"
EXTRANATIVEPATH += "${PYTHON_PN}-native"
DEPENDS += " ${PYTHON_PN}-native "
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
index abee8aaa7c..601f587534 100644
--- a/meta/classes/qemu.bbclass
+++ b/meta/classes/qemu.bbclass
@@ -29,19 +29,23 @@ def qemu_run_binary(data, rootfs_path, binary):
libdir = rootfs_path + data.getVar("libdir", False)
base_libdir = rootfs_path + data.getVar("base_libdir", False)
+ qemu_options = data.getVar("QEMU_OPTIONS", True)
- return "PSEUDO_UNLOAD=1 " + qemu_binary + " -L " + rootfs_path\
+ return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
+ " -E LD_LIBRARY_PATH=" + libdir + ":" + base_libdir + " "\
+ rootfs_path + binary
-QEMU_OPTIONS = ""
-QEMU_OPTIONS_iwmmxt = "-cpu pxa270-c5"
-QEMU_OPTIONS_armv6 = "-cpu arm1136"
-QEMU_OPTIONS_armv7a = "-cpu cortex-a8"
-QEMU_OPTIONS_e500v2 = "-cpu e500v2"
-QEMU_OPTIONS_e500mc = "-cpu e500mc"
-QEMU_OPTIONS_e5500 = "-cpu e5500"
-QEMU_OPTIONS_e5500-64b = "-cpu e5500"
-QEMU_OPTIONS_e6500 = "-cpu e6500"
-QEMU_OPTIONS_e6500-64b = "-cpu e6500"
-QEMU_OPTIONS_ppc7400 = "-cpu 7400"
+# QEMU_EXTRAOPTIONS is not meant to be used directly: its suffixes are
+# PACKAGE_ARCH values, not overrides, hence the getVar dance below. Simply
+# being architecture-specific isn't good enough.
+QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True) or ""}"
+QEMU_EXTRAOPTIONS_iwmmxt = " -cpu pxa270-c5"
+QEMU_EXTRAOPTIONS_armv6 = " -cpu arm1136"
+QEMU_EXTRAOPTIONS_armv7a = " -cpu cortex-a8"
+QEMU_EXTRAOPTIONS_e500v2 = " -cpu e500v2"
+QEMU_EXTRAOPTIONS_e500mc = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_e5500 = " -cpu e5500"
+QEMU_EXTRAOPTIONS_e5500-64b = " -cpu e5500"
+QEMU_EXTRAOPTIONS_e6500 = " -cpu e6500"
+QEMU_EXTRAOPTIONS_e6500-64b = " -cpu e6500"
+QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
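
To make the indirection concrete: for a build where PACKAGE_ARCH is "armv7a", the inline Python looks up QEMU_EXTRAOPTIONS_armv7a by name, so with an illustrative OLDEST_KERNEL of 2.6.32, QEMU_OPTIONS expands to roughly:

    -r 2.6.32 -cpu cortex-a8
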
diff --git a/meta/classes/qmake_base.bbclass b/meta/classes/qmake_base.bbclass
index 86bbede260..dc98713fd2 100644
--- a/meta/classes/qmake_base.bbclass
+++ b/meta/classes/qmake_base.bbclass
@@ -38,9 +38,9 @@ do_generate_qt_config_file() {
[Paths]
Prefix =
Binaries = ${STAGING_BINDIR_NATIVE}
-Headers = ${STAGING_INCDIR}/qt4
-Plugins = ${STAGING_LIBDIR}/qt4/plugins/
-Mkspecs = ${STAGING_DATADIR}/qt4/mkspecs/
+Headers = ${STAGING_INCDIR}/${QT_DIR_NAME}
+Plugins = ${STAGING_LIBDIR}/${QT_DIR_NAME}/plugins/
+Mkspecs = ${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/
EOF
}
diff --git a/meta/classes/qt4e.bbclass b/meta/classes/qt4e.bbclass
index 850bb6a717..13b1050aac 100644
--- a/meta/classes/qt4e.bbclass
+++ b/meta/classes/qt4e.bbclass
@@ -19,6 +19,3 @@ EXTRA_QMAKEVARS_PRE += " QT_LIBINFIX=${QT_LIBINFIX} "
# Qt4 uses atomic instructions not supported in thumb mode
ARM_INSTRUCTION_SET = "arm"
-
-# Qt4 could NOT be built on MIPS64 with 64 bits userspace
-COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/qt4x11.bbclass b/meta/classes/qt4x11.bbclass
index 65d196afc6..6f06d34755 100644
--- a/meta/classes/qt4x11.bbclass
+++ b/meta/classes/qt4x11.bbclass
@@ -1,7 +1,10 @@
QT4DEPENDS ?= "qt4-x11 "
DEPENDS_prepend = "${QT4DEPENDS}"
-inherit qmake2
+# depends on qt4-x11
+REQUIRED_DISTRO_FEATURES += "x11"
+
+inherit qmake2 distro_features_check
QT_BASE_NAME = "qt4"
QT_DIR_NAME = "qt4"
@@ -9,6 +12,3 @@ QT_LIBINFIX = ""
# Qt4 uses atomic instructions not supported in thumb mode
ARM_INSTRUCTION_SET = "arm"
-
-# Qt4 could NOT be built on MIPS64 with 64 bits userspace
-COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 15adc97b52..9edf2ceb31 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -44,11 +44,18 @@ python errorreport_handler () {
task = e.task
taskdata={}
log = e.data.getVar('BB_LOGFILE', True)
- logFile = open(log, 'r')
taskdata['package'] = e.data.expand("${PF}")
taskdata['task'] = task
- taskdata['log'] = logFile.read()
- logFile.close()
+ if log:
+ try:
+ logFile = open(log, 'r')
+ taskdata['log'] = logFile.read().decode('utf-8')
+ logFile.close()
+ except:
+ taskdata['log'] = "Unable to read log file"
+
+ else:
+ taskdata['log'] = "No Log"
jsondata = json.loads(errorreport_getdata(e))
jsondata['failures'].append(taskdata)
errorreport_savedata(e, jsondata, "error-report.txt")
@@ -59,8 +66,8 @@ python errorreport_handler () {
if(len(failures) > 0):
filename = "error_report_" + e.data.getVar("BUILDNAME")+".txt"
datafile = errorreport_savedata(e, jsondata, filename)
- bb.note("The errors of this build are stored in: %s. You can send the errors to an upstream server by running: send-error-report %s [server]" % (datafile, datafile))
- bb.note("The contents of these logs will be posted in public if you use the above script. Please ensure you remove any identifying or propriety information before sending.")
+ bb.note("The errors for this build are stored in %s\nYou can send the errors to a reports server by running:\n send-error-report %s [-s server]" % (datafile, datafile))
+ bb.note("The contents of these logs will be posted in public if you use the above command with the default server. Please ensure you remove any identifying or proprietary information when prompted before sending.")
}
addhandler errorreport_handler
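
The files the handler writes are plain JSON; given the fields populated above, a single recorded failure looks roughly like this (values illustrative):

    {
        "failures": [
            {
                "package": "foo-1.0-r0",
                "task": "do_compile",
                "log": "...contents of the task log..."
            }
        ]
    }
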
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index f0f6d18249..e68d02a783 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -97,3 +97,24 @@ do_rm_work_all () {
}
do_rm_work_all[recrdeptask] = "do_rm_work"
addtask rm_work_all after do_rm_work
+
+do_populate_sdk[postfuncs] += "rm_work_populatesdk"
+rm_work_populatesdk () {
+ :
+}
+rm_work_populatesdk[cleandirs] = "${WORKDIR}/sdk"
+
+do_rootfs[postfuncs] += "rm_work_rootfs"
+rm_work_rootfs () {
+ :
+}
+rm_work_rootfs[cleandirs] = "${WORKDIR}/rootfs"
+
+python () {
+ # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
+ excludes = (d.getVar("RM_WORK_EXCLUDE", True) or "").split()
+ pn = d.getVar("PN", True)
+ if pn in excludes:
+ d.delVarFlag('rm_work_rootfs', 'cleandirs')
+ d.delVarFlag('rm_work_populatesdk', 'cleandirs')
+}
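
Since the anonymous Python keys off RM_WORK_EXCLUDE, keeping the work directories (including the rootfs and sdk staging dirs above) for selected recipes is purely a configuration matter, e.g. in local.conf (recipe names illustrative):

    RM_WORK_EXCLUDE += "busybox core-image-minimal"
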
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
index a42a472822..d51b4582d2 100644
--- a/meta/classes/rootfs_deb.bbclass
+++ b/meta/classes/rootfs_deb.bbclass
@@ -6,11 +6,13 @@ ROOTFS_PKGMANAGE = "dpkg apt"
ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
+do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_deb"
rootfs_deb_do_rootfs[vardepsexclude] += "BUILDNAME"
do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
+do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
python rootfs_deb_bad_recommendations() {
if d.getVar("BAD_RECOMMENDATIONS", True):
@@ -22,3 +24,16 @@ DEB_POSTPROCESS_COMMANDS = ""
opkglibdir = "${localstatedir}/lib/opkg"
+python () {
+ # Map TARGET_ARCH to Debian's ideas about architectures
+ darch = d.getVar('SDK_ARCH', True)
+ if darch in ["x86", "i486", "i586", "i686", "pentium"]:
+ d.setVar('DEB_SDK_ARCH', 'i386')
+ elif darch == "x86_64":
+ d.setVar('DEB_SDK_ARCH', 'amd64')
+ elif darch == "arm":
+ d.setVar('DEB_SDK_ARCH', 'armel')
+}
+
+# This will of course only work after rootfs_deb_do_rootfs or populate_sdk_deb has been called
+DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg-query --admindir=$INSTALL_ROOTFS_DEB/var/lib/dpkg"
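
As a sketch of how DPKG_QUERY_COMMAND is meant to be used once the rootfs exists (shell context; the format string is standard dpkg-query syntax):

    # list what dpkg believes is installed in the image
    ${DPKG_QUERY_COMMAND} -W -f='${Package} ${Version}\n'
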
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
index f5fef00166..dd144e49ef 100644
--- a/meta/classes/rootfs_ipk.bbclass
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -6,15 +6,17 @@
#
EXTRAOPKGCONFIG ?= ""
-ROOTFS_PKGMANAGE = "opkg opkg-collateral ${EXTRAOPKGCONFIG}"
+ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
+do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_ipk"
do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
rootfs_ipk_do_rootfs[vardepsexclude] += "BUILDNAME"
do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
+do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
OPKG_PREPROCESS_COMMANDS = ""
@@ -35,4 +37,3 @@ python () {
d.setVar('OPKG_PREPROCESS_COMMANDS', "")
d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
}
-
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
index 9e97d341d9..d85d001a62 100644
--- a/meta/classes/rootfs_rpm.bbclass
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -11,15 +11,15 @@ IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smar
# Smart is python based, so be sure python-native is available to us.
EXTRANATIVEPATH += "python-native"
-do_rootfs[depends] += "rpm-native:do_populate_sysroot"
-do_rootfs[depends] += "rpmresolve-native:do_populate_sysroot"
-do_rootfs[depends] += "python-smartpm-native:do_populate_sysroot"
+# opkg is needed for update-alternatives
+RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
+ rpmresolve-native:do_populate_sysroot \
+ python-smartpm-native:do_populate_sysroot \
+ createrepo-native:do_populate_sysroot \
+ opkg-native:do_populate_sysroot"
-# Needed for update-alternatives
-do_rootfs[depends] += "opkg-native:do_populate_sysroot"
-
-# Creating the repo info in do_rootfs
-do_rootfs[depends] += "createrepo-native:do_populate_sysroot"
+do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
+do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
do_rootfs[recrdeptask] += "do_package_write_rpm"
rootfs_rpm_do_rootfs[vardepsexclude] += "BUILDNAME"
@@ -28,6 +28,7 @@ do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
# RPM doesn't work with multiple rootfs generation at once due to collisions in the use of files
# in ${DEPLOY_DIR_RPM}. This can be removed if package_update_index_rpm can be called concurrently
do_rootfs[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
+do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
python () {
if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
@@ -40,3 +41,7 @@ python () {
d.setVar('RPM_POSTPROCESS_COMMANDS', '')
}
+# Smart is python based, so be sure python-native is available to us.
+EXTRANATIVEPATH += "python-native"
+
+rpmlibdir = "/var/lib/rpm"
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 4b42b17145..31b99d4bc8 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -70,6 +70,12 @@ python oecore_update_bblayers() {
sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
return
+ elif current_lconf == 5 and lconf_version > 5:
+ # Null update, to avoid issues with people switching between poky and other distros
+ current_lconf = 6
+ sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
+ return
+
sys.exit()
}
@@ -87,17 +93,54 @@ def raise_sanity_error(msg, d, network_error=False):
%s""" % msg)
+# Check flags associated with a tuning.
+def check_toolchain_tune_args(data, tune, multilib, errs):
+ found_errors = False
+ if check_toolchain_args_present(data, tune, multilib, errs, 'CCARGS'):
+ found_errors = True
+ if check_toolchain_args_present(data, tune, multilib, errs, 'ASARGS'):
+ found_errors = True
+ if check_toolchain_args_present(data, tune, multilib, errs, 'LDARGS'):
+ found_errors = True
+
+ return found_errors
+
+def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
+ args_set = (data.getVar("TUNE_%s" % which, True) or "").split()
+ args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune), True) or "").split()
+ args_missing = []
+
+ # If no args are listed/required, we are done.
+ if not args_wanted:
+ return
+ for arg in args_wanted:
+ if arg not in args_set:
+ args_missing.append(arg)
+
+ found_errors = False
+ if args_missing:
+ found_errors = True
+ tune_errors.append("TUNEABI for %s requires '%s' in TUNE_%s (%s)." %
+ (tune, ' '.join(args_missing), which, ' '.join(args_set)))
+ return found_errors
+
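
The per-tune requirement variables the helper reads follow the pattern TUNEABI_REQUIRED_<WHICH>_tune-<tune>; a tune file could declare, for instance (the specific flag is illustrative):

    TUNEABI_REQUIRED_CCARGS_tune-cortexa8 = "-mfpu=neon"

The check above would then flag any configuration whose TUNE_CCARGS does not include -mfpu=neon.
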
# Check a single tune for validity.
def check_toolchain_tune(data, tune, multilib):
tune_errors = []
if not tune:
return "No tuning found for %s multilib." % multilib
+ localdata = bb.data.createCopy(data)
+ if multilib != "default":
+ # Apply the overrides so we can look at the details.
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
+ localdata.setVar("OVERRIDES", overrides)
+ bb.data.update_data(localdata)
bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
- features = (data.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split()
+ features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split()
if not features:
return "Tuning '%s' has no defined features, and cannot be used." % tune
- valid_tunes = data.getVarFlags('TUNEVALID') or {}
- conflicts = data.getVarFlags('TUNECONFLICTS') or {}
+ valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
+ conflicts = localdata.getVarFlags('TUNECONFLICTS') or {}
# [doc] is the documentation for the variable, not a real feature
if 'doc' in valid_tunes:
del valid_tunes['doc']
@@ -113,15 +156,17 @@ def check_toolchain_tune(data, tune, multilib):
bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
else:
tune_errors.append("Feature '%s' is not defined." % feature)
- whitelist = data.getVar("TUNEABI_WHITELIST", True) or ''
- override = data.getVar("TUNEABI_OVERRIDE", True) or ''
+ whitelist = localdata.getVar("TUNEABI_WHITELIST", True)
if whitelist:
- tuneabi = data.getVar("TUNEABI_tune-%s" % tune, True) or ''
+ tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune, True)
if not tuneabi:
tuneabi = tune
if True not in [x in whitelist.split() for x in tuneabi.split()]:
tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
(tune, tuneabi))
+ else:
+ if not check_toolchain_tune_args(localdata, tuneabi, multilib, tune_errors):
+ bb.debug(2, "Sanity check: Compiler args OK for %s." % tune)
if tune_errors:
return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
@@ -225,7 +270,7 @@ def check_connectivity(d):
# CONNECTIVITY_CHECK_URIS are set
network_enabled = not d.getVar('BB_NO_NETWORK', True)
check_enabled = len(test_uris)
- # Take a copy of the data store and unset MIRRORS and PREMIRROS
+ # Take a copy of the data store and unset MIRRORS and PREMIRRORS
data = bb.data.createCopy(d)
data.delVar('PREMIRRORS')
data.delVar('MIRRORS')
@@ -386,15 +431,15 @@ def check_tar_version(sanity_data):
return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
return None
-# We use git parameters and functionality only found in 1.7.5 or later
+# We use git parameters and functionality only found in 1.7.8 or later
def check_git_version(sanity_data):
from distutils.version import LooseVersion
status, result = oe.utils.getstatusoutput("git --version 2> /dev/null")
if status != 0:
return "Unable to execute git --version, exit code %s\n" % status
version = result.split()[2]
- if LooseVersion(version) < LooseVersion("1.7.5"):
- return "Your version of git is older than 1.7.5 and has bugs which will break builds. Please install a newer version of git.\n"
+ if LooseVersion(version) < LooseVersion("1.7.8"):
+ return "Your version of git is older than 1.7.8 and has bugs which will break builds. Please install a newer version of git.\n"
return None
# Check the required perl modules which may not be installed by default
@@ -431,7 +476,6 @@ def sanity_check_conffiles(status, d):
if success:
bb.note("Your conf/bblayers.conf has been automatically updated.")
status.reparse = True
- break
if not status.reparse:
status.addresult("Your version of bblayers.conf has the wrong LCONF_VERSION (has %s, expecting %s).\nPlease compare the your file against bblayers.conf.sample and merge any changes before continuing.\n\"meld conf/bblayers.conf ${COREBASE}/meta*/conf/bblayers.conf.sample\" is a good way to visualise the changes.\n" % (current_lconf, lconf_version))
@@ -480,6 +524,16 @@ def sanity_handle_abichanges(status, d):
status.addresult("Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n")
elif (abi != current_abi and current_abi == "9"):
status.addresult("The layout of the TMPDIR STAMPS directory has changed. Please clean out TMPDIR and rebuild (sstate will be still be valid and reused)\n")
+ elif (abi != current_abi and current_abi == "10" and (abi == "8" or abi == "9")):
+ bb.note("Converting staging layout from version 8/9 to layout version 10")
+ cmd = d.expand("grep -r -l sysroot-providers/virtual_kernel ${SSTATE_MANIFESTS}")
+ ret, result = oe.utils.getstatusoutput(cmd)
+ result = result.split()
+ for f in result:
+ bb.note("Uninstalling manifest file %s" % f)
+ sstate_clean_manifest(f, d)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
@@ -514,6 +568,7 @@ def check_sanity_version_change(status, d):
import xml.parsers.expat
except ImportError:
status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
+ import stat
status.addresult(check_make_version(d))
status.addresult(check_tar_version(d))
@@ -566,6 +621,11 @@ def check_sanity_version_change(status, d):
# Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
tmpdir = d.getVar('TMPDIR', True)
status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
+ tmpdirmode = os.stat(tmpdir).st_mode
+ if (tmpdirmode & stat.S_ISGID):
+ status.addresult("TMPDIR is setgid, please don't build in a setgid directory")
+ if (tmpdirmode & stat.S_ISUID):
+ status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
# Some third-party software apparently relies on chmod etc. being suid root (!!)
import stat
@@ -708,6 +768,44 @@ def check_sanity_everybuild(status, d):
if oeroot.find(' ') != -1:
status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
+ # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
+ import re
+ mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
+ protocols = ['http', 'ftp', 'file', 'https', \
+ 'git', 'gitsm', 'hg', 'osc', 'p4', 'svk', 'svn', \
+ 'bzr', 'cvs']
+ for mirror_var in mirror_vars:
+ mirrors = (d.getVar(mirror_var, True) or '').replace('\\n', '\n').split('\n')
+ for mirror_entry in mirrors:
+ mirror_entry = mirror_entry.strip()
+ if not mirror_entry:
+ # ignore blank lines
+ continue
+
+ try:
+ pattern, mirror = mirror_entry.split()
+ except ValueError:
+                bb.warn('Invalid %s: %s, should be 2 fields (pattern and mirror).' % (mirror_var, mirror_entry))
+ continue
+
+ decoded = bb.fetch2.decodeurl(pattern)
+ try:
+ pattern_scheme = re.compile(decoded[0])
+ except re.error as exc:
+ bb.warn('Invalid scheme regex (%s) in %s; %s' % (pattern, mirror_var, mirror_entry))
+ continue
+
+ if not any(pattern_scheme.match(protocol) for protocol in protocols):
+ bb.warn('Invalid protocol (%s) in %s: %s' % (decoded[0], mirror_var, mirror_entry))
+ continue
+
+ if not any(mirror.startswith(protocol + '://') for protocol in protocols):
+ bb.warn('Invalid protocol in %s: %s' % (mirror_var, mirror_entry))
+ continue
+
+ if mirror.startswith('file://') and not mirror.startswith('file:///'):
+ bb.warn('Invalid file url in %s: %s, must be absolute path (file:///)' % (mirror_var, mirror_entry))
+
# Check that TMPDIR hasn't changed location since the last time we were run
tmpdir = d.getVar('TMPDIR', True)
checkfile = os.path.join(tmpdir, "saved_tmpdir")
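
As a reference point for the new validation: each mirror entry is a pattern/replacement pair, both halves must use one of the listed protocols, and file:// replacements must be absolute paths. An entry that passes the checks looks like (mirror host hypothetical):

    PREMIRRORS_prepend = "\
        git://.*/.*   http://mirror.example.com/sources/ \n \
        ftp://.*/.*   http://mirror.example.com/sources/ \n \
        http://.*/.*  http://mirror.example.com/sources/ \n"
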
diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass
index 9a4d03b887..45dce489de 100644
--- a/meta/classes/siteconfig.bbclass
+++ b/meta/classes/siteconfig.bbclass
@@ -18,13 +18,13 @@ siteconfig_do_siteconfig_gencache () {
>${WORKDIR}/site_config_${MACHINE}/configure.ac
cd ${WORKDIR}/site_config_${MACHINE}
autoconf
- rm -f ${PN}_cache
- CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${PN}_cache
+ rm -f ${BPN}_cache
+ CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${BPN}_cache
sed -n -e "/ac_cv_c_bigendian/p" -e "/ac_cv_sizeof_/p" \
-e "/ac_cv_type_/p" -e "/ac_cv_header_/p" -e "/ac_cv_func_/p" \
- < ${PN}_cache > ${PN}_config
+ < ${BPN}_cache > ${BPN}_config
mkdir -p ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
- cp ${PN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
+ cp ${BPN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
}
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
index e90632aeef..2c1f9d07fc 100644
--- a/meta/classes/siteinfo.bbclass
+++ b/meta/classes/siteinfo.bbclass
@@ -18,10 +18,10 @@
def siteinfo_data(d):
archinfo = {
"allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
- "aarch64": "endian-little bit-64 arm-common",
- "aarch64_be": "endian-big bit-64 arm-common",
- "arm": "endian-little bit-32 arm-common",
- "armeb": "endian-big bit-32 arm-common",
+ "aarch64": "endian-little bit-64 arm-common arm-64",
+ "aarch64_be": "endian-big bit-64 arm-common arm-64",
+ "arm": "endian-little bit-32 arm-common arm-32",
+ "armeb": "endian-big bit-32 arm-common arm-32",
"avr32": "endian-big bit-32 avr32-common",
"bfin": "endian-little bit-32 bfin-common",
"i386": "endian-little bit-32 ix86-common",
@@ -150,9 +150,13 @@ def siteinfo_get_files(d, no_cache = False):
if no_cache: return sitefiles
# Now check for siteconfig cache files
- path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE', True)
- if os.path.isdir(path_siteconfig):
+ # Use the files copied to the aclocal cache generated by autotools.bbclass
+ # to avoid races
+ path_siteconfig = d.getVar('ACLOCALDIR', True)
+ if path_siteconfig and os.path.isdir(path_siteconfig):
for i in os.listdir(path_siteconfig):
+ if not i.endswith("_config"):
+ continue
filename = os.path.join(path_siteconfig, i)
sitefiles += filename + " "
diff --git a/meta/classes/spdx.bbclass b/meta/classes/spdx.bbclass
index 55ce3aff4f..454c53e96f 100644
--- a/meta/classes/spdx.bbclass
+++ b/meta/classes/spdx.bbclass
@@ -15,252 +15,291 @@
# SPDX file will be output to the path which is defined as[SPDX_MANIFEST_DIR]
# in ./meta/conf/licenses.conf.
-SPDXOUTPUTDIR = "${WORKDIR}/spdx_output_dir"
SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir"
+# If ${S} isn't actually the top-level source directory, set SPDX_S to point at
+# the real top-level directory.
+SPDX_S ?= "${S}"
+
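
For a recipe whose ${S} points below the real source root (say S = "${WORKDIR}/${BP}/src"), the override is a one-line assignment in the recipe (paths illustrative):

    SPDX_S = "${WORKDIR}/${BP}"
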
python do_spdx () {
import os, sys
- import json
+ import json, shutil
info = {}
- info['workdir'] = (d.getVar('WORKDIR', True) or "")
- info['sourcedir'] = (d.getVar('S', True) or "")
- info['pn'] = (d.getVar( 'PN', True ) or "")
- info['pv'] = (d.getVar( 'PV', True ) or "")
- info['src_uri'] = (d.getVar( 'SRC_URI', True ) or "")
- info['spdx_version'] = (d.getVar('SPDX_VERSION', True) or '')
- info['data_license'] = (d.getVar('DATA_LICENSE', True) or '')
-
- spdx_sstate_dir = (d.getVar('SPDXSSTATEDIR', True) or "")
- manifest_dir = (d.getVar('SPDX_MANIFEST_DIR', True) or "")
+ info['workdir'] = d.getVar('WORKDIR', True)
+ info['sourcedir'] = d.getVar('SPDX_S', True)
+ info['pn'] = d.getVar('PN', True)
+ info['pv'] = d.getVar('PV', True)
+ info['spdx_version'] = d.getVar('SPDX_VERSION', True)
+ info['data_license'] = d.getVar('DATA_LICENSE', True)
+
+ sstatedir = d.getVar('SPDXSSTATEDIR', True)
+ sstatefile = os.path.join(sstatedir, info['pn'] + info['pv'] + ".spdx")
+
+ manifest_dir = d.getVar('SPDX_MANIFEST_DIR', True)
info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
- sstatefile = os.path.join(spdx_sstate_dir,
- info['pn'] + info['pv'] + ".spdx" )
- info['spdx_temp_dir'] = (d.getVar('SPDX_TEMP_DIR', True) or "")
- info['tar_file'] = os.path.join( info['workdir'], info['pn'] + ".tar.gz" )
+ info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR', True)
+ info['tar_file'] = os.path.join(info['workdir'], info['pn'] + ".tar.gz" )
+
+ # Make sure important dirs exist
+ try:
+ bb.utils.mkdirhier(manifest_dir)
+ bb.utils.mkdirhier(sstatedir)
+ bb.utils.mkdirhier(info['spdx_temp_dir'])
+ except OSError as e:
+ bb.error("SPDX: Could not set up required directories: " + str(e))
+ return
## get everything from cache. use it to decide if
## something needs to be rerun
- cur_ver_code = get_ver_code( info['sourcedir'] )
+ cur_ver_code = get_ver_code(info['sourcedir'])
cache_cur = False
- if not os.path.exists( spdx_sstate_dir ):
- bb.utils.mkdirhier( spdx_sstate_dir )
- if not os.path.exists( info['spdx_temp_dir'] ):
- bb.utils.mkdirhier( info['spdx_temp_dir'] )
- if os.path.exists( sstatefile ):
+ if os.path.exists(sstatefile):
## cache for this package exists. read it in
- cached_spdx = get_cached_spdx( sstatefile )
+ cached_spdx = get_cached_spdx(sstatefile)
if cached_spdx['PackageVerificationCode'] == cur_ver_code:
- bb.warn(info['pn'] + "'s ver code same as cache's. do nothing")
+ bb.warn("SPDX: Verification code for " + info['pn']
+                + " is the same as the cached one; doing nothing")
cache_cur = True
else:
- local_file_info = setup_foss_scan( info,
- True, cached_spdx['Files'] )
+ local_file_info = setup_foss_scan(info, True, cached_spdx['Files'])
else:
- local_file_info = setup_foss_scan( info, False, None )
+ local_file_info = setup_foss_scan(info, False, None)
if cache_cur:
spdx_file_info = cached_spdx['Files']
+ foss_package_info = cached_spdx['Package']
+ foss_license_info = cached_spdx['Licenses']
else:
## setup fossology command
- foss_server = (d.getVar('FOSS_SERVER', True) or "")
- foss_flags = (d.getVar('FOSS_WGET_FLAGS', True) or "")
+ foss_server = d.getVar('FOSS_SERVER', True)
+ foss_flags = d.getVar('FOSS_WGET_FLAGS', True)
+        foss_full_spdx = d.getVar('FOSS_FULL_SPDX', True) == "true"
foss_command = "wget %s --post-file=%s %s"\
- % (foss_flags,info['tar_file'],foss_server)
+ % (foss_flags, info['tar_file'], foss_server)
- #bb.warn(info['pn'] + json.dumps(local_file_info))
- foss_file_info = run_fossology( foss_command )
- spdx_file_info = create_spdx_doc( local_file_info, foss_file_info )
- ## write to cache
- write_cached_spdx(sstatefile,cur_ver_code,spdx_file_info)
+ foss_result = run_fossology(foss_command, foss_full_spdx)
+ if foss_result is not None:
+ (foss_package_info, foss_file_info, foss_license_info) = foss_result
+ spdx_file_info = create_spdx_doc(local_file_info, foss_file_info)
+ ## write to cache
+ write_cached_spdx(sstatefile, cur_ver_code, foss_package_info,
+ spdx_file_info, foss_license_info)
+ else:
+ bb.error("SPDX: Could not communicate with FOSSology server. Command was: " + foss_command)
+ return
## Get document and package level information
- spdx_header_info = get_header_info(info, cur_ver_code, spdx_file_info)
+ spdx_header_info = get_header_info(info, cur_ver_code, foss_package_info)
## CREATE MANIFEST
- create_manifest(info,spdx_header_info,spdx_file_info)
+ create_manifest(info, spdx_header_info, spdx_file_info, foss_license_info)
## clean up the temp stuff
- remove_dir_tree( info['spdx_temp_dir'] )
+ shutil.rmtree(info['spdx_temp_dir'], ignore_errors=True)
if os.path.exists(info['tar_file']):
- remove_file( info['tar_file'] )
+ remove_file(info['tar_file'])
}
addtask spdx after do_patch before do_configure
-def create_manifest(info,header,files):
- with open(info['outfile'], 'w') as f:
+def create_manifest(info, header, files, licenses):
+ import codecs
+ with codecs.open(info['outfile'], mode='w', encoding='utf-8') as f:
+ # Write header
f.write(header + '\n')
+
+ # Write file data
for chksum, block in files.iteritems():
+ f.write("FileName: " + block['FileName'] + '\n')
+ for key, value in block.iteritems():
+ if not key == 'FileName':
+ f.write(key + ": " + value + '\n')
+ f.write('\n')
+
+ # Write license data
+ for id, block in licenses.iteritems():
+ f.write("LicenseID: " + id + '\n')
for key, value in block.iteritems():
- f.write(key + ": " + value)
- f.write('\n')
+ f.write(key + ": " + value + '\n')
f.write('\n')
-def get_cached_spdx( sstatefile ):
+def get_cached_spdx(sstatefile):
import json
+ import codecs
cached_spdx_info = {}
- with open( sstatefile, 'r' ) as f:
+ with codecs.open(sstatefile, mode='r', encoding='utf-8') as f:
try:
cached_spdx_info = json.load(f)
except ValueError as e:
cached_spdx_info = None
return cached_spdx_info
-def write_cached_spdx( sstatefile, ver_code, files ):
+def write_cached_spdx(sstatefile, ver_code, package_info, files, license_info):
import json
+ import codecs
spdx_doc = {}
spdx_doc['PackageVerificationCode'] = ver_code
spdx_doc['Files'] = {}
spdx_doc['Files'] = files
- with open( sstatefile, 'w' ) as f:
+ spdx_doc['Package'] = {}
+ spdx_doc['Package'] = package_info
+ spdx_doc['Licenses'] = {}
+ spdx_doc['Licenses'] = license_info
+ with codecs.open(sstatefile, mode='w', encoding='utf-8') as f:
f.write(json.dumps(spdx_doc))
-def setup_foss_scan( info, cache, cached_files ):
+def setup_foss_scan(info, cache, cached_files):
import errno, shutil
import tarfile
file_info = {}
cache_dict = {}
- for f_dir, f in list_files( info['sourcedir'] ):
- full_path = os.path.join( f_dir, f )
+ for f_dir, f in list_files(info['sourcedir']):
+ full_path = os.path.join(f_dir, f)
abs_path = os.path.join(info['sourcedir'], full_path)
- dest_dir = os.path.join( info['spdx_temp_dir'], f_dir )
- dest_path = os.path.join( info['spdx_temp_dir'], full_path )
- try:
- stats = os.stat(abs_path)
- except OSError as e:
- bb.warn( "Stat failed" + str(e) + "\n")
- continue
-
- checksum = hash_file( abs_path )
- mtime = time.asctime(time.localtime(stats.st_mtime))
-
- ## retain cache information if it exists
- file_info[checksum] = {}
- if cache and checksum in cached_files:
- file_info[checksum] = cached_files[checksum]
- else:
- file_info[checksum]['FileName'] = full_path
-
- try:
- os.makedirs( dest_dir )
- except OSError as e:
- if e.errno == errno.EEXIST and os.path.isdir(dest_dir):
- pass
+ dest_dir = os.path.join(info['spdx_temp_dir'], f_dir)
+ dest_path = os.path.join(info['spdx_temp_dir'], full_path)
+
+ checksum = hash_file(abs_path)
+        if checksum is not None:
+ file_info[checksum] = {}
+ ## retain cache information if it exists
+ if cache and checksum in cached_files:
+ file_info[checksum] = cached_files[checksum]
+ ## have the file included in what's sent to the FOSSology server
else:
- bb.warn( "mkdir failed " + str(e) + "\n" )
- continue
-
- if(cache and checksum not in cached_files) or not cache:
- try:
- shutil.copyfile( abs_path, dest_path )
- except shutil.Error as e:
- bb.warn( str(e) + "\n" )
- except IOError as e:
- bb.warn( str(e) + "\n" )
+ file_info[checksum]['FileName'] = full_path
+ try:
+ bb.utils.mkdirhier(dest_dir)
+ shutil.copyfile(abs_path, dest_path)
+ except OSError as e:
+ bb.warn("SPDX: mkdirhier failed: " + str(e))
+ except shutil.Error as e:
+ bb.warn("SPDX: copyfile failed: " + str(e))
+ except IOError as e:
+ bb.warn("SPDX: copyfile failed: " + str(e))
+ else:
+ bb.warn("SPDX: Could not get checksum for file: " + f)
- with tarfile.open( info['tar_file'], "w:gz" ) as tar:
- tar.add( info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']) )
- tar.close()
+ with tarfile.open(info['tar_file'], "w:gz") as tar:
+ tar.add(info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']))
return file_info
-
-def remove_dir_tree( dir_name ):
- import shutil
+def remove_file(file_name):
try:
- shutil.rmtree( dir_name )
- except:
- pass
-
-def remove_file( file_name ):
- try:
- os.remove( file_name )
+ os.remove(file_name)
except OSError as e:
pass
-def list_files( dir ):
- for root, subFolders, files in os.walk( dir ):
+def list_files(dir):
+ for root, subFolders, files in os.walk(dir):
for f in files:
- rel_root = os.path.relpath( root, dir )
+ rel_root = os.path.relpath(root, dir)
yield rel_root, f
return
-def hash_file( file_name ):
+def hash_file(file_name):
try:
- f = open( file_name, 'rb' )
- data_string = f.read()
+ with open(file_name, 'rb') as f:
+ data_string = f.read()
+ sha1 = hash_string(data_string)
+ return sha1
except:
- return None
- finally:
- f.close()
- sha1 = hash_string( data_string )
- return sha1
+ return None
-def hash_string( data ):
+def hash_string(data):
import hashlib
sha1 = hashlib.sha1()
- sha1.update( data )
+ sha1.update(data)
return sha1.hexdigest()
-def run_fossology( foss_command ):
+def run_fossology(foss_command, full_spdx):
import string, re
import subprocess
p = subprocess.Popen(foss_command.split(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
foss_output, foss_error = p.communicate()
-
- records = []
- records = re.findall('FileName:.*?</text>', foss_output, re.S)
+ if p.returncode != 0:
+ return None
+
+ foss_output = unicode(foss_output, "utf-8")
+ foss_output = string.replace(foss_output, '\r', '')
+
+ # Package info
+ package_info = {}
+ if full_spdx:
+        # All mandatory, only one occurrence
+ package_info['PackageCopyrightText'] = re.findall('PackageCopyrightText: (.*?</text>)', foss_output, re.S)[0]
+ package_info['PackageLicenseDeclared'] = re.findall('PackageLicenseDeclared: (.*)', foss_output)[0]
+ package_info['PackageLicenseConcluded'] = re.findall('PackageLicenseConcluded: (.*)', foss_output)[0]
+ # These may be more than one
+ package_info['PackageLicenseInfoFromFiles'] = re.findall('PackageLicenseInfoFromFiles: (.*)', foss_output)
+ else:
+ DEFAULT = "NOASSERTION"
+ package_info['PackageCopyrightText'] = "<text>" + DEFAULT + "</text>"
+ package_info['PackageLicenseDeclared'] = DEFAULT
+ package_info['PackageLicenseConcluded'] = DEFAULT
+ package_info['PackageLicenseInfoFromFiles'] = []
+ # File info
file_info = {}
+ records = []
+ # FileName is also in PackageFileName, so we match on FileType as well.
+ records = re.findall('FileName:.*?FileType:.*?</text>', foss_output, re.S)
for rec in records:
- rec = string.replace( rec, '\r', '' )
- chksum = re.findall( 'FileChecksum: SHA1: (.*)\n', rec)[0]
+ chksum = re.findall('FileChecksum: SHA1: (.*)\n', rec)[0]
file_info[chksum] = {}
- file_info[chksum]['FileCopyrightText'] = re.findall( 'FileCopyrightText: '
+ file_info[chksum]['FileCopyrightText'] = re.findall('FileCopyrightText: '
+ '(.*?</text>)', rec, re.S )[0]
- fields = ['FileType','LicenseConcluded',
- 'LicenseInfoInFile','FileName']
+ fields = ['FileName', 'FileType', 'LicenseConcluded', 'LicenseInfoInFile']
for field in fields:
file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0]
- return file_info
+ # Licenses
+ license_info = {}
+ licenses = []
+ licenses = re.findall('LicenseID:.*?LicenseName:.*?\n', foss_output, re.S)
+ for lic in licenses:
+ license_id = re.findall('LicenseID: (.*)\n', lic)[0]
+ license_info[license_id] = {}
+ license_info[license_id]['ExtractedText'] = re.findall('ExtractedText: (.*?</text>)', lic, re.S)[0]
+ license_info[license_id]['LicenseName'] = re.findall('LicenseName: (.*)', lic)[0]
+
+ return (package_info, file_info, license_info)
-def create_spdx_doc( file_info, scanned_files ):
+def create_spdx_doc(file_info, scanned_files):
import json
## push foss changes back into cache
for chksum, lic_info in scanned_files.iteritems():
if chksum in file_info:
- file_info[chksum]['FileName'] = file_info[chksum]['FileName']
file_info[chksum]['FileType'] = lic_info['FileType']
file_info[chksum]['FileChecksum: SHA1'] = chksum
file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile']
file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded']
file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText']
else:
- bb.warn(lic_info['FileName'] + " : " + chksum
+ bb.warn("SPDX: " + lic_info['FileName'] + " : " + chksum
+ " : is not in the local file info: "
- + json.dumps(lic_info,indent=1))
+ + json.dumps(lic_info, indent=1))
return file_info
-def get_ver_code( dirname ):
+def get_ver_code(dirname):
chksums = []
- for f_dir, f in list_files( dirname ):
- try:
- stats = os.stat(os.path.join(dirname,f_dir,f))
- except OSError as e:
- bb.warn( "Stat failed" + str(e) + "\n")
- continue
- chksums.append(hash_file(os.path.join(dirname,f_dir,f)))
- ver_code_string = ''.join( chksums ).lower()
- ver_code = hash_string( ver_code_string )
+ for f_dir, f in list_files(dirname):
+ hash = hash_file(os.path.join(dirname, f_dir, f))
+        if hash is not None:
+ chksums.append(hash)
+ else:
+            bb.warn("SPDX: Could not hash file: " + os.path.join(dirname, f_dir, f))
+ ver_code_string = ''.join(chksums).lower()
+ ver_code = hash_string(ver_code_string)
return ver_code
-def get_header_info( info, spdx_verification_code, spdx_files ):
+def get_header_info(info, spdx_verification_code, package_info):
"""
Put together the header SPDX information.
Eventually this needs to become a lot less
@@ -271,14 +310,12 @@ def get_header_info( info, spdx_verification_code, spdx_files ):
head = []
DEFAULT = "NOASSERTION"
- #spdx_verification_code = get_ver_code( info['sourcedir'] )
- package_checksum = ''
- if os.path.exists(info['tar_file']):
- package_checksum = hash_file( info['tar_file'] )
- else:
+ package_checksum = hash_file(info['tar_file'])
+ if package_checksum is None:
package_checksum = DEFAULT
## document level information
+ head.append("## SPDX Document Information")
head.append("SPDXVersion: " + info['spdx_version'])
head.append("DataLicense: " + info['data_license'])
head.append("DocumentComment: <text>SPDX for "
@@ -286,9 +323,11 @@ def get_header_info( info, spdx_verification_code, spdx_files ):
head.append("")
## Creator information
- now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
+ ## Note that this does not give time in UTC.
+ now = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
head.append("## Creation Information")
- head.append("Creator: fossology-spdx")
+ ## Tools are supposed to have a version, but FOSSology+SPDX provides none.
+ head.append("Creator: Tool: FOSSology+SPDX")
head.append("Created: " + now)
head.append("CreatorComment: <text>UNO</text>")
head.append("")
@@ -297,21 +336,26 @@ def get_header_info( info, spdx_verification_code, spdx_files ):
head.append("## Package Information")
head.append("PackageName: " + info['pn'])
head.append("PackageVersion: " + info['pv'])
- head.append("PackageDownloadLocation: " + DEFAULT)
- head.append("PackageSummary: <text></text>")
head.append("PackageFileName: " + os.path.basename(info['tar_file']))
head.append("PackageSupplier: Person:" + DEFAULT)
+ head.append("PackageDownloadLocation: " + DEFAULT)
+ head.append("PackageSummary: <text></text>")
head.append("PackageOriginator: Person:" + DEFAULT)
head.append("PackageChecksum: SHA1: " + package_checksum)
head.append("PackageVerificationCode: " + spdx_verification_code)
head.append("PackageDescription: <text>" + info['pn']
+ " version " + info['pv'] + "</text>")
head.append("")
- head.append("PackageCopyrightText: <text>" + DEFAULT + "</text>")
+ head.append("PackageCopyrightText: "
+ + package_info['PackageCopyrightText'])
head.append("")
- head.append("PackageLicenseDeclared: " + DEFAULT)
- head.append("PackageLicenseConcluded: " + DEFAULT)
- head.append("PackageLicenseInfoFromFiles: " + DEFAULT)
+ head.append("PackageLicenseDeclared: "
+ + package_info['PackageLicenseDeclared'])
+ head.append("PackageLicenseConcluded: "
+ + package_info['PackageLicenseConcluded'])
+
+ for licref in package_info['PackageLicenseInfoFromFiles']:
+ head.append("PackageLicenseInfoFromFiles: " + licref)
head.append("")
## header for file level
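
For orientation, with the NOASSERTION defaults (FOSS_FULL_SPDX not set to "true") the package block assembled by get_header_info() comes out roughly as follows (package name and version illustrative):

    ## Package Information
    PackageName: zlib
    PackageVersion: 1.2.8
    PackageFileName: zlib.tar.gz
    PackageSupplier: Person:NOASSERTION
    PackageDownloadLocation: NOASSERTION
    PackageSummary: <text></text>
    PackageOriginator: Person:NOASSERTION
    PackageChecksum: SHA1: <sha1 of the uploaded tarball>
    PackageVerificationCode: <sha1 over the concatenated file checksums>
    PackageDescription: <text>zlib version 1.2.8</text>

    PackageCopyrightText: <text>NOASSERTION</text>

    PackageLicenseDeclared: NOASSERTION
    PackageLicenseConcluded: NOASSERTION
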
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 90119732dd..2f0632af89 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -21,9 +21,12 @@ SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
-SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/"
+# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
+SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/all/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
+# Archive the sources for many architectures in one deploy folder
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
SSTATE_SCAN_FILES ?= "*.la *-config *_config"
SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
@@ -32,24 +35,34 @@ BB_HASHFILENAME = "${SSTATE_EXTRAPATH} ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
-SSTATEPREINSTFUNCS ?= ""
-SSTATEPOSTINSTFUNCS ?= ""
+SSTATECREATEFUNCS = "sstate_hardcode_path"
+SSTATEPOSTCREATEFUNCS = ""
+SSTATEPREINSTFUNCS = ""
+SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
+SSTATEPOSTINSTFUNCS = ""
EXTRA_STAGING_FIXMES ?= ""
+SIGGEN_LOCKEDSIGS_CHECK_LEVEL ?= 'error'
+
# Specify dirs in which the shell function is executed and don't use ${B}
# as default dirs to avoid possible race about ${B} with other task.
sstate_create_package[dirs] = "${SSTATE_BUILDDIR}"
sstate_unpack_package[dirs] = "${SSTATE_INSTDIR}"
+# Do not run sstate_hardcode_path() in ${B}:
+# ${B} may be removed by cmake_do_configure() while
+# sstate_hardcode_path() is running.
+sstate_hardcode_path[dirs] = "${SSTATE_BUILDDIR}"
+
python () {
if bb.data.inherits_class('native', d):
d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH'))
elif bb.data.inherits_class('crosssdk', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}"))
+ d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross', d):
d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
elif bb.data.inherits_class('nativesdk', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}"))
+ d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross-canadian', d):
d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all":
@@ -182,7 +195,29 @@ def sstate_install(ss, d):
if search_output != "":
match.append("Matched in %s" % search_output.rstrip())
if match:
- bb.warn("The recipe %s is trying to install files into a shared area when those files already exist. Those files and their manifest location are:\n %s\nPlease verify which package should provide the above files." % (d.getVar('PN', True), "\n ".join(match)))
+ bb.error("The recipe %s is trying to install files into a shared " \
+ "area when those files already exist. Those files and their manifest " \
+ "location are:\n %s\nPlease verify which recipe should provide the " \
+ "above files.\nThe build has stopped as continuing in this scenario WILL " \
+ "break things, if not now, possibly in the future (we've seen builds fail " \
+ "several months later). If the system knew how to recover from this " \
+ "automatically it would however there are several different scenarios " \
+ "which can result in this and we don't know which one this is. It may be " \
+ "you have switched providers of something like virtual/kernel (e.g. from " \
+ "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
+ "clean task for both recipes and it will resolve this error. It may be " \
+ "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
+ "those recipes should again resolve this error however switching " \
+ "DISTRO_FEATURES on an existing build directory is not supported, you " \
+ "should really clean out tmp and rebuild (reusing sstate should be safe). " \
+ "It could be the overlapping files detected are harmless in which case " \
+ "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
+ "also be your build is including two different conflicting versions of " \
+ "things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
+ "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
+ "sharing the error and filelist above." % \
+ (d.getVar('PN', True), "\n ".join(match)))
+        bb.fatal("If the above message is too much, the short version is: you are advised to wipe out tmp and rebuild (reusing sstate is fine); that will likely fix things in most (but not all) cases.")
# Write out the manifest
f = open(manifest, "w")
@@ -239,16 +274,32 @@ def sstate_installpkg(ss, d):
d.setVar('SSTATE_INSTDIR', sstateinst)
d.setVar('SSTATE_PKG', sstatepkg)
- for preinst in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split():
- bb.build.exec_func(preinst, d)
+ for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split():
+ bb.build.exec_func(f, d)
- bb.build.exec_func('sstate_unpack_package', d)
+ for state in ss['dirs']:
+ prepdir(state[1])
+ os.rename(sstateinst + state[0], state[1])
+ sstate_install(ss, d)
+ for plain in ss['plaindirs']:
+ workdir = d.getVar('WORKDIR', True)
+ src = sstateinst + "/" + plain.replace(workdir, '')
+ dest = plain
+ bb.utils.mkdirhier(src)
+ prepdir(dest)
+ os.rename(src, dest)
+
+ return True
+
+python sstate_hardcode_path_unpack () {
# Fixup hardcoded paths
#
# Note: The logic below must match the reverse logic in
# sstate_hardcode_path(d)
+ import subprocess
+ sstateinst = d.getVar('SSTATE_INSTDIR', True)
fixmefn = sstateinst + "fixmepath"
if os.path.isfile(fixmefn):
staging = d.getVar('STAGING_DIR', True)
@@ -276,21 +327,7 @@ def sstate_installpkg(ss, d):
# Need to remove this or we'd copy it into the target directory and may
# conflict with another writer
os.remove(fixmefn)
-
- for state in ss['dirs']:
- prepdir(state[1])
- os.rename(sstateinst + state[0], state[1])
- sstate_install(ss, d)
-
- for plain in ss['plaindirs']:
- workdir = d.getVar('WORKDIR', True)
- src = sstateinst + "/" + plain.replace(workdir, '')
- dest = plain
- bb.utils.mkdirhier(src)
- prepdir(dest)
- os.rename(src, dest)
-
- return True
+}
def sstate_clean_cachefile(ss, d):
import oe.path
@@ -395,7 +432,7 @@ python sstate_cleanall() {
sstate_clean(shared_state, ld)
}
-def sstate_hardcode_path(d):
+python sstate_hardcode_path () {
import subprocess, platform
# Need to remove hardcoded paths and fix these when we install the
@@ -413,7 +450,7 @@ def sstate_hardcode_path(d):
sstate_grep_cmd = "grep -l -e '%s'" % (staging)
sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
elif bb.data.inherits_class('cross', d):
- sstate_grep_cmd = "grep -l -e '(%s|%s)'" % (staging_target, staging)
+ sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging)
sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
else:
sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
@@ -449,6 +486,7 @@ def sstate_hardcode_path(d):
else:
bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
subprocess.call(sstate_filelist_relative_cmd, shell=True)
+}
def sstate_package(ss, d):
import oe.path
@@ -506,9 +544,11 @@ def sstate_package(ss, d):
d.setVar('SSTATE_BUILDDIR', sstatebuild)
d.setVar('SSTATE_PKG', sstatepkg)
- sstate_hardcode_path(d)
- bb.build.exec_func('sstate_create_package', d)
-
+
+ for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + ['sstate_create_package'] + \
+ (d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split():
+ bb.build.exec_func(f, d)
+
bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
return
@@ -591,7 +631,8 @@ sstate_create_package () {
if [ "$(ls -A)" ]; then
set +e
tar -czf $TFILE *
- if [ $? -ne 0 ] && [ $? -ne 1 ]; then
+ ret=$?
+ if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
exit 1
fi
set -e
@@ -612,6 +653,8 @@ sstate_unpack_package () {
mkdir -p ${SSTATE_INSTDIR}
cd ${SSTATE_INSTDIR}
tar -xmvzf ${SSTATE_PKG}
+	# Use "! -w ||" to return true for read-only files
+ [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
}
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
@@ -704,6 +747,9 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d):
evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
+ if hasattr(bb.parse.siggen, "checkhashes"):
+ bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)
+
return ret
BB_SETSCENE_DEPVALID = "setscene_depvalid"
@@ -733,15 +779,15 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
if dep in notneeded:
continue
# do_package_write_* and do_package doesn't need do_package
- if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata']:
+ if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
continue
# do_package_write_* and do_package doesn't need do_populate_sysroot, unless is a postinstall dependency
- if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata']:
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
return False
continue
# Native/Cross packages don't exist and are noexec anyway
- if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package']:
+ if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
continue
# Consider sysroot depending on sysroot tasks
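
The new hook lists make the create/unpack pipeline extensible without overriding the core functions; since sstate_package() now runs everything in SSTATECREATEFUNCS/SSTATEPOSTCREATEFUNCS via bb.build.exec_func(), another class can append its own step (function name hypothetical):

    SSTATEPOSTCREATEFUNCS += "my_sstate_postcreate"

    python my_sstate_postcreate () {
        bb.note("created sstate archive: %s" % d.getVar('SSTATE_PKG', True))
    }
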
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index 7c43e7618d..57b2743196 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -106,6 +106,7 @@ python do_populate_sysroot () {
}
do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
+do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
SSTATETASKS += "do_populate_sysroot"
do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
index 8964d3ff5e..d6498d98bb 100644
--- a/meta/classes/syslinux.bbclass
+++ b/meta/classes/syslinux.bbclass
@@ -5,7 +5,7 @@
# Provide syslinux specific functions for building bootable images.
# External variables
-# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
+# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
# ${LABELS} - a list of targets for the automatic config
@@ -17,7 +17,7 @@
# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument
# ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments
-do_bootimg[depends] += "syslinux:do_populate_sysroot \
+do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot"
SYSLINUXCFG = "${S}/syslinux.cfg"
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
index a6ad723dfd..c34884bd38 100644
--- a/meta/classes/systemd.bbclass
+++ b/meta/classes/systemd.bbclass
@@ -55,6 +55,8 @@ fi
systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
+systemd_populate_packages[vardepsexclude] += "OVERRIDES"
+
python systemd_populate_packages() {
if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
@@ -141,10 +143,7 @@ python systemd_populate_packages() {
if has_exactly_one_service:
has_exactly_one_service = len(get_package_var(d, 'SYSTEMD_SERVICE', systemd_packages).split()) == 1
- keys = 'Also' # Conflicts??
- if has_exactly_one_service:
- # single service gets also the /dev/null dummies
- keys = 'Also Conflicts'
+ keys = 'Also'
# scan for all in SYSTEMD_SERVICE[]
for pkg_systemd in systemd_packages.split():
for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index 285c6a9d42..683173854d 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -19,7 +19,7 @@
# Note that order in TEST_SUITES is important (it's the order in which tests run) and it influences test dependencies.
# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf.
-# TEST_LOG_DIR contains a ssh log (what command is running, output and return codes) and a qemu boot log till login
+# TEST_LOG_DIR contains the ssh command log and may contain information about which commands were run, their output and return codes, plus, for qemu, a boot log up to login.
# Booting is handled by this class, and it's not a test in itself.
# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
@@ -30,8 +30,9 @@ TEST_EXPORT_ONLY ?= "0"
DEFAULT_TEST_SUITES = "ping auto"
DEFAULT_TEST_SUITES_pn-core-image-minimal = "ping"
-DEFAULT_TEST_SUITES_pn-core-image-sato = "ping ssh df connman syslog xorg scp vnc date rpm smart dmesg python"
-DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "ping ssh df connman syslog xorg scp vnc date perl ldd gcc rpm smart kernelmodule dmesg python"
+DEFAULT_TEST_SUITES_pn-core-image-sato = "ping ssh df connman syslog xorg scp vnc date rpm smart dmesg python parselogs"
+DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "ping ssh df connman syslog xorg scp vnc date perl ldd gcc rpm smart kernelmodule dmesg python parselogs"
+DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
TEST_QEMUBOOT_TIMEOUT ?= "1000"
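
For reference, the defaults above can be overridden per image from local.conf; a hedged example (image name hypothetical):

INHERIT += "testimage"
TEST_SUITES_pn-my-image = "ping ssh parselogs"
TEST_QEMUBOOT_TIMEOUT = "1500"
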
@@ -53,8 +54,15 @@ do_testimage[nostamp] = "1"
do_testimage[depends] += "${TESTIMAGEDEPENDS}"
do_testimage[lockfiles] += "${TESTIMAGELOCK}"
+python do_testsdk() {
+ testsdk_main(d)
+}
+addtask testsdk
+do_testsdk[nostamp] = "1"
+do_testsdk[depends] += "${TESTIMAGEDEPENDS}"
+do_testsdk[lockfiles] += "${TESTIMAGELOCK}"
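
With the task definitions in place, the SDK tests run against a previously built toolchain installer; a usage sketch:

$ bitbake meta-toolchain             # produces ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
$ bitbake -c testsdk meta-toolchain  # extracts it and runs the oeqa.sdk suites
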
-def get_tests_list(d):
+def get_tests_list(d, type="runtime"):
testsuites = d.getVar("TEST_SUITES", True).split()
bbpath = d.getVar("BBPATH", True).split(':')
@@ -65,12 +73,12 @@ def get_tests_list(d):
if testname != "auto":
found = False
for p in bbpath:
- if os.path.exists(os.path.join(p, 'lib', 'oeqa', 'runtime', testname + '.py')):
- testslist.append("oeqa.runtime." + testname)
+ if os.path.exists(os.path.join(p, 'lib', 'oeqa', type, testname + '.py')):
+ testslist.append("oeqa." + type + "." + testname)
found = True
break
if not found:
- bb.error('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
+            bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/%s under BBPATH' % (testname, type))
if "auto" in testsuites:
def add_auto_list(path):
@@ -78,12 +86,12 @@ def get_tests_list(d):
bb.fatal('Tests directory %s exists but is missing __init__.py' % path)
files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
for f in files:
- module = 'oeqa.runtime.' + f[:-3]
+ module = 'oeqa.' + type + '.' + f[:-3]
if module not in testslist:
testslist.append(module)
for p in bbpath:
- testpath = os.path.join(p, 'lib', 'oeqa', 'runtime')
+ testpath = os.path.join(p, 'lib', 'oeqa', type)
bb.debug(2, 'Searching for tests in %s' % testpath)
if os.path.exists(testpath):
add_auto_list(testpath)
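
Since the lookup is now keyed on the type argument, a layer can ship SDK tests by mirroring the runtime layout under lib/oeqa/sdk; a sketch of the expected layout (layer and file names hypothetical):

meta-mylayer/lib/oeqa/sdk/__init__.py     # must exist, or the "auto" scan aborts
meta-mylayer/lib/oeqa/sdk/mytoolchain.py  # found via "auto" or TEST_SUITES += "mytoolchain"
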
@@ -230,3 +238,86 @@ def testimage_main(d):
target.stop()
testimage_main[vardepsexclude] =+ "BB_ORIGENV"
+
+
+def testsdk_main(d):
+ import unittest
+ import os
+ import glob
+ import oeqa.runtime
+ import oeqa.sdk
+ import time
+ import subprocess
+ from oeqa.oetest import loadTests, runTests
+
+ pn = d.getVar("PN", True)
+ bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
+
+ # tests in TEST_SUITES become required tests
+ # they won't be skipped even if they aren't suitable.
+ # testslist is what we'll actually pass to the unittest loader
+ testslist = get_tests_list(d, "sdk")
+ testsrequired = [t for t in d.getVar("TEST_SUITES", True).split() if t != "auto"]
+
+ sdktestdir = d.expand("${WORKDIR}/testimage-sdk/")
+ bb.utils.remove(sdktestdir, True)
+ bb.utils.mkdirhier(sdktestdir)
+
+ tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh")
+ if not os.path.exists(tcname):
+        bb.fatal("The toolchain is not built. Build it before running the tests: 'bitbake meta-toolchain'.")
+ subprocess.call("cd %s; %s <<EOF\n./tc\nY\nEOF" % (sdktestdir, tcname), shell=True)
+
+ targets = glob.glob(d.expand(sdktestdir + "/tc/sysroots/*${TARGET_VENDOR}-linux*"))
+ if len(targets) > 1:
+        bb.fatal("Error: multiple targets found within the SDK, cannot determine which to test: %s" % str(targets))
+ sdkenv = sdktestdir + "/tc/environment-setup-" + os.path.basename(targets[0])
+
+ class TestContext(object):
+ def __init__(self):
+ self.d = d
+ self.testslist = testslist
+ self.testsrequired = testsrequired
+ self.filesdir = os.path.join(os.path.dirname(os.path.abspath(oeqa.runtime.__file__)),"files")
+ self.sdktestdir = sdktestdir
+ self.sdkenv = sdkenv
+ self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split()
+ self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split()
+            manifest = d.getVar("SDK_MANIFEST", True)
+ try:
+ with open(manifest) as f:
+ self.pkgmanifest = f.read()
+ except IOError as e:
+ bb.fatal("No package manifest file found. Did you build the sdk image?\n%s" % e)
+
+ # test context
+ tc = TestContext()
+
+    # this is a dummy load of tests
+    # we are doing that to find compile errors in the tests themselves
+    # before setting up the SDK and running them
+ try:
+ loadTests(tc, "sdk")
+ except Exception as e:
+ import traceback
+ bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+
+ try:
+ starttime = time.time()
+ result = runTests(tc, "sdk")
+ stoptime = time.time()
+ if result.wasSuccessful():
+ bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
+ msg = "%s - OK - All required tests passed" % pn
+ skipped = len(result.skipped)
+ if skipped:
+ msg += " (skipped=%d)" % skipped
+ bb.plain(msg)
+ else:
+        raise bb.build.FuncFailed("%s - FAILED - check the task log and the commands log" % pn)
+    finally:
+        bb.utils.remove(sdktestdir, True)
+
+testsdk_main[vardepsexclude] =+ "BB_ORIGENV"
+
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
index 95499a5cdd..55d0d28157 100644
--- a/meta/classes/toaster.bbclass
+++ b/meta/classes/toaster.bbclass
@@ -71,7 +71,7 @@ python toaster_layerinfo_dumpdata() {
layer_url = 'http://layers.openembedded.org/layerindex/layer/{layer}/'
layer_url_name = _get_url_map_name(layer_name)
- layer_info['name'] = layer_name
+ layer_info['name'] = layer_url_name
layer_info['local_path'] = layer_path
layer_info['layer_index_url'] = layer_url.format(layer=layer_url_name)
layer_info['version'] = _get_layer_version_information(layer_path)
@@ -149,14 +149,26 @@ python toaster_image_dumpdata() {
image_name = d.getVar('IMAGE_NAME', True);
image_info_data = {}
+ artifact_info_data = {}
+ # collect all artifacts
for dirpath, dirnames, filenames in os.walk(deploy_dir_image):
for fn in filenames:
- if fn.startswith(image_name):
- image_output = os.path.join(dirpath, fn)
- image_info_data[image_output] = os.stat(image_output).st_size
+ try:
+ if fn.startswith(image_name):
+ image_output = os.path.join(dirpath, fn)
+ image_info_data[image_output] = os.stat(image_output).st_size
+ else:
+ import stat
+ artifact_path = os.path.join(dirpath, fn)
+ filestat = os.stat(artifact_path)
+ if stat.S_ISREG(filestat.st_mode):
+ artifact_info_data[artifact_path] = filestat.st_size
+ except OSError as e:
+ bb.event.fire(bb.event.MetadataEvent("OSErrorException", e), d)
bb.event.fire(bb.event.MetadataEvent("ImageFileSize",image_info_data), d)
+ bb.event.fire(bb.event.MetadataEvent("ArtifactFileSize",artifact_info_data), d)
}
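
Both payloads travel as bb.event.MetadataEvent, so any UI or class-level handler can pick up the new artifact sizes; a minimal sketch (handler name hypothetical; in this bitbake version the payload is carried in the event's _localdata attribute):

addhandler artifact_size_listener
artifact_size_listener[eventmask] = "bb.event.MetadataEvent"

python artifact_size_listener() {
    if e.type == "ArtifactFileSize":
        for path, size in e._localdata.items():
            bb.debug(1, "artifact %s is %d bytes" % (path, size))
}
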
@@ -187,8 +199,10 @@ python toaster_collect_task_stats() {
def _read_stats(filename):
cpu_usage = 0
disk_io = 0
- startio = ''
- endio = ''
+ startio = '0'
+ endio = '0'
+ started = '0'
+ ended = '0'
pn = ''
taskname = ''
statinfo = {}
@@ -198,20 +212,28 @@ python toaster_collect_task_stats() {
k,v = line.strip().split(": ", 1)
statinfo[k] = v
- try:
- cpu_usage = statinfo["CPU usage"]
- endio = statinfo["EndTimeIO"]
- startio = statinfo["StartTimeIO"]
- except KeyError:
- pass # we may have incomplete data here
+ if "CPU usage" in statinfo:
+ cpu_usage = str(statinfo["CPU usage"]).strip('% \n\r')
+
+ if "EndTimeIO" in statinfo:
+ endio = str(statinfo["EndTimeIO"]).strip('% \n\r')
+
+ if "StartTimeIO" in statinfo:
+ startio = str(statinfo["StartTimeIO"]).strip('% \n\r')
+
+ if "Started" in statinfo:
+ started = str(statinfo["Started"]).strip('% \n\r')
+
+ if "Ended" in statinfo:
+ ended = str(statinfo["Ended"]).strip('% \n\r')
+
+ disk_io = int(endio) - int(startio)
- if startio and endio:
- disk_io = int(endio.strip('\n ')) - int(startio.strip('\n '))
+ elapsed_time = float(ended) - float(started)
- if cpu_usage:
- cpu_usage = float(cpu_usage.strip('% \n'))
+ cpu_usage = float(cpu_usage)
- return {'cpu_usage': cpu_usage, 'disk_io': disk_io}
+ return {'cpu_usage': cpu_usage, 'disk_io': disk_io, 'elapsed_time': elapsed_time}
if isinstance(e, (bb.build.TaskSucceeded, bb.build.TaskFailed)):
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index b9f2aea930..ea6aff0780 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -7,6 +7,9 @@ REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
# This function creates an environment-setup-script for use in a deployable SDK
toolchain_create_sdk_env_script () {
# Create environment setup script
+ sdkpathnative=${7:-${SDKPATHNATIVE}}
+ prefix=${6:-${prefix_nativesdk}}
+ bindir=${5:-${bindir_nativesdk}}
libdir=${4:-${libdir}}
sysroot=${3:-${SDKTARGETSYSROOT}}
multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}}
@@ -14,14 +17,19 @@ toolchain_create_sdk_env_script () {
rm -f $script
touch $script
echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
- echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_SYS}:$PATH' >> $script
+ EXTRAPATH=""
+ for i in ${CANADIANEXTRAOS}; do
+ EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
+ done
+ echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+	echo "export CCACHE_PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$CCACHE_PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
- echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
+ echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
- echo 'export OECORE_ACLOCAL_OPTS="-I ${SDKPATHNATIVE}/usr/share/aclocal"' >> $script
- echo 'export PYTHONHOME=${SDKPATHNATIVE}${prefix_nativesdk}' >> $script
+ echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
+ echo "export PYTHONHOME=$sdkpathnative$prefix" >> $script
toolchain_shared_env_script
}
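
The three new positional parameters default to the regular nativesdk locations, so existing callers are unaffected while new callers can point the script at a relocated native root. A hedged call sketch (paths hypothetical; empty arguments pick up the defaults):

# Standard SDK: rely on the defaults for all seven parameters
toolchain_create_sdk_env_script

# Relocated native sysroot: leave script, target sys, sysroot and libdir
# empty to keep their defaults, then override bindir, prefix, sdkpathnative
toolchain_create_sdk_env_script '' '' '' '' /opt/sdk/usr/bin /opt/sdk/usr /opt/sdk
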
@@ -33,6 +41,7 @@ toolchain_create_tree_env_script () {
rm -f $script
touch $script
echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script
+ echo 'export CCACHE_PATH=${STAGING_DIR_NATIVE}/usr/bin:${CCACHE_PATH}' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
@@ -64,15 +73,31 @@ toolchain_shared_env_script () {
echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
+ echo 'export KCFLAGS="--sysroot=$SDKTARGETSYSROOT"' >> $script
echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
echo 'export ARCH=${ARCH}' >> $script
echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
+
+ cat >> $script <<EOF
+
+# Source any environment setup sub-scripts
+if [ -d "\$OECORE_TARGET_SYSROOT/environment-setup.d" ]; then
+ for envfile in \$OECORE_TARGET_SYSROOT/environment-setup.d/*.sh; do
+ source \$envfile
+ done
+fi
+if [ -d "\$OECORE_NATIVE_SYSROOT/environment-setup.d" ]; then
+ for envfile in \$OECORE_NATIVE_SYSROOT/environment-setup.d/*.sh; do
+ source \$envfile
+ done
+fi
+EOF
}
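
The drop-in directories let individual SDK packages extend the environment script without modifying it; a sketch of a nativesdk recipe installing such a fragment (recipe and file names hypothetical):

do_install_append () {
	install -d ${D}${SDKPATHNATIVE}/environment-setup.d
	install -m 0644 ${WORKDIR}/mytool-env.sh \
		${D}${SDKPATHNATIVE}/environment-setup.d/mytool.sh
}
FILES_${PN} += "${SDKPATHNATIVE}/environment-setup.d"
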
#we get the cached site config in the runtime
TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d, True)}"
-TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
+TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
TOOLCHAIN_NEED_CONFIGSITE_CACHE = "${TCLIBC} ncurses"
#This function creates a site config file
@@ -112,5 +137,7 @@ python __anonymous () {
deps = ""
for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE', True) or "").split():
deps += " %s:do_populate_sysroot" % dep
+ for variant in (d.getVar('MULTILIB_VARIANTS', True) or "").split():
+ deps += " %s-%s:do_populate_sysroot" % (variant, dep)
d.appendVarFlag('do_configure', 'depends', deps)
}
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
index 8ac1b71bc2..cb061af348 100644
--- a/meta/classes/uboot-config.bbclass
+++ b/meta/classes/uboot-config.bbclass
@@ -31,31 +31,19 @@ python () {
return
ubootconfig = (d.getVar('UBOOT_CONFIG', True) or "").split()
- if len(ubootconfig) > 1:
- raise bb.parse.SkipPackage('You can only have a single default for UBOOT_CONFIG.')
+ if len(ubootconfig) > 0:
+ for config in ubootconfig:
+ for f, v in ubootconfigflags.items():
+ if config == f:
+ items = v.split(',')
+ if items[0] and len(items) > 2:
+ raise bb.parse.SkipPackage('Only config,images can be specified!')
+ d.appendVar('UBOOT_MACHINE', ' ' + items[0])
+ # IMAGE_FSTYPES appending
+ if len(items) > 1 and items[1]:
+ bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
+ d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
+ break
elif len(ubootconfig) == 0:
- raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
- ubootconfig = ubootconfig[0]
-
- for f, v in ubootconfigflags.items():
- if f == 'defaultval':
- continue
-
- items = v.split(',')
- if items[0] and len(items) > 2:
- raise bb.parse.SkipPackage('Only config,images can be specified!')
-
- if ubootconfig == f:
- bb.debug(1, "Setting UBOOT_MACHINE to %s." % items[0])
- d.setVar('UBOOT_MACHINE', items[0])
-
- # IMAGE_FSTYPES appending
- if len(items) > 1 and items[1]:
- bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
- d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
-
- # Go out as we found a match!
- break
- else:
- raise bb.parse.SkipPackage("UBOOT_CONFIG %s is not supported" % ubootconfig)
+ raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
}
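
With the single-configuration restriction gone, a machine configuration can request several U-Boot builds at once; each flag value holds "config[,images]" as parsed above. A hedged example (board names hypothetical):

UBOOT_CONFIG ??= "sd nand"
UBOOT_CONFIG[sd] = "myboard_sd_config,sdcard"
UBOOT_CONFIG[nand] = "myboard_nand_config"
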
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
new file mode 100644
index 0000000000..51391dbc4a
--- /dev/null
+++ b/meta/classes/uninative.bbclass
@@ -0,0 +1,44 @@
+NATIVELSBSTRING = "universal"
+
+UNINATIVE_LOADER = "${STAGING_DIR_NATIVE}/lib/ld-linux-x86-64.so.2"
+
+addhandler uninative_eventhandler
+uninative_eventhandler[eventmask] = "bb.event.BuildStarted"
+
+python uninative_eventhandler() {
+ loader = e.data.getVar("UNINATIVE_LOADER", True)
+ if not os.path.exists(loader):
+ import subprocess
+ cmd = e.data.expand("mkdir -p ${STAGING_DIR}; cd ${STAGING_DIR}; tar -xjf ${COREBASE}/${BUILD_ARCH}-nativesdk-libc.tar.bz2; ${STAGING_DIR}/relocate_sdk.py ${STAGING_DIR_NATIVE} ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${STAGING_BINDIR_NATIVE}/patchelf-uninative")
+ #bb.warn("nativesdk lib extraction: " + cmd)
+ subprocess.check_call(cmd, shell=True)
+}
+
+SSTATEPOSTUNPACKFUNCS_append = " uninative_changeinterp"
+
+python uninative_changeinterp () {
+ import subprocess
+ import stat
+ import oe.qa
+
+ if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
+ return
+
+ sstateinst = d.getVar('SSTATE_INSTDIR', True)
+ for walkroot, dirs, files in os.walk(sstateinst):
+ for file in files:
+ f = os.path.join(walkroot, file)
+ if os.path.islink(f):
+ continue
+ s = os.stat(f)
+ if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
+ continue
+ elf = oe.qa.ELFFile(f)
+ try:
+ elf.open()
+ except:
+ continue
+
+ #bb.warn("patchelf-uninative --set-interpreter %s %s" % (d.getVar("UNINATIVE_LOADER", True), f))
+ subprocess.call("patchelf-uninative --set-interpreter %s %s" % (d.getVar("UNINATIVE_LOADER", True), f), shell=True)
+}
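
The class is opt-in; a sketch of enabling it from local.conf, assuming the prebuilt nativesdk libc tarball it extracts has been placed where the event handler expects it:

INHERIT += "uninative"
# Requires ${COREBASE}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 to exist,
# as unpacked by uninative_eventhandler at BuildStarted time.
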
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
index 19b081d235..a9c0323f95 100644
--- a/meta/classes/update-rc.d.bbclass
+++ b/meta/classes/update-rc.d.bbclass
@@ -48,9 +48,9 @@ fi
updatercd_postrm() {
if type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
- OPT="-r $D"
+ OPT="-f -r $D"
else
- OPT=""
+ OPT="-f"
fi
update-rc.d $OPT ${INITSCRIPT_NAME} remove
fi
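
The added -f flag makes update-rc.d remove the runlevel symlinks even though the initscript itself is still present when the postrm runs. For reference, a recipe consumes this class in the usual way (names hypothetical):

inherit update-rc.d
INITSCRIPT_PACKAGES = "${PN}"
INITSCRIPT_NAME = "mydaemon"
INITSCRIPT_PARAMS = "defaults 90 10"
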
@@ -72,6 +72,7 @@ PACKAGESPLITFUNCS_prepend = "populate_packages_updatercd "
PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_preinst updatercd_postinst"
+populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
python populate_packages_updatercd () {
def update_rcd_auto_depend(pkg):
@@ -121,7 +122,7 @@ python populate_packages_updatercd () {
# Check that this class isn't being inhibited (generally, by
# systemd.bbclass) before doing any work.
- if bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
+ if bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) or \
not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True):
pkgs = d.getVar('INITSCRIPT_PACKAGES', True)
if pkgs == None:
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index a89cb10a4a..421a70a6ab 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -58,7 +58,9 @@ def update_useradd_static_config(d):
newparams = []
for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
- param=param.strip()
+ param = param.strip()
+ if not param:
+ continue
try:
uaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
except:
@@ -194,7 +196,9 @@ def update_useradd_static_config(d):
newparams = []
for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
- param=param.strip()
+ param = param.strip()
+ if not param:
+ continue
try:
# If we're processing multiple lines, we could have left over values here...
gaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index ea15dabb84..e443f845f7 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -24,6 +24,18 @@ if test "x$D" != "x"; then
# Installing into a sysroot
SYSROOT="$D"
OPT="--root $D"
+
+	# Make sure login.defs is there; this is needed to make the debian package
+	# backend work correctly while building the rootfs.
+	# The problem is that if /etc/login.defs is treated as a config file of the
+	# shadow package, then while running preinsts for packages that depend on
+	# shadow, only /etc/login.defs.dpkg-new may exist in the root filesystem.
+ if [ ! -e $D${sysconfdir}/login.defs -a -e $D${sysconfdir}/login.defs.dpkg-new ]; then
+ cp $D${sysconfdir}/login.defs.dpkg-new $D${sysconfdir}/login.defs
+ fi
+
+ # user/group lookups should match useradd/groupadd --root
+ export PSEUDO_PASSWD="$SYSROOT:${STAGING_DIR_NATIVE}"
fi
# If we're not doing a special SSTATE/SYSROOT install
@@ -126,7 +138,7 @@ SYSROOTPOSTFUNC_class-cross = ""
SYSROOTPOSTFUNC_class-native = ""
SYSROOTPOSTFUNC_class-nativesdk = ""
-USERADDSETSCENEDEPS = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
+USERADDSETSCENEDEPS = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
USERADDSETSCENEDEPS_class-cross = ""
USERADDSETSCENEDEPS_class-native = ""
USERADDSETSCENEDEPS_class-nativesdk = ""
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
index 0ee13e04d7..80e90e8777 100644
--- a/meta/classes/utils.bbclass
+++ b/meta/classes/utils.bbclass
@@ -259,11 +259,11 @@ create_cmdline_wrapper () {
echo "Generating wrapper script for $cmd"
mv $cmd $cmd.real
- cmdname=`basename $cmd`.real
+ cmdname=`basename $cmd`
cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
-exec -a $cmd \`dirname \$realpath\`/$cmdname $@ "\$@"
+exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $@ "\$@"
END
chmod +x $cmd
}
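
After the fix the wrapper execs $cmd.real under its real path while passing the fixed arguments first; a usage sketch from a recipe (tool name and arguments hypothetical):

do_install_append () {
	# ${D}${bindir}/mytool becomes a wrapper around mytool.real that
	# always receives --datadir first, then the caller's arguments.
	create_cmdline_wrapper ${D}${bindir}/mytool --datadir=${datadir}/mytool
}
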
@@ -312,6 +312,8 @@ def explode_deps(s):
def base_set_filespath(path, d):
filespath = []
extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
+ # Remove default flag which was used for checking
+ extrapaths = extrapaths.replace("__default:", "")
# Don't prepend empty strings to the path list
if extrapaths != "":
path = extrapaths.split(":") + path
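
The stripped "__default:" token is the placeholder OE-Core seeds FILESEXTRAPATHS with so QA checks can tell whether a recipe modified it; layers extend the search path in the usual way, e.g. from a bbappend:

FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
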
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
index 0b7803b251..9ff664ac41 100644
--- a/meta/classes/vala.bbclass
+++ b/meta/classes/vala.bbclass
@@ -1,6 +1,3 @@
-# Vala has problems with multiple concurrent invocations
-PARALLEL_MAKE = ""
-
# Everyone needs vala-native and targets need vala, too,
# because that is where target builds look for .vapi files.
#