Diffstat (limited to 'classes')
-rw-r--r--  classes/cni_networking.bbclass                |  18
-rw-r--r--  classes/container-host.bbclass                |  15
-rw-r--r--  classes/image-oci-sloci-image.inc             |  89
-rw-r--r--  classes/image-oci-umoci.inc                   | 135
-rw-r--r--  classes/image-oci.bbclass                     |  89
-rw-r--r--  classes/meta-virt-container-cfg.bbclass       |  10
-rw-r--r--  classes/meta-virt-depreciated-warning.bbclass |   3
-rw-r--r--  classes/meta-virt-hosts.bbclass               |  29
-rw-r--r--  classes/meta-virt-xen-cfg.bbclass             |   6
-rw-r--r--  classes/qemuboot-testimage-network.bbclass    |  51
-rw-r--r--  classes/qemuboot-xen-defaults.bbclass         |  42
-rw-r--r--  classes/qemuboot-xen-dtb.bbclass              | 210
12 files changed, 628 insertions, 69 deletions
diff --git a/classes/cni_networking.bbclass b/classes/cni_networking.bbclass
new file mode 100644
index 00000000..2e81d63f
--- /dev/null
+++ b/classes/cni_networking.bbclass
@@ -0,0 +1,18 @@
+DEPENDS:append:class-target = " cni"
+
+PACKAGES:prepend = "${PN}-cni "
+
+FILES:${PN}-cni = "${sysconfdir}/cni/net.d/*"
+
+RDEPENDS:${PN}-cni = "cni"
+
+do_install:append() {
+ if [ -z "${CNI_NETWORKING_FILES}" ]; then
+ bbfatal "cni-networking was inherited, but no networking configuration was provided via CNI_NETWORKING_FILES"
+ fi
+ install -d "${D}/${sysconfdir}/cni/net.d/"
+ for f in ${CNI_NETWORKING_FILES}; do
+ conf_name="$(basename $f)"
+ install -D -m 0644 "$f" "${D}/${sysconfdir}/cni/net.d/$conf_name"
+ done
+}
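
For reference, a recipe providing CNI configuration would inherit this class and point CNI_NETWORKING_FILES at its configuration files. A minimal sketch (the recipe fragment and .conf name are hypothetical):

    # hypothetical recipe fragment
    inherit cni_networking
    SRC_URI += "file://10-mynet.conf"
    CNI_NETWORKING_FILES = "${WORKDIR}/10-mynet.conf"

The class then packages the files under ${sysconfdir}/cni/net.d in a ${PN}-cni package that runtime-depends on cni.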
diff --git a/classes/container-host.bbclass b/classes/container-host.bbclass
new file mode 100644
index 00000000..99a75fea
--- /dev/null
+++ b/classes/container-host.bbclass
@@ -0,0 +1,15 @@
+# This class is the collection point for automatic dependencies,
+# package installs, rootfs postprocessing, etc., that are used
+# by container host images and recipes.
+
+# It is currently largely empty and simply adds an RDEPENDS, but
+# it will expand to CRI/CNI configurations in the future.
+#
+
+RDEPENDS:${PN}:append = " container-host-config"
+
+do_install:append() {
+ # placeholder for additional package install, or configuration
+ # of the rootfs
+ true
+}
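
A container host recipe or image opts in by inheriting the class; a sketch:

    # recipe fragment for a container host component
    inherit container-host
    # ${PN} now pulls in container-host-config via RDEPENDS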
diff --git a/classes/image-oci-sloci-image.inc b/classes/image-oci-sloci-image.inc
new file mode 100644
index 00000000..9248489f
--- /dev/null
+++ b/classes/image-oci-sloci-image.inc
@@ -0,0 +1,89 @@
+IMAGE_CMD:oci() {
+ sloci_options=""
+
+ bbdebug 1 "OCI image settings:"
+ bbdebug 1 " author: ${OCI_IMAGE_AUTHOR}"
+ bbdebug 1 " author email: ${OCI_IMAGE_AUTHOR_EMAIL}"
+ bbdebug 1 " tag: ${OCI_IMAGE_TAG}"
+ bbdebug 1 " arch: ${OCI_IMAGE_ARCH}"
+ bbdebug 1 " subarch: ${OCI_IMAGE_SUBARCH}"
+ bbdebug 1 " entrypoint: ${OCI_IMAGE_ENTRYPOINT}"
+ bbdebug 1 " entrypoing args: ${OCI_IMAGE_ENTRYPOINT_ARGS}"
+ bbdebug 1 " labels: ${OCI_IMAGE_LABELS}"
+ bbdebug 1 " uid: ${OCI_IMAGE_RUNTIME_UID}"
+ bbdebug 1 " working dir: ${OCI_IMAGE_WORKINGDIR}"
+ bbdebug 1 " env vars: ${OCI_IMAGE_ENV_VARS}"
+ bbdebug 1 " ports: ${OCI_IMAGE_PORTS}"
+
+ # Change into the image deploy directory so that output operations
+ # don't capture long absolute paths or the build location.
+ cd ${IMGDEPLOYDIR}
+
+ oci_image_label_options=""
+ if [ -n "${OCI_IMAGE_LABELS}" ]; then
+ for l in ${OCI_IMAGE_LABELS}; do
+ oci_image_label_options="${oci_image_label_options} --label ${l}"
+ done
+ fi
+ oci_image_env_options=""
+ if [ -n "${OCI_IMAGE_ENV_VARS}" ]; then
+ for l in ${OCI_IMAGE_ENV_VARS}; do
+ oci_image_env_options="${oci_image_env_options} --env ${l}"
+ done
+ fi
+ oci_image_port_options=""
+ if [ -n "${OCI_IMAGE_PORTS}" ]; then
+ for l in ${OCI_IMAGE_PORTS}; do
+ oci_image_port_options="${oci_image_port_options} --port ${l}"
+ done
+ fi
+
+ if [ -n "${OCI_IMAGE_RUNTIME_UID}" ]; then
+ oci_image_user_options="--user ${OCI_IMAGE_RUNTIME_UID}"
+ fi
+
+ if [ -n "${OCI_IMAGE_WORKINGDIR}" ]; then
+ oci_image_working_dir_options="--working-dir ${OCI_IMAGE_WORKINGDIR}"
+ fi
+
+ if [ -n "${OCI_IMAGE_TAR_OUTPUT}" ]; then
+ sloci_options="$sloci_options --tar"
+ fi
+
+ # The options that always appear are required for a valid OCI container image;
+ # the others are optional, based on settings.
+ sloci-image $sloci_options \
+ --arch ${OCI_IMAGE_ARCH} \
+ --arch-variant "${OCI_IMAGE_SUBARCH}" \
+ --entrypoint ${OCI_IMAGE_ENTRYPOINT} \
+ --cmd "${OCI_IMAGE_ENTRYPOINT_ARGS}" \
+ --author ${OCI_IMAGE_AUTHOR_EMAIL} \
+ ${oci_image_user_options} \
+ ${oci_image_label_options} \
+ ${oci_image_env_options} \
+ ${oci_image_working_dir_options} \
+ ${oci_image_port_options} \
+ ${IMAGE_ROOTFS} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci:${OCI_IMAGE_TAG}
+
+ if [ -n "${OCI_IMAGE_TAR_OUTPUT}" ]; then
+ # if tar is specified, sloci-image removes the directory that we need for a secondary
+ # tar format, so restore it here.
+ tar xf ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci-${OCI_IMAGE_TAG}-${OCI_IMAGE_ARCH}-linux.oci-image.tar
+ fi
+
+ # create a convenience symlink
+ ln -sf ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci ${IMAGE_BASENAME}-${OCI_IMAGE_TAG}-oci
+
+ if [ -n "${OCI_IMAGE_TAR_OUTPUT}" ]; then
+ # move the sloci output to a naming convention that matches what we do with umoci; the
+ # default creates a subdirectory, so it gets the "-dir" in the name
+ mv ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci-${OCI_IMAGE_TAG}-${OCI_IMAGE_ARCH}-linux.oci-image.tar ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci-${OCI_IMAGE_TAG}-${OCI_IMAGE_ARCH}-linux.oci-dir.tar
+ ln -sf ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci-${OCI_IMAGE_TAG}-${OCI_IMAGE_ARCH}-linux.oci-dir.tar ${IMAGE_BASENAME}-${OCI_IMAGE_TAG}-oci-dir.tar
+
+ (
+ cd "${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci"
+ tar -cf ../"${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci-${OCI_IMAGE_TAG}-${OCI_IMAGE_ARCH}-linux.oci-image.tar" "."
+ )
+ ln -sf "${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci-${OCI_IMAGE_TAG}-${OCI_IMAGE_ARCH}-linux.oci-image.tar" ${IMAGE_BASENAME}-${OCI_IMAGE_TAG}-oci.tar
+ fi
+}
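
With this backend, enabling OCI_IMAGE_TAR_OUTPUT (its default is defined outside this diff) produces both tar flavours next to the OCI image directory. A local.conf sketch:

    # build the oci image type and request the tar outputs
    IMAGE_FSTYPES += "oci"
    OCI_IMAGE_TAR_OUTPUT = "1"

The convenience symlinks then point at <basename>-<tag>-oci.tar (blobs at the top level) and <basename>-<tag>-oci-dir.tar (the tar'd image directory).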
diff --git a/classes/image-oci-umoci.inc b/classes/image-oci-umoci.inc
new file mode 100644
index 00000000..c77750fb
--- /dev/null
+++ b/classes/image-oci-umoci.inc
@@ -0,0 +1,135 @@
+IMAGE_CMD:oci() {
+ umoci_options=""
+
+ bbdebug 1 "UMOCI image settings:"
+ bbdebug 1 " author: ${OCI_IMAGE_AUTHOR}"
+ bbdebug 1 " author email: ${OCI_IMAGE_AUTHOR_EMAIL}"
+ bbdebug 1 " tag: ${OCI_IMAGE_TAG}"
+ bbdebug 1 " arch: ${OCI_IMAGE_ARCH}"
+ bbdebug 1 " subarch: ${OCI_IMAGE_SUBARCH}"
+ bbdebug 1 " entrypoint: ${OCI_IMAGE_ENTRYPOINT}"
+ bbdebug 1 " entrypoint args: ${OCI_IMAGE_ENTRYPOINT_ARGS}"
+ bbdebug 1 " labels: ${OCI_IMAGE_LABELS}"
+ bbdebug 1 " uid: ${OCI_IMAGE_RUNTIME_UID}"
+ bbdebug 1 " working dir: ${OCI_IMAGE_WORKINGDIR}"
+ bbdebug 1 " env vars: ${OCI_IMAGE_ENV_VARS}"
+ bbdebug 1 " ports: ${OCI_IMAGE_PORTS}"
+
+ OCI_REUSE_IMAGE=""
+
+ # Change into the image deploy directory so that output operations
+ # don't capture long absolute paths or the build location.
+ cd ${IMGDEPLOYDIR}
+
+ new_image=t
+ image_name="${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci"
+ image_bundle_name="${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci-bundle"
+ if [ -n "$OCI_REUSE_IMAGE" ]; then
+ if [ -d $image_name ]; then
+ bbdebug 1 "OCI: reusing image directory"
+ new_image=""
+ fi
+ else
+ bbdebug 1 "OCI: removing existing container image directory"
+ rm -rf $image_name $image_bundle_name
+ fi
+
+ if [ -z "${OCI_IMAGE_TAG}" ]; then
+ OCI_IMAGE_TAG="initial-tag"
+ fi
+
+ if [ -n "$new_image" ]; then
+ bbdebug 1 "OCI: umoci init --layout $image_name"
+ umoci init --layout $image_name
+ umoci new --image $image_name:${OCI_IMAGE_TAG}
+ umoci unpack --rootless --image $image_name:${OCI_IMAGE_TAG} $image_bundle_name
+ else
+ # todo: create a different tag, after checking if the passed one exists
+ true
+ fi
+
+ bbdebug 1 "OCI: populating rootfs"
+ bbdebug 1 "OCI: cp -r ${IMAGE_ROOTFS}/* $image_bundle_name/rootfs/"
+ cp -r ${IMAGE_ROOTFS}/* $image_bundle_name/rootfs
+
+ bbdebug 1 "OCI: umoci repack --image $image_name:${OCI_IMAGE_TAG} $image_bundle_name"
+ umoci repack --image $image_name:${OCI_IMAGE_TAG} $image_bundle_name
+
+ bbdebug 1 "OCI: configuring image"
+ if [ -n "${OCI_IMAGE_LABELS}" ]; then
+ for l in ${OCI_IMAGE_LABELS}; do
+ bbdebug 1 "OCI: umoci config --image $image_name:${OCI_IMAGE_TAG} --config.label \"$l\""
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.label "$l"
+ done
+ fi
+ if [ -n "${OCI_IMAGE_ENV_VARS}" ]; then
+ for l in ${OCI_IMAGE_ENV_VARS}; do
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.env \"$l\""
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.env "$l"
+ done
+ fi
+ if [ -n "${OCI_IMAGE_PORTS}" ]; then
+ for l in ${OCI_IMAGE_PORTS}; do
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.exposedports $l"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.exposedports $l
+ done
+ fi
+ if [ -n "${OCI_IMAGE_RUNTIME_UID}" ]; then
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.user ${OCI_IMAGE_RUNTIME_UID}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.user ${OCI_IMAGE_RUNTIME_UID}
+ fi
+ if [ -n "${OCI_IMAGE_WORKINGDIR}" ]; then
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.workingdir ${OCI_IMAGE_WORKINGDIR}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.workingdir ${OCI_IMAGE_WORKINGDIR}
+ fi
+ if [ -n "${OCI_IMAGE_STOPSIGNAL}" ]; then
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.stopsignal ${OCI_IMAGE_STOPSIGNAL}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.stopsignal ${OCI_IMAGE_STOPSIGNAL}
+ fi
+ if [ -n "${OCI_IMAGE_OS}" ]; then
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --os ${OCI_IMAGE_OS}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --os ${OCI_IMAGE_OS}
+ fi
+
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --architecture ${OCI_IMAGE_ARCH}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --architecture ${OCI_IMAGE_ARCH}
+ # NOTE: umoci doesn't currently expose setting the architecture variant,
+ # so if you need it use sloci instead
+ if [ -n "${OCI_IMAGE_SUBARCH}" ]; then
+ bbnote "OCI: image subarch is set to: ${OCI_IMAGE_SUBARCH}, but umoci does not"
+ bbnote " expose variants. use sloci instead if this is important"
+ fi
+ umoci config --image $image_name:${OCI_IMAGE_TAG} \
+ ${@" ".join("--config.entrypoint %s" % s for s in d.getVar("OCI_IMAGE_ENTRYPOINT").split())}
+ if [ -n "${OCI_IMAGE_ENTRYPOINT_ARGS}" ]; then
+ umoci config --image $image_name:${OCI_IMAGE_TAG} ${@" ".join("--config.cmd %s" % s for s in d.getVar("OCI_IMAGE_ENTRYPOINT_ARGS").split())}
+ fi
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --author ${OCI_IMAGE_AUTHOR_EMAIL}
+
+ # OCI_IMAGE_TAG may contain ":", but these are not allowed in OCI file
+ # names so replace them
+ image_tag="${@d.getVar("OCI_IMAGE_TAG").replace(":", "_")}"
+
+ # make a tar version of the image directory
+ # 1) image_name.tar: compatible with oci tar format, blobs and rootfs
+ # are at the top level. Can load directly from something like podman
+ # 2) image_name-dir.tar: original format from meta-virt, is just a tar'd
+ # up oci image directory (compatible with skopeo :dir format)
+ if [ -n "${OCI_IMAGE_TAR_OUTPUT}" ]; then
+ (
+ cd "$image_name"
+ tar -cf ../"$image_name.tar" "."
+ )
+ tar -cf "$image_name-dir.tar" "$image_name"
+
+ # create a convenience symlink
+ ln -sf "$image_name.tar" "${IMAGE_BASENAME}-$image_tag-oci.tar"
+ ln -sf "$image_name-dir.tar" "${IMAGE_BASENAME}-$image_tag-oci-dir.tar"
+ fi
+
+ # We could make this optional, since the bundle is directly runnable via runc
+ rm -rf $image_bundle_name
+
+ # This is the OCI image directory, which is technically the "image" as specified by the OCI image layout
+ ln -sf $image_name ${IMAGE_BASENAME}-$image_tag-oci
+}
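
The outputs are directly consumable on the build host; for example with skopeo (a sketch, assuming the convenience symlinks above and an image named container-base tagged latest):

    # inspect the OCI image directory
    skopeo inspect oci:container-base-latest-oci:latest
    # or push it to a registry
    skopeo copy oci:container-base-latest-oci:latest docker://registry.example.com/container-base:latest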
diff --git a/classes/image-oci.bbclass b/classes/image-oci.bbclass
index c256b12c..0ec5c487 100644
--- a/classes/image-oci.bbclass
+++ b/classes/image-oci.bbclass
@@ -16,19 +16,32 @@
# And then create the bundle:
# % oci-image-tool create --ref name=latest container-base-<arch>-<stamp>.rootfs-oci container-base-oci-bundle
#
+# Alternatively, the bundle can be created with umoci (use --rootless if sudo is not available)
+# % sudo umoci unpack --image container-base-<arch>-<stamp>.rootfs-oci:latest container-base-oci-bundle
+#
# Or to copy (push) the oci image to a docker registry, skopeo can be used (vary the
# tag based on the created oci image):
#
# % skopeo copy --dest-creds <username>:<password> oci:container-base-<arch>-<stamp>:latest docker://zeddii/container-base
#
+# If your build host architecture matches the target, you can execute the unpacked
+# container bundle with runc:
+#
+# % sudo runc run -b container-base-oci-bundle ctr-build
+# / % uname -a
+# Linux mrsdalloway 4.18.0-25-generic #26-Ubuntu SMP Mon Jun 24 09:32:08 UTC 2019 x86_64 GNU/Linux
+#
# We'd probably get this through the container image typedep, but just
# to be sure, we'll repeat it here.
ROOTFS_BOOTSTRAP_INSTALL = ""
# we want container and tar.bz2's to be created
-IMAGE_TYPEDEP_oci = "container tar.bz2"
+IMAGE_TYPEDEP:oci = "container tar.bz2"
+
# sloci is the script/project that will create the oci image
-do_image_oci[depends] += "sloci-image-native:do_populate_sysroot"
+# OCI_IMAGE_BACKEND ?= "sloci-image"
+OCI_IMAGE_BACKEND ?= "umoci"
+do_image_oci[depends] += "${OCI_IMAGE_BACKEND}-native:do_populate_sysroot"
#
# image type configuration block
@@ -39,12 +52,13 @@ OCI_IMAGE_AUTHOR_EMAIL ?= "${PATCH_GIT_USER_EMAIL}"
OCI_IMAGE_TAG ?= "latest"
OCI_IMAGE_RUNTIME_UID ?= ""
-OCI_IMAGE_ARCH ?= "${TARGET_ARCH}"
+OCI_IMAGE_ARCH ?= "${@oe.go.map_arch(d.getVar('TARGET_ARCH'))}"
OCI_IMAGE_SUBARCH ?= "${@oci_map_subarch(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
OCI_IMAGE_ENTRYPOINT ?= "sh"
OCI_IMAGE_ENTRYPOINT_ARGS ?= ""
OCI_IMAGE_WORKINGDIR ?= ""
+OCI_IMAGE_STOPSIGNAL ?= ""
# List of ports to expose from a container running this image:
# PORT[/PROT]
@@ -75,70 +89,7 @@ def oci_map_subarch(a, f, d):
return ''
return ''
-IMAGE_CMD_oci() {
- sloci_options=""
-
- bbdebug 1 "OCI image settings:"
- bbdebug 1 " author: ${OCI_IMAGE_AUTHOR}"
- bbdebug 1 " author email: ${OCI_IMAGE_AUTHOR_EMAIL}"
- bbdebug 1 " tag: ${OCI_IMAGE_TAG}"
- bbdebug 1 " arch: ${OCI_IMAGE_ARCH}"
- bbdebug 1 " subarch: ${OCI_IMAGE_SUBARCH}"
- bbdebug 1 " entrypoint: ${OCI_IMAGE_ENTRYPOINT}"
- bbdebug 1 " entrypoing args: ${OCI_IMAGE_ENTRYPOINT_ARGS}"
- bbdebug 1 " labels: ${OCI_IMAGE_LABELS}"
- bbdebug 1 " uid: ${OCI_IMAGE_RUNTIME_UID}"
- bbdebug 1 " working dir: ${OCI_IMAGE_WORKINGDIR}"
- bbdebug 1 " env vars: ${OCI_IMAGE_ENV_VARS}"
- bbdebug 1 " ports: ${OCI_IMAGE_PORTS}"
-
- # Change into the image deploy dir to avoid having any output operations capture
- # long directories or the location.
- cd ${IMGDEPLOYDIR}
-
- oci_image_label_options=""
- if [ -n "${OCI_IMAGE_LABELS}" ]; then
- for l in ${OCI_IMAGE_LABELS}; do
- oci_image_label_options="${oci_image_label_options} --label ${l}"
- done
- fi
- oci_image_env_options=""
- if [ -n "${OCI_IMAGE_ENV_VARS}" ]; then
- for l in ${OCI_IMAGE_ENV_VARS}; do
- oci_image_env_options="${oci_image_env_options} --env ${l}"
- done
- fi
- oci_image_port_options=""
- if [ -n "${OCI_IMAGE_PORTS}" ]; then
- for l in ${OCI_IMAGE_PORTS}; do
- oci_image_port_options="${oci_image_port_options} --port ${l}"
- done
- fi
-
- if [ -n "${OCI_IMAGE_RUNTIME_UID}" ]; then
- oci_image_user_options="--user ${OCI_IMAGE_RUNTIME_UID}"
- fi
-
- if [ -n "${OCI_IMAGE_WORKINGDIR}" ]; then
- oci_image_working_dir_options="--working-dir ${OCI_IMAGE_WORKINGDIR}"
- fi
-
- if [ -n "${OCI_IMAGE_TAR_OUTPUT}" ]; then
- sloci_options="$sloci_options --tar"
- fi
+# the IMAGE_CMD:oci comes from the .inc
+OCI_IMAGE_BACKEND_INC ?= "${@"image-oci-" + "${OCI_IMAGE_BACKEND}" + ".inc"}"
+include ${OCI_IMAGE_BACKEND_INC}
- # options that always appear are required for a valid oci container image
- # others are optional based on settings.
- sloci-image $sloci_options \
- --arch ${OCI_IMAGE_ARCH} \
- --arch-variant "${OCI_IMAGE_SUBARCH}" \
- --entrypoint ${OCI_IMAGE_ENTRYPOINT} \
- --cmd "${OCI_IMAGE_ENTRYPOINT_ARGS}" \
- --author ${OCI_IMAGE_AUTHOR_EMAIL} \
- ${oci_image_user_options} \
- ${oci_image_label_options} \
- ${oci_image_env_options} \
- ${oci_image_working_dir_options} \
- ${oci_image_port_options} \
- ${IMAGE_ROOTFS} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}-oci:${OCI_IMAGE_TAG}
-}
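
Since OCI_IMAGE_BACKEND is a weak assignment, the previous behaviour can be restored from a distro or local configuration:

    # local.conf: revert to the sloci-image backend
    OCI_IMAGE_BACKEND = "sloci-image"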
diff --git a/classes/meta-virt-container-cfg.bbclass b/classes/meta-virt-container-cfg.bbclass
new file mode 100644
index 00000000..9a4b144e
--- /dev/null
+++ b/classes/meta-virt-container-cfg.bbclass
@@ -0,0 +1,10 @@
+# We need to set the container meta-virt config components, only if "virtualization"
+# is in the distro features. Since we don't know the distro flags during
+# layer.conf load time, we delay using a special bbclass that simply includes
+# the META_VIRT_CONTAINER_CONFIG_PATH file.
+
+# the defaults are valid if we do or don't have virtualization enabled, so
+# we include it in either case below. But we leave the pattern in place, to
+# match the other configs of the layer and in case the above statement isn't
+# always true in the future.
+include ${@bb.utils.contains('DISTRO_FEATURES', 'virtualization', '${META_VIRT_CONTAINER_CONFIG_PATH}', '${META_VIRT_CONTAINER_CONFIG_PATH}', d)}
diff --git a/classes/meta-virt-depreciated-warning.bbclass b/classes/meta-virt-depreciated-warning.bbclass
new file mode 100644
index 00000000..97495bbf
--- /dev/null
+++ b/classes/meta-virt-depreciated-warning.bbclass
@@ -0,0 +1,3 @@
+do_compile:append() {
+ bbwarn "${PN} is depreciated and should not be used, it will be removed in the future"
+}
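
A recipe marks itself as deprecated simply by inheriting the class:

    # in the recipe slated for removal
    inherit meta-virt-depreciated-warning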
diff --git a/classes/meta-virt-hosts.bbclass b/classes/meta-virt-hosts.bbclass
new file mode 100644
index 00000000..80aefb76
--- /dev/null
+++ b/classes/meta-virt-hosts.bbclass
@@ -0,0 +1,29 @@
+# This doesn't work, since it seems to be too late for sanity checking.
+# IMAGE_FEATURES[validitems] += ' ${@bb.utils.contains("DISTRO_FEATURES", "virtualization", "virt-unique-hostname; ", "",d)}'
+
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "virt-unique-hostname", "virt_gen_hostname; ", "",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "virt-unique-hostname", "virt_set_hostname; ", "",d)}'
+
+python virt_gen_hostname() {
+ import uuid
+
+ targetname = d.getVar("VIRT_TARGETNAME")
+ if targetname is not None:
+ return
+
+ status, date = oe.utils.getstatusoutput("date +%d-%m-%y")
+ if status:
+ bb.warn("Can't get the date string for target hostname")
+
+ mac = ':'.join(['{:02x}'.format((uuid.getnode() >> ele) & 0xff) for ele in range(0,8*6,8)][::-1])  # avoid shadowing the uuid module
+ if mac:
+ targetname = "%s-%s" % (d.getVar("MACHINE"), mac.split(":")[0])
+ else:
+ targetname = "%s-%s" % (d.getVar("MACHINE"), date)
+
+ d.setVar("VIRT_TARGETNAME", targetname)
+}
+
+virt_set_hostname() {
+ echo "${VIRT_TARGETNAME}" > ${IMAGE_ROOTFS}/etc/hostname
+}
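
The hostname postprocessing only runs when the corresponding image feature is selected, e.g.:

    # image recipe or local.conf
    IMAGE_FEATURES += "virt-unique-hostname"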
diff --git a/classes/meta-virt-xen-cfg.bbclass b/classes/meta-virt-xen-cfg.bbclass
new file mode 100644
index 00000000..61b32aa3
--- /dev/null
+++ b/classes/meta-virt-xen-cfg.bbclass
@@ -0,0 +1,6 @@
+# We need to load the Xen meta-virt config components, only if "xen"
+# is in the distro features. Since we don't know the distro flags during
+# layer.conf load time, we delay using a special bbclass that simply includes
+# the META_VIRT_XEN_CONFIG_PATH file.
+
+include ${@bb.utils.contains('DISTRO_FEATURES', 'xen', '${META_VIRT_XEN_CONFIG_PATH}', '', d)}
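
Both this class and meta-virt-container-cfg above expect the including layer to define the corresponding *_CONFIG_PATH variable in its layer.conf; a sketch with hypothetical paths:

    # layer.conf of the including layer
    META_VIRT_XEN_CONFIG_PATH = "${LAYERDIR}/conf/distro/include/meta-virt-xen.inc"
    META_VIRT_CONTAINER_CONFIG_PATH = "${LAYERDIR}/conf/distro/include/meta-virt-container.inc"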
diff --git a/classes/qemuboot-testimage-network.bbclass b/classes/qemuboot-testimage-network.bbclass
new file mode 100644
index 00000000..57e03551
--- /dev/null
+++ b/classes/qemuboot-testimage-network.bbclass
@@ -0,0 +1,51 @@
+# The recipe for init-ifupdown in core has a special-case for all
+# the Qemu MACHINES: it removes all external network interfaces
+# by default. However, eth0 is needed for testimage, so enable it here.
+
+# If QB_NETWORK_XEN_BRIDGE is set, configure bridging for the network.
+QB_NETWORK_XEN_BRIDGE ??= ""
+
+enable_runqemu_network() {
+ : # no-op for non-qemu MACHINES
+}
+
+enable_runqemu_network:qemuall() {
+ # Do not override a network configuration for eth0 if one is present
+ if ! grep -q eth0 "${IMAGE_ROOTFS}${sysconfdir}/network/interfaces" ; then
+
+ # Xen host networking: use bridging to support guest networks
+ if [ -n "${QB_NETWORK_XEN_BRIDGE}" ] ; then
+ # Configure a Xen host network bridge and put eth0 on it
+ cat <<EOF >>${IMAGE_ROOTFS}${sysconfdir}/network/interfaces
+
+# Bridged host network for Xen testimage
+iface eth0 inet manual
+
+auto xenbr0
+iface xenbr0 inet dhcp
+ bridge_ports eth0
+EOF
+
+ # Add a script to create the bridge and add eth0, if necessary
+ cat <<EOF >>${IMAGE_ROOTFS}${sysconfdir}/network/if-pre-up.d/xenbr0
+#!/bin/sh
+
+if [ "\$IFACE" = xenbr0 ]; then
+ brctl addbr xenbr0 || /bin/true
+ brctl addif xenbr0 eth0 || /bin/true
+ ifconfig eth0 up
+fi
+EOF
+ chmod 755 ${IMAGE_ROOTFS}${sysconfdir}/network/if-pre-up.d/xenbr0
+ else
+ # Just configure eth0
+ cat <<EOF >>${IMAGE_ROOTFS}${sysconfdir}/network/interfaces
+
+# Network for testimage
+auto eth0
+iface eth0 inet dhcp
+EOF
+ fi
+ fi
+}
+ROOTFS_POSTPROCESS_COMMAND += 'enable_runqemu_network;'
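
A Xen host image opts in to the bridged layout by setting the variable to any non-empty value, e.g.:

    # machine or local configuration
    QB_NETWORK_XEN_BRIDGE = "1"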
diff --git a/classes/qemuboot-xen-defaults.bbclass b/classes/qemuboot-xen-defaults.bbclass
new file mode 100644
index 00000000..460cbe6a
--- /dev/null
+++ b/classes/qemuboot-xen-defaults.bbclass
@@ -0,0 +1,42 @@
+# Set defaults for booting Xen images with qemuboot
+
+# Xen and Dom0 command line options
+QB_XEN_CMDLINE_EXTRA ??= "dom0_mem=192M"
+QB_XEN_DOM0_BOOTARGS ??= \
+ "console=hvc0 earlyprintk clk_ignore_unused root=/dev/vda"
+
+# Launch with one initial domain, dom0, with one boot module, the kernel
+DOM0_KERNEL ??= "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE}"
+DOM0_KERNEL_LOAD_ADDR ??= "0x45000000"
+QB_XEN_DOMAIN_MODULES ??= "${DOM0_KERNEL}:${DOM0_KERNEL_LOAD_ADDR}:multiboot,kernel"
+
+# Qemuboot for Arm uses the QB_DEFAULT_KERNEL method to load Xen
+# and the device loader option for the dom0 kernel:
+QB_OPT_APPEND:append:aarch64 = " \
+ -device loader,file=${DOM0_KERNEL},addr=${DOM0_KERNEL_LOAD_ADDR} \
+ "
+QB_OPT_APPEND:append:qemuarm = " \
+ -device loader,file=${DOM0_KERNEL},addr=${DOM0_KERNEL_LOAD_ADDR} \
+ "
+QB_DEFAULT_KERNEL:qemuarm64 = "xen-${MACHINE}"
+QB_DEFAULT_KERNEL:qemuarm = "xen-${MACHINE}"
+
+# 64-bit Arm: gic version 3
+QB_MACHINE:qemuarm64 = "-machine virt,gic-version=3 -machine virtualization=true"
+# 32-bit Arm: highmem=off
+# Disable highmem so that qemu does not use highmem IO regions that end up
+# being placed at the 256GiB mark (e.g. ECAM space) and can cause issues in Xen.
+QB_MACHINE:qemuarm = "-machine virt,highmem=off -machine virtualization=true"
+
+# Increase the default qemu memory allocation to allow for the hypervisor.
+# Use a weak assignment to allow for change of default and override elsewhere.
+QB_MEM_VALUE ??= "512"
+QB_MEM = "-m ${QB_MEM_VALUE}"
+
+# 64-bit Arm: qemuboot with a device tree binary
+QB_DTB:qemuarm64 = "${IMAGE_NAME}.qemuboot.dtb"
+QB_DTB_LINK:qemuarm64 = "${IMAGE_LINK_NAME}.qemuboot.dtb"
+
+# 32-bit Arm: qemuboot with a device tree binary
+QB_DTB:qemuarm = "${IMAGE_NAME}.qemuboot.dtb"
+QB_DTB_LINK:qemuarm = "${IMAGE_LINK_NAME}.qemuboot.dtb"
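
All of the above are weak ("??=") or machine-scoped assignments, so they can be overridden; for example, to give the system more memory and extra Xen options (the values are illustrative):

    # local.conf or image recipe
    QB_MEM_VALUE = "1024"
    QB_XEN_CMDLINE_EXTRA = "dom0_mem=512M sync_console"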
diff --git a/classes/qemuboot-xen-dtb.bbclass b/classes/qemuboot-xen-dtb.bbclass
new file mode 100644
index 00000000..d43d23a3
--- /dev/null
+++ b/classes/qemuboot-xen-dtb.bbclass
@@ -0,0 +1,210 @@
+# Enable booting Xen with qemuboot / runqemu: generate device tree
+#
+# Copyright (c) 2021 Star Lab Corp. All rights reserved.
+#
+# Author: Christopher Clark <christopher.clark@starlab.io>
+
+# Interface variables:
+#
+# QB_DTB : defined in qemuboot.bbclass.
+# If set, this class will generate the specified device tree file.
+#
+# QB_XEN_CMDLINE_EXTRA :
+# A string to be appended to the default Xen hypervisor boot command line,
+# for supplying Xen boot options.
+# The device tree that this bbclass generates will contain Xen command
+# line options to connect the Xen console to the Qemu serial port.
+#
+# QB_XEN_DOMAIN_MODULES :
+# A space-separated list of colon-separated entries:
+# "<file for the module>:<load memory address>:<module compatibility string>"
+#
+# QB_XEN_DOM0_BOOTARGS :
+# A string for specifying Dom0 boot options for the Xen section of the device
+# tree.
+#
+# QB_XEN_DTB_TASK_DEPENDS :
+# The task dependencies for the DTB generation. A default is provided.
+#
+# See also: Other QB_ variables as defined by the qemuboot.bbclass.
+
+write_lops_xen_section() {
+ DOM0_BOOTARGS="$2"
+ cat <<EOF >"$1"
+/dts-v1/;
+/ {
+ compatible = "system-device-tree-v1";
+ lops {
+ /* Connect the Xen console to Qemu dtuart */
+ lop_1 {
+ compatible = "system-device-tree-v1,lop,code-v1";
+ code = "
+ chosen = node.tree['/chosen']
+ stdout_path = str(chosen['stdout-path'].value[0])
+ chosen['xen,xen-bootargs'] = \
+ 'console=dtuart dtuart=%s' % stdout_path
+ return True
+ ";
+ };
+ lop_2 {
+ compatible = "system-device-tree-v1,lop,modify";
+ modify = "/chosen:xen,dom0-bootargs:${DOM0_BOOTARGS}";
+ };
+ lop_3 {
+ compatible = "system-device-tree-v1,lop,modify";
+ modify = "/chosen:#address-cells:<1>";
+ };
+ lop_4 {
+ compatible = "system-device-tree-v1,lop,modify";
+ modify = "/chosen:#size-cells:<1>";
+ };
+ };
+};
+EOF
+}
+
+write_lop_add_to_xen_cmdline() {
+ EXTRA_XEN_BOOTARGS="$2"
+ cat <<EOF >"$1"
+/dts-v1/;
+/ {
+ compatible = "system-device-tree-v1";
+ lops {
+ lop_1 {
+ compatible = "system-device-tree-v1,lop,code-v1";
+ options = "extra_xen_bootargs:${EXTRA_XEN_BOOTARGS}";
+ code = "
+ chosen = node.tree['/chosen']
+ xen_bootargs = str(chosen['xen,xen-bootargs'].value)
+ chosen['xen,xen-bootargs'] = '%s %s' % \
+ (xen_bootargs, extra_xen_bootargs)
+ return True
+ ";
+ };
+ };
+};
+EOF
+}
+
+write_lop_add_chosen_module() {
+ ADDR="$2"
+ SIZE="$3"
+ MODULE_TYPE="$4"
+ cat <<EOF >"$1"
+/dts-v1/;
+/ {
+ compatible = "system-device-tree-v1";
+ lops {
+ lop_1 {
+ compatible = "system-device-tree-v1,lop,add";
+ node_src = "module@${ADDR}";
+ node_dest = "/chosen/module@${ADDR}";
+
+ module@${ADDR} {
+ compatible = "multiboot,module", "${MODULE_TYPE}";
+ reg = <${ADDR} ${SIZE}>;
+ };
+ };
+ };
+};
+EOF
+}
+
+generate_xen_qemuboot_dtb() {
+ # First: invoke qemu to generate an initial device tree.
+ # Parameters supplied here were inspired by inspection of:
+ # runqemu "${IMAGE_BASENAME}" nographic slirp \
+ # qemuparams='-dtb "" -machine dumpdtb=${B}/qemu-dumped.dtb'
+ ${QB_SYSTEM_NAME} \
+ -device qemu-xhci \
+ -device usb-tablet \
+ -device usb-kbd \
+ ${QB_MACHINE} \
+ ${QB_CPU} \
+ ${QB_SMP} \
+ ${QB_MEM} \
+ -nographic \
+ -serial mon:stdio \
+ -machine "dumpdtb=${B}/qemu-dumped.dtb"
+
+ # Lopper generates temporary files in cwd, so run it within ${B}
+ cd "${B}"
+ write_lops_xen_section "${B}/lop-insert-xen-section.dts" \
+ "${QB_XEN_DOM0_BOOTARGS}"
+
+ write_lop_add_to_xen_cmdline "${B}/lop-xen-cmdline.dts" \
+ "${QB_XEN_CMDLINE_EXTRA}"
+
+ if [ -z "${QB_XEN_DOMAIN_MODULES}" ]; then
+ bbwarn "No domain modules: please set QB_XEN_DOMAIN_MODULES"
+ fi
+
+ for DOMAIN_MODULE in ${QB_XEN_DOMAIN_MODULES}
+ do
+ MODULE_FILE="$(echo ${DOMAIN_MODULE} | cut -f1 -d:)"
+ ADDR="$(echo ${DOMAIN_MODULE} | cut -f2 -d:)"
+ MODULE_TYPE="$(echo ${DOMAIN_MODULE} | cut -f3 -d:)"
+ RESOLVED_FILE="$(readlink -f ${MODULE_FILE})"
+ SIZE=$(printf '0x%x\n' $(stat -c '%s' "${RESOLVED_FILE}"))
+ [ "x${SIZE}" != "x0x0" ] || bbfatal No module: "${MODULE_FILE}"
+ write_lop_add_chosen_module "${B}/lop-add-module-${ADDR}.dts" \
+ "${ADDR}" "${SIZE}" "${MODULE_TYPE}"
+ LOP_MODULE_ARGS="${LOP_MODULE_ARGS} -i ${B}/lop-add-module-${ADDR}.dts"
+ done
+
+ QEMUBOOT_DTB="${IMGDEPLOYDIR}/${QB_DTB}"
+ QEMUBOOT_DTB_LINK="${IMGDEPLOYDIR}/${QB_DTB_LINK}"
+
+ lopper --werror --verbose \
+ -i "${B}/lop-insert-xen-section.dts" \
+ -i "${B}/lop-xen-cmdline.dts" \
+ ${LOP_MODULE_ARGS} \
+ -f -o "${QEMUBOOT_DTB}" \
+ "${B}/qemu-dumped.dtb"
+
+ # To assist debugging:
+ dtc -I dtb -O dts -o "${B}/output.dts" "${QEMUBOOT_DTB}"
+
+ if [ "${QEMUBOOT_DTB_LINK}" != "${QEMUBOOT_DTB}" ] ; then
+ if [ -e "${QEMUBOOT_DTB_LINK}" ] ; then
+ rm "${QEMUBOOT_DTB_LINK}"
+ fi
+ ln -s "${QB_DTB}" "${QEMUBOOT_DTB_LINK}"
+ fi
+}
+
+do_write_xen_qemuboot_dtb() {
+ # Not all architectures qemuboot with a device tree binary, so check
+ # to see if one is needed. This allows this bbclass file to be used
+ # in the same image recipe for multiple architectures.
+ if [ -n "${QB_DTB}" ] && [ -n "${QB_SYSTEM_NAME}" ] ; then
+ generate_xen_qemuboot_dtb
+ fi
+}
+
+addtask do_write_xen_qemuboot_dtb after do_write_qemuboot_conf before do_image
+# Task dependency:
+# An expected common case is that the kernel for at least one of the initial
+# domains (e.g. dom0) is deployed from the virtual/kernel recipe, so
+# add virtual/kernel:do_deploy as a task dependency here since the kernel size
+# needs to be known for generating the device tree.
+# Dependencies are only introduced if a device tree will be generated.
+QB_XEN_DTB_TASK_DEPENDS ?= " \
+ ${@[ ' \
+ qemu-helper-native:do_populate_sysroot \
+ lopper-native:do_populate_sysroot \
+ dtc-native:do_populate_sysroot \
+ virtual/kernel:do_deploy \
+ ', ''][d.getVar('QB_DTB') == '' or d.getVar('QB_DTB') is None]} \
+ "
+do_write_xen_qemuboot_dtb[depends] = "${QB_XEN_DTB_TASK_DEPENDS}"
+
+def qemuboot_dtb_vars(d):
+ build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
+ 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
+ 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
+ 'STAGING_DIR_HOST', 'SERIAL_CONSOLES']
+ return build_vars + [k for k in d.keys() if k.startswith('QB_')]
+
+do_write_xen_qemuboot_dtb[vardeps] += "${@' '.join(qemuboot_dtb_vars(d))}"
+do_write_xen_qemuboot_dtb[vardepsexclude] += "TOPDIR"
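
Putting the interface variables together, a Xen host image for a qemuarm64 machine might configure the DTB generation as follows (a sketch; the values mirror the defaults from qemuboot-xen-defaults.bbclass):

    # image recipe fragment
    inherit qemuboot-xen-defaults qemuboot-xen-dtb
    QB_XEN_DOM0_BOOTARGS = "console=hvc0 root=/dev/vda"
    QB_XEN_DOMAIN_MODULES = "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE}:0x45000000:multiboot,kernel"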