aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--README2
-rw-r--r--classes/container-host.bbclass15
-rw-r--r--classes/image-oci-umoci.inc38
-rw-r--r--classes/image-oci.bbclass1
-rw-r--r--docs/00-INDEX3
-rw-r--r--docs/podman.txt14
-rw-r--r--docs/podman.txt.license3
-rw-r--r--dynamic-layers/xilinx/recipes-extended/xen/xen_%.bbappend (renamed from dynamic-layers/xilinx/recipes-extended/xen/xen_4.14.bbappend)0
-rw-r--r--dynamic-layers/xilinx/recipes-extended/xen/xen_git.bbappend1
-rw-r--r--recipes-containers/buildah/buildah_git.bb57
-rw-r--r--recipes-containers/catatonit/catatonit_0.1.7.bb16
-rw-r--r--recipes-containers/container-host-config/container-host-config.bb22
-rwxr-xr-xrecipes-containers/container-host-config/container-host-config/policy.json7
-rw-r--r--recipes-containers/container-host-config/container-host-config/registries.conf (renamed from recipes-containers/skopeo/files/registries.conf)0
-rw-r--r--recipes-containers/container-host-config/container-host-config/storage.conf (renamed from recipes-containers/skopeo/files/storage.conf)0
-rw-r--r--recipes-containers/containerd/containerd-opencontainers/0001-Add-build-option-GODEBUG-1.patch32
-rw-r--r--recipes-containers/containerd/containerd-opencontainers/0001-Makefile-allow-GO_BUILD_FLAGS-to-be-externally-speci.patch6
-rw-r--r--recipes-containers/containerd/containerd-opencontainers/0001-build-don-t-use-gcflags-to-define-trimpath.patch30
-rw-r--r--recipes-containers/containerd/containerd-opencontainers_git.bb62
-rw-r--r--recipes-containers/containerd/files/0001-build-use-oe-provided-GO-and-flags.patch6
-rw-r--r--recipes-containers/cri-o/cri-o_git.bb7
-rw-r--r--recipes-containers/cri-o/files/crio.conf604
-rw-r--r--recipes-containers/criu/criu_git.bb6
-rw-r--r--recipes-containers/criu/files/0001-criu-Skip-documentation-install.patch (renamed from recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch)14
-rw-r--r--recipes-containers/criu/files/0002-criu-Change-libraries-install-directory.patch (renamed from recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch)16
-rw-r--r--recipes-containers/criu/files/0003-lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch (renamed from recipes-containers/criu/files/lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch)12
-rw-r--r--recipes-containers/criu/files/fix-building-on-newest-glibc-and-kernel.patch45
-rw-r--r--recipes-containers/docker-distribution/docker-distribution_git.bb1
-rw-r--r--recipes-containers/docker-distribution/files/0001-Fix-runaway-allocation-on-v2-_catalog.patch669
-rw-r--r--recipes-containers/docker/README7
-rw-r--r--recipes-containers/docker/docker-ce_git.bb34
-rw-r--r--recipes-containers/docker/docker-moby_git.bb34
-rw-r--r--recipes-containers/docker/docker.inc18
-rw-r--r--recipes-containers/docker/files/0001-Revert-go-updates-from-1.19.12-to-1.18.7.patch144
-rw-r--r--recipes-containers/docker/files/0001-cli-use-external-GO111MODULE-and-cross-compiler.patch15
-rw-r--r--recipes-containers/docker/files/0001-dynbinary-use-go-cross-compiler.patch2
-rw-r--r--recipes-containers/docker/files/0001-libnetwork-use-GO-instead-of-go.patch10
-rw-r--r--recipes-containers/docker/files/0002-Revert-go-updates-from-1.18.7-to-1.17.13.patch1201
-rw-r--r--recipes-containers/docker/files/0003-builder.go-avoid-using-strings.Cut-from-go-1.18.patch32
-rw-r--r--recipes-containers/kubernetes/kubernetes/0001-Makefile.generated_files-Fix-race-issue-for-installi.patch16
-rw-r--r--recipes-containers/kubernetes/kubernetes/0001-build-golang.sh-convert-remaining-go-calls-to-use.patch16
-rw-r--r--recipes-containers/kubernetes/kubernetes/0001-cross-don-t-build-tests-by-default.patch10
-rw-r--r--recipes-containers/kubernetes/kubernetes/0001-hack-lib-golang.sh-use-CC-from-environment.patch12
-rw-r--r--recipes-containers/kubernetes/kubernetes/CVE-2023-2431.patch863
-rw-r--r--recipes-containers/kubernetes/kubernetes/CVE-2023-2727-CVE-2023-2728.patch559
-rw-r--r--recipes-containers/kubernetes/kubernetes_git.bb18
-rw-r--r--recipes-containers/lxc/files/0001-Patching-an-incoming-CVE-CVE-2022-47952.patch76
-rw-r--r--recipes-containers/lxc/files/templates-use-curl-instead-of-wget.patch40
-rw-r--r--recipes-containers/lxc/lxc_git.bb1
-rw-r--r--recipes-containers/nerdctl/nerdctl_git.bb6
-rw-r--r--recipes-containers/podman/podman/0001-Rename-BUILDFLAGS-to-GOBUILDFLAGS.patch125
-rw-r--r--recipes-containers/podman/podman/0002-Define-ActKillThread-equal-to-ActKill.patch90
-rw-r--r--recipes-containers/podman/podman/50-podman-rootless.conf6
-rw-r--r--recipes-containers/podman/podman/CVE-2022-27649.patch106
-rw-r--r--recipes-containers/podman/podman_git.bb21
-rw-r--r--recipes-containers/runc/runc-docker_git.bb4
-rw-r--r--recipes-containers/runc/runc-opencontainers_git.bb4
-rw-r--r--recipes-containers/singularity/singularity_git.bb2
-rw-r--r--recipes-containers/skopeo/skopeo_git.bb14
-rw-r--r--recipes-core/packagegroups/packagegroup-container.bb2
-rw-r--r--recipes-core/sysvinit/sysvinit-inittab_xen.inc10
-rw-r--r--recipes-devtools/go/go-context_git.bb2
-rw-r--r--recipes-devtools/go/go-mux_git.bb2
-rw-r--r--recipes-devtools/yq/yq_git.bb6
-rw-r--r--recipes-extended/ceph/ceph_15.2.17.bb (renamed from recipes-extended/ceph/ceph_15.2.15.bb)2
-rw-r--r--recipes-extended/cloud-init/cloud-init_21.4.bb5
-rw-r--r--recipes-extended/fuse-overlayfs/fuse-overlayfs/0001-Fix-buffer-overflow-on-workdir-path.patch32
-rw-r--r--recipes-extended/fuse-overlayfs/fuse-overlayfs_0.6.4.bb5
-rw-r--r--recipes-extended/images/xtf-image.bb2
-rw-r--r--recipes-extended/irqbalance/irqbalance.inc3
-rw-r--r--recipes-extended/irqbalance/irqbalance/irqbalanced.service9
-rw-r--r--recipes-extended/irqbalance/irqbalance_git.bb1
-rw-r--r--recipes-extended/libvirt/libvirt/CVE-2023-2700.patch54
-rw-r--r--recipes-extended/libvirt/libvirt_8.1.0.bb1
-rw-r--r--recipes-extended/upx/upx/0001-MyCom.h-fix-build-with-gcc-11.patch31
-rw-r--r--recipes-extended/upx/upx_git.bb33
-rw-r--r--recipes-networking/cni/cni_git.bb11
-rw-r--r--recipes-networking/openvswitch/openvswitch_git.bb6
-rw-r--r--recipes-networking/slirp4netns/slirp4netns_git.bb2
79 files changed, 4927 insertions, 467 deletions
diff --git a/README b/README
index 1c75f4a2..7a848bb6 100644
--- a/README
+++ b/README
@@ -68,9 +68,7 @@ revision: HEAD
prio: default
Required for cri-o:
-URI: git://github.com/advancedtelematic/meta-updater
URI: git://git.yoctoproject.org/meta-selinux
-URI: git://git.yoctoproject.org/meta-security
branch: master
revision: HEAD
prio: default
diff --git a/classes/container-host.bbclass b/classes/container-host.bbclass
new file mode 100644
index 00000000..99a75fea
--- /dev/null
+++ b/classes/container-host.bbclass
@@ -0,0 +1,15 @@
+# This class is the collection point for automatic dependencies,
+# package installs, rootfs postprocessing, etc, that are used
+# by container host images and recipes.
+
+# It currently is largely empty, and simply adds RDEPENDS, but
+# will expand to CRI/CNI configurations in the future.
+#
+
+RDEPENDS:${PN}:append = " container-host-config"
+
+do_install:append() {
+ # placeholder for additional package install, or configuration
+ # of the rootfs
+ true
+}
diff --git a/classes/image-oci-umoci.inc b/classes/image-oci-umoci.inc
index 4fe96ea0..f9417952 100644
--- a/classes/image-oci-umoci.inc
+++ b/classes/image-oci-umoci.inc
@@ -58,48 +58,52 @@ IMAGE_CMD:oci() {
bbdebug 1 "OCI: configuring image"
if [ -n "${OCI_IMAGE_LABELS}" ]; then
for l in ${OCI_IMAGE_LABELS}; do
- bbdebug 1 "OCI: umoci config --image $image_name --config.label $l"
- umoci config --image $image_name --config.label $l
+ bbdebug 1 "OCI: umoci config --image $image_name:${OCI_IMAGE_TAG} --config.label $l"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.label $l
done
fi
if [ -n "${OCI_IMAGE_ENV_VARS}" ]; then
for l in ${OCI_IMAGE_ENV_VARS}; do
- bbdebug 1 "umoci config --image $image_name --config.env $l"
- umoci config --image $image_name --config.env $l
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.env $l"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.env $l
done
fi
if [ -n "${OCI_IMAGE_PORTS}" ]; then
for l in ${OCI_IMAGE_PORTS}; do
- bbdebug 1 "umoci config --image $image_name --config.exposedports $l"
- umoci config --image $image_name --config.exposedports $l
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.exposedports $l"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.exposedports $l
done
fi
if [ -n "${OCI_IMAGE_RUNTIME_UID}" ]; then
- bbdebug 1 "umoci config --image $image_name --config.user ${OCI_IMAGE_RUNTIME_UID}"
- umoci config --image $image_name --config.user ${OCI_IMAGE_RUNTIME_UID}
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.user ${OCI_IMAGE_RUNTIME_UID}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.user ${OCI_IMAGE_RUNTIME_UID}
fi
if [ -n "${OCI_IMAGE_WORKINGDIR}" ]; then
- bbdebug 1 "umoci config --image $image_name --config.workingdir ${OCI_IMAGE_WORKINGDIR}"
- umoci config --image $image_name --config.workingdir ${OCI_IMAGE_WORKINGDIR}
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.workingdir ${OCI_IMAGE_WORKINGDIR}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.workingdir ${OCI_IMAGE_WORKINGDIR}
+ fi
+ if [ -n "${OCI_IMAGE_STOPSIGNAL}" ]; then
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --config.stopsignal ${OCI_IMAGE_STOPSIGNAL}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.stopsignal ${OCI_IMAGE_STOPSIGNAL}
fi
if [ -n "${OCI_IMAGE_OS}" ]; then
- bbdebug 1 "umoci config --image $image_name --os ${OCI_IMAGE_OS}"
- umoci config --image $image_name --os ${OCI_IMAGE_OS}
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --os ${OCI_IMAGE_OS}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --os ${OCI_IMAGE_OS}
fi
- bbdebug 1 "umoci config --image $image_name --architecture ${OCI_IMAGE_ARCH}"
- umoci config --image $image_name --architecture ${OCI_IMAGE_ARCH}
+ bbdebug 1 "umoci config --image $image_name:${OCI_IMAGE_TAG} --architecture ${OCI_IMAGE_ARCH}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --architecture ${OCI_IMAGE_ARCH}
# NOTE: umoci doesn't currently expose setting the architecture variant,
# so if you need it use sloci instead
if [ -n "${OCI_IMAGE_SUBARCH}" ]; then
bbnote "OCI: image subarch is set to: ${OCI_IMAGE_SUBARCH}, but umoci does not"
bbnote " expose variants. use sloci instead if this is important"
fi
- umoci config --image $image_name --config.entrypoint ${OCI_IMAGE_ENTRYPOINT}
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.entrypoint ${OCI_IMAGE_ENTRYPOINT}
if [ -n "${OCI_IMAGE_ENTRYPOINT_ARGS}" ]; then
- umoci config --image $image_name --config.cmd "${OCI_IMAGE_ENTRYPOINT_ARGS}"
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --config.cmd "${OCI_IMAGE_ENTRYPOINT_ARGS}"
fi
- umoci config --image $image_name --author ${OCI_IMAGE_AUTHOR_EMAIL}
+ umoci config --image $image_name:${OCI_IMAGE_TAG} --author ${OCI_IMAGE_AUTHOR_EMAIL}
# make a tar version of the image direcotry
if [ -n "${OCI_IMAGE_TAR_OUTPUT}" ]; then
diff --git a/classes/image-oci.bbclass b/classes/image-oci.bbclass
index 7a39d27a..9ddb88b6 100644
--- a/classes/image-oci.bbclass
+++ b/classes/image-oci.bbclass
@@ -58,6 +58,7 @@ OCI_IMAGE_SUBARCH ?= "${@oci_map_subarch(d.getVar('TARGET_ARCH'), d.getVar('TUNE
OCI_IMAGE_ENTRYPOINT ?= "sh"
OCI_IMAGE_ENTRYPOINT_ARGS ?= ""
OCI_IMAGE_WORKINGDIR ?= ""
+OCI_IMAGE_STOPSIGNAL ?= ""
# List of ports to expose from a container running this image:
# PORT[/PROT]
diff --git a/docs/00-INDEX b/docs/00-INDEX
index 5aa1b3c5..6659fbee 100644
--- a/docs/00-INDEX
+++ b/docs/00-INDEX
@@ -11,5 +11,8 @@ alphabetical order as well.
openvswitch.txt
- example on how to setup openvswitch with qemu/kvm.
+podman.txt
+ - documentation on podman container engine integration.
+
xvisor.txt
- example on how to setup Xvisor for RISC-V QEMU.
diff --git a/docs/podman.txt b/docs/podman.txt
new file mode 100644
index 00000000..66a69b3c
--- /dev/null
+++ b/docs/podman.txt
@@ -0,0 +1,14 @@
+Podman
+======
+
+Rootless mode
+-------------
+
+Podman is a daemonless container engine that has as one of its features the
+ability to run in rootless mode. This requires a set of configurations and
+additional components. The OE/Yocto integration configures podman with this
+support disabled by default. This can be changed via configuration files
+(distro, local.conf, etc.) or bbappends using the `PACKAGECONFIG` variable.
+
+To enable rootless support, add `rootless` to the `PACKAGECONFIG` podman
+recipe variable.
diff --git a/docs/podman.txt.license b/docs/podman.txt.license
new file mode 100644
index 00000000..c7348705
--- /dev/null
+++ b/docs/podman.txt.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Huawei Inc.
+
+SPDX-License-Identifier: MIT
diff --git a/dynamic-layers/xilinx/recipes-extended/xen/xen_4.14.bbappend b/dynamic-layers/xilinx/recipes-extended/xen/xen_%.bbappend
index 54edd1ba..54edd1ba 100644
--- a/dynamic-layers/xilinx/recipes-extended/xen/xen_4.14.bbappend
+++ b/dynamic-layers/xilinx/recipes-extended/xen/xen_%.bbappend
diff --git a/dynamic-layers/xilinx/recipes-extended/xen/xen_git.bbappend b/dynamic-layers/xilinx/recipes-extended/xen/xen_git.bbappend
deleted file mode 100644
index 54edd1ba..00000000
--- a/dynamic-layers/xilinx/recipes-extended/xen/xen_git.bbappend
+++ /dev/null
@@ -1 +0,0 @@
-include xen-xilinx.inc
diff --git a/recipes-containers/buildah/buildah_git.bb b/recipes-containers/buildah/buildah_git.bb
new file mode 100644
index 00000000..024e82c1
--- /dev/null
+++ b/recipes-containers/buildah/buildah_git.bb
@@ -0,0 +1,57 @@
+HOMEPAGE = "https://buildah.io"
+SUMMARY = "A tool that facilitates building OCI container images."
+DESCRIPTION = "A tool that facilitates building OCI container images."
+
+# Apache-2.0 for containerd
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://src/github.com/containers/buildah/LICENSE;md5=e3fc50a88d0a364313df4b21ef20c29e"
+
+S = "${WORKDIR}/git"
+
+BUILDAH_VERSION = "1.26"
+SRCREV_buildah = "0a9d6e6eaef2e2e7936313d449a4e226022eb865"
+
+PV = "${BUILDAH_VERSION}"
+
+inherit go
+inherit goarch
+inherit pkgconfig
+
+GO_IMPORT = "github.com/containers/buildah"
+GO_INSTALL = "${GO_IMPORT}"
+GO_WORKDIR = "${GO_INSTALL}"
+GOBUILDFLAGS += "-mod vendor"
+
+SRC_URI = " \
+ git://github.com/containers/buildah;branch=release-${BUILDAH_VERSION};name=buildah;protocol=https \
+ "
+
+DEPENDS = "libdevmapper btrfs-tools gpgme"
+RDEPENDS:${PN} = "cgroup-lite fuse-overlayfs libdevmapper podman"
+RDEPENDS:${PN}-dev = "bash perl"
+
+do_compile:prepend() {
+ cd ${S}/src/github.com/containers/buildah
+}
+
+go_do_compile() {
+ export TMPDIR="${GOTMPDIR}"
+ if [ -n "${GO_INSTALL}" ]; then
+ if [ -n "${GO_LINKSHARED}" ]; then
+ ${GO} install ${GOBUILDFLAGS} ./cmd/buildah
+ ${GO} install ${GOBUILDFLAGS} ./tests/imgtype/imgtype.go
+ ${GO} install ${GOBUILDFLAGS} ./tests/copy/copy.go
+ rm -rf ${B}/bin
+ fi
+ ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} ./cmd/buildah
+ ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} ./tests/imgtype/imgtype.go
+ ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} ./tests/copy/copy.go
+ fi
+}
+
+do_install:append() {
+ dest_dir=${D}/${sysconfdir}/containers
+ mkdir -p ${dest_dir}
+ install -m 666 ${S}/src/github.com/containers/buildah/docs/samples/registries.conf ${dest_dir}/buildah.registries.conf.sample
+ install -m 666 ${S}/src/github.com/containers/buildah/tests/policy.json ${dest_dir}/buildah.policy.json.sample
+}
diff --git a/recipes-containers/catatonit/catatonit_0.1.7.bb b/recipes-containers/catatonit/catatonit_0.1.7.bb
new file mode 100644
index 00000000..da3973de
--- /dev/null
+++ b/recipes-containers/catatonit/catatonit_0.1.7.bb
@@ -0,0 +1,16 @@
+# SPDX-FileCopyrightText: Huawei Inc.
+#
+# SPDX-License-Identifier: MIT
+
+SUMMARY = "A container init that is so simple it's effectively brain-dead."
+HOMEPAGE = "https://github.com/openSUSE/catatonit"
+DESCRIPTION = "${SUMMARY}"
+SECTION = "base"
+LICENSE = "GPL-3.0-or-later"
+LIC_FILES_CHKSUM = "file://COPYING;md5=1ebbd3e34237af26da5dc08a4e440464"
+
+SRC_URI = "git://github.com/openSUSE/${BPN};protocol=https;branch=main"
+SRCREV = "d8d72fea155c144ed3bf298a35a1aba5625a5656"
+S = "${WORKDIR}/git"
+
+inherit autotools
diff --git a/recipes-containers/container-host-config/container-host-config.bb b/recipes-containers/container-host-config/container-host-config.bb
new file mode 100644
index 00000000..80abddf0
--- /dev/null
+++ b/recipes-containers/container-host-config/container-host-config.bb
@@ -0,0 +1,22 @@
+HOMEPAGE = "https://git.yoctoproject.org/meta-virtualization"
+SUMMARY = "Configuration Package for container hosts"
+DESCRIPTION = "Common / centralized configuration files for container hosts"
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+
+SRC_URI = " \
+ file://storage.conf \
+ file://registries.conf \
+ file://policy.json \
+"
+
+do_install() {
+ install -d ${D}/${sysconfdir}/containers
+
+ install ${WORKDIR}/storage.conf ${D}/${sysconfdir}/containers/storage.conf
+ install ${WORKDIR}/registries.conf ${D}/${sysconfdir}/containers/registries.conf
+ install ${WORKDIR}/policy.json ${D}/${sysconfdir}/containers/policy.json
+}
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/recipes-containers/container-host-config/container-host-config/policy.json b/recipes-containers/container-host-config/container-host-config/policy.json
new file mode 100755
index 00000000..bb26e57f
--- /dev/null
+++ b/recipes-containers/container-host-config/container-host-config/policy.json
@@ -0,0 +1,7 @@
+{
+ "default": [
+ {
+ "type": "insecureAcceptAnything"
+ }
+ ]
+}
diff --git a/recipes-containers/skopeo/files/registries.conf b/recipes-containers/container-host-config/container-host-config/registries.conf
index ba6c3f6e..ba6c3f6e 100644
--- a/recipes-containers/skopeo/files/registries.conf
+++ b/recipes-containers/container-host-config/container-host-config/registries.conf
diff --git a/recipes-containers/skopeo/files/storage.conf b/recipes-containers/container-host-config/container-host-config/storage.conf
index 722750c0..722750c0 100644
--- a/recipes-containers/skopeo/files/storage.conf
+++ b/recipes-containers/container-host-config/container-host-config/storage.conf
diff --git a/recipes-containers/containerd/containerd-opencontainers/0001-Add-build-option-GODEBUG-1.patch b/recipes-containers/containerd/containerd-opencontainers/0001-Add-build-option-GODEBUG-1.patch
deleted file mode 100644
index 8b43c8a0..00000000
--- a/recipes-containers/containerd/containerd-opencontainers/0001-Add-build-option-GODEBUG-1.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 84874e47aa2025b8e73df0286c44f3b8a1d9fdb2 Mon Sep 17 00:00:00 2001
-From: Hongxu Jia <hongxu.jia@windriver.com>
-Date: Mon, 2 Sep 2019 16:20:07 +0800
-Subject: [PATCH] Add build option "GODEBUG=1"
-
-Make will generate GDB friendly binary with this build option.
-
-Signed-off-by: Hui Zhu <teawater@hyper.sh>
-
-Upstream-Status: Backport [c5a0c7f491b435e4eb45972903b00e2d8ed46495]
-
-Partly backport and refresh to v1.2.7
-Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
----
- src/import/Makefile | 8 ++++++--
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
-Index: git/src/import/Makefile
-===================================================================
---- git.orig/src/import/Makefile 2020-10-12 08:09:41.638977052 -0700
-+++ git/src/import/Makefile 2020-10-12 08:10:49.783074373 -0700
-@@ -72,6 +72,10 @@
- COMMANDS=ctr containerd containerd-stress
- MANPAGES=ctr.8 containerd.8 containerd-config.8 containerd-config.toml.5
-
-+ifndef GODEBUG
-+ EXTRA_LDFLAGS += -s -w
-+endif
-+
- ifdef BUILDTAGS
- GO_BUILDTAGS = ${BUILDTAGS}
- endif
diff --git a/recipes-containers/containerd/containerd-opencontainers/0001-Makefile-allow-GO_BUILD_FLAGS-to-be-externally-speci.patch b/recipes-containers/containerd/containerd-opencontainers/0001-Makefile-allow-GO_BUILD_FLAGS-to-be-externally-speci.patch
index 7f4d7518..0ef0d38f 100644
--- a/recipes-containers/containerd/containerd-opencontainers/0001-Makefile-allow-GO_BUILD_FLAGS-to-be-externally-speci.patch
+++ b/recipes-containers/containerd/containerd-opencontainers/0001-Makefile-allow-GO_BUILD_FLAGS-to-be-externally-speci.patch
@@ -19,10 +19,10 @@ Signed-off-by: Bruce Ashfield <bruce.ashfield@gmail.com>
Makefile | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
-diff --git git.orig/src/import/Makefile git.orig/src/import/Makefile
+diff --git git.orig/Makefile git.orig/Makefile
index c61418e60..54a10eb42 100644
---- git.orig/src/import/Makefile
-+++ git.orig/src/import/Makefile
+--- git.orig/Makefile
++++ git.orig/Makefile
@@ -112,7 +112,8 @@ endif
GOPATHS=$(shell echo ${GOPATH} | tr ":" "\n" | tr ";" "\n")
diff --git a/recipes-containers/containerd/containerd-opencontainers/0001-build-don-t-use-gcflags-to-define-trimpath.patch b/recipes-containers/containerd/containerd-opencontainers/0001-build-don-t-use-gcflags-to-define-trimpath.patch
new file mode 100644
index 00000000..b499de5d
--- /dev/null
+++ b/recipes-containers/containerd/containerd-opencontainers/0001-build-don-t-use-gcflags-to-define-trimpath.patch
@@ -0,0 +1,30 @@
+From 9174091fa1624dbb09ce812792a4102dff693541 Mon Sep 17 00:00:00 2001
+From: Bruce Ashfield <bruce.ashfield@gmail.com>
+Date: Mon, 12 Sep 2022 15:40:08 -0400
+Subject: [PATCH] build: don't use gcflags to define trimpath
+
+We can pass trimpath in via environment variables. The gcflags
+definition of trimpath is for older go versions and is using the
+complete path for trimming. If the variable is captured in the
+resulting binary, we have a reproducibility and QA issue.
+
+Signed-off-by: Bruce Ashfield <bruce.ashfield@gmail.com>
+---
+ Makefile | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git git.orig/Makefile git.orig/Makefile
+index 4a6c13042..debb57925 100644
+--- git.orig/Makefile
++++ git.orig/Makefile
+@@ -130,7 +130,6 @@ TESTFLAGS_RACE=
+ # See Golang issue re: '-trimpath': https://github.com/golang/go/issues/13809
+ GO_GCFLAGS=$(shell \
+ set -- ${GOPATHS}; \
+- echo "-gcflags=-trimpath=$${1}/src"; \
+ )
+
+ BINARIES=$(addprefix bin/,$(COMMANDS))
+--
+2.19.1
+
diff --git a/recipes-containers/containerd/containerd-opencontainers_git.bb b/recipes-containers/containerd/containerd-opencontainers_git.bb
index b0f92b12..6c0266ac 100644
--- a/recipes-containers/containerd/containerd-opencontainers_git.bb
+++ b/recipes-containers/containerd/containerd-opencontainers_git.bb
@@ -5,25 +5,25 @@ DESCRIPTION = "containerd is a daemon to control runC, built for performance and
support as well as checkpoint and restore for cloning and live migration of containers."
-SRCREV = "d12516713c315ea9e651eb1df89cf32ff7c8137c"
-SRC_URI = "git://github.com/containerd/containerd;branch=release/1.6;protocol=https \
- file://0001-Add-build-option-GODEBUG-1.patch \
+SRCREV = "1e1ea6e986c6c86565bc33d52e34b81b3e2bc71f"
+SRC_URI = "git://github.com/containerd/containerd;branch=release/1.6;protocol=https;destsuffix=git/src/github.com/containerd/containerd \
file://0001-Makefile-allow-GO_BUILD_FLAGS-to-be-externally-speci.patch \
+ file://0001-build-don-t-use-gcflags-to-define-trimpath.patch \
"
# Apache-2.0 for containerd
LICENSE = "Apache-2.0"
-LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=1269f40c0d099c21a871163984590d89"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1269f40c0d099c21a871163984590d89"
-CONTAINERD_VERSION = "v1.6.1"
-CVE_VERSION = "1.6.1"
+CONTAINERD_VERSION = "v1.6.19"
+CVE_VERSION = "1.6.19"
-EXTRA_OEMAKE += "GODEBUG=1"
+# EXTRA_OEMAKE += "GODEBUG=1"
PROVIDES += "virtual/containerd"
RPROVIDES:${PN} = "virtual-containerd"
-S = "${WORKDIR}/git"
+S = "${WORKDIR}/git/src/github.com/containerd/containerd"
PV = "${CONTAINERD_VERSION}+git${SRCPV}"
@@ -41,34 +41,7 @@ do_configure[noexec] = "1"
do_compile() {
export GOARCH="${TARGET_GOARCH}"
- # link fixups for compilation
- rm -f ${S}/src/import/vendor/src
- ln -sf ./ ${S}/src/import/vendor/src
-
- mkdir -p ${S}/src/import/vendor/src/github.com/containerd/containerd/
- mkdir -p ${S}/src/import/vendor/src/github.com/containerd/containerd/pkg/
- mkdir -p ${S}/src/import/vendor/src/github.com/containerd/containerd/contrib/
- # without this, the stress test parts of the build fail
- cp ${S}/src/import/*.go ${S}/src/import/vendor/src/github.com/containerd/containerd
-
- for c in content timeout ttrpcutil oom stdio process errdefs fs images mount snapshots linux api runtimes defaults progress \
- protobuf reference diff platforms runtime remotes version archive dialer gc metadata \
- metrics filters identifiers labels leases plugin server services \
- cmd cio containers namespaces oci events log reaper sys rootfs nvidia seed apparmor seccomp \
- cap cri userns atomic ioutil os registrar seutil runtimeoptions netns \
- shutdown schedcore tracing; do
- if [ -d ${S}/src/import/${c} ]; then
- ln -sfn ${S}/src/import/${c} ${S}/src/import/vendor/github.com/containerd/containerd/${c}
- fi
- if [ -d ${S}/src/import/pkg/${c} ]; then
- ln -sfn ${S}/src/import/pkg/${c} ${S}/src/import/vendor/github.com/containerd/containerd/pkg/${c}
- fi
- if [ -d ${S}/src/import/contrib/${c} ]; then
- ln -sfn ${S}/src/import/contrib/${c} ${S}/src/import/vendor/github.com/containerd/containerd/contrib/${c}
- fi
- done
-
- export GOPATH="${S}/src/import/.gopath:${S}/src/import/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ export GOPATH="${S}/src/import/.gopath:${S}/src/import/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go:${WORKDIR}/git/"
export GOROOT="${STAGING_DIR_NATIVE}/${nonarch_libdir}/${HOST_SYS}/go"
# Pass the needed cflags/ldflags so that cgo
@@ -84,10 +57,11 @@ do_compile() {
# cannot find package runtime/cgo (using -importcfg)
# ... recipe-sysroot-native/usr/lib/aarch64-poky-linux/go/pkg/tool/linux_amd64/link:
# cannot open file : open : no such file or directory
- export GO_BUILD_FLAGS="-a -pkgdir dontusecurrentpkgs"
+ export GO_BUILD_FLAGS="-trimpath -a -pkgdir dontusecurrentpkgs"
export GO111MODULE=off
- cd ${S}/src/import
+ cd ${S}
+
oe_runmake binaries
}
@@ -98,11 +72,11 @@ SYSTEMD_SERVICE:${PN} = "${@bb.utils.contains('DISTRO_FEATURES','systemd','conta
do_install() {
mkdir -p ${D}/${bindir}
- cp ${S}/src/import/bin/containerd ${D}/${bindir}/containerd
- cp ${S}/src/import/bin/containerd-shim ${D}/${bindir}/containerd-shim
- cp ${S}/src/import/bin/containerd-shim-runc-v1 ${D}/${bindir}/containerd-shim-runc-v1
- cp ${S}/src/import/bin/containerd-shim-runc-v2 ${D}/${bindir}/containerd-shim-runc-v2
- cp ${S}/src/import/bin/ctr ${D}/${bindir}/containerd-ctr
+ cp ${S}/bin/containerd ${D}/${bindir}/containerd
+ cp ${S}/bin/containerd-shim ${D}/${bindir}/containerd-shim
+ cp ${S}/bin/containerd-shim-runc-v1 ${D}/${bindir}/containerd-shim-runc-v1
+ cp ${S}/bin/containerd-shim-runc-v2 ${D}/${bindir}/containerd-shim-runc-v2
+ cp ${S}/bin/ctr ${D}/${bindir}/containerd-ctr
ln -sf containerd ${D}/${bindir}/docker-containerd
ln -sf containerd-shim ${D}/${bindir}/docker-containerd-shim
@@ -112,7 +86,7 @@ do_install() {
if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
install -d ${D}${systemd_unitdir}/system
- install -m 644 ${S}/src/import/containerd.service ${D}/${systemd_unitdir}/system
+ install -m 644 ${S}/containerd.service ${D}/${systemd_unitdir}/system
# adjust from /usr/local/bin to /usr/bin/
sed -e "s:/usr/local/bin/containerd:${bindir}/containerd:g" -i ${D}/${systemd_unitdir}/system/containerd.service
fi
diff --git a/recipes-containers/containerd/files/0001-build-use-oe-provided-GO-and-flags.patch b/recipes-containers/containerd/files/0001-build-use-oe-provided-GO-and-flags.patch
index 544881ef..95f23172 100644
--- a/recipes-containers/containerd/files/0001-build-use-oe-provided-GO-and-flags.patch
+++ b/recipes-containers/containerd/files/0001-build-use-oe-provided-GO-and-flags.patch
@@ -12,10 +12,10 @@ Signed-off-by: Bruce Ashfield <bruce.ashfield@gmail.com>
Makefile | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
-Index: git/src/import/Makefile
+Index: git/Makefile
===================================================================
---- git.orig/src/import/Makefile
-+++ git/src/import/Makefile
+--- git.orig/Makefile
++++ git/Makefile
@@ -121,7 +121,7 @@
TESTFLAGS_PARALLEL ?= 8
diff --git a/recipes-containers/cri-o/cri-o_git.bb b/recipes-containers/cri-o/cri-o_git.bb
index 7940bbd2..d74a17fc 100644
--- a/recipes-containers/cri-o/cri-o_git.bb
+++ b/recipes-containers/cri-o/cri-o_git.bb
@@ -39,14 +39,14 @@ DEPENDS = " \
ostree \
libdevmapper \
libseccomp \
- libselinux \
"
RDEPENDS:${PN} = " \
cni \
libdevmapper \
"
-SKIP_RECIPE[cri-o] ?= "${@bb.utils.contains('BBFILE_COLLECTIONS', 'security', bb.utils.contains('BBFILE_COLLECTIONS', 'selinux', '', 'Depends on libselinux from meta-selinux which is not included', d), 'Depends on libseccomp from meta-security which is not included', d)}"
+PACKAGECONFIG ?= "${@bb.utils.filter('DISTRO_FEATURES', 'selinux', d)}"
+PACKAGECONFIG[selinux] = ",,libselinux"
PACKAGES =+ "${PN}-config"
@@ -57,6 +57,7 @@ inherit systemd
inherit go
inherit goarch
inherit pkgconfig
+inherit container-host
EXTRA_OEMAKE="BUILDTAGS=''"
@@ -96,6 +97,8 @@ do_install() {
install -m 0644 ${S}/src/import/contrib/systemd/crio.service ${D}${systemd_unitdir}/system/
install -m 0644 ${S}/src/import/contrib/systemd/crio-shutdown.service ${D}${systemd_unitdir}/system/
install -m 0644 ${S}/src/import/contrib/systemd/crio-wipe.service ${D}${systemd_unitdir}/system/
+
+ install -d ${D}${localstatedir}/lib/crio
}
FILES:${PN}-config = "${sysconfdir}/crio/config/*"
diff --git a/recipes-containers/cri-o/files/crio.conf b/recipes-containers/cri-o/files/crio.conf
index 899d255b..d48ddae1 100644
--- a/recipes-containers/cri-o/files/crio.conf
+++ b/recipes-containers/cri-o/files/crio.conf
@@ -1,146 +1,556 @@
# generated via: crio --config="" config --default
-# The "crio" table contains all of the server options.
+# The CRI-O configuration file specifies all of the available configuration
+# options and command-line flags for the crio(8) OCI Kubernetes Container Runtime
+# daemon, but in a TOML format that can be more easily modified and versioned.
+#
+# Please refer to crio.conf(5) for details of all configuration options.
+
+# CRI-O supports partial configuration reload during runtime, which can be
+# done by sending SIGHUP to the running process. Currently supported options
+# are explicitly mentioned with: 'This option supports live configuration
+# reload'.
+
+# CRI-O reads its storage defaults from the containers-storage.conf(5) file
+# located at /etc/containers/storage.conf. Modify this storage configuration if
+# you want to change the system's defaults. If you want to modify storage just
+# for CRI-O, you can change the storage configuration options here.
[crio]
-# root is a path to the "root directory". CRIO stores all of its data,
-# including container images, in this directory.
+# Path to the "root directory". CRI-O stores all of its data, including
+# container images, in this directory.
root = "/var/lib/containers/storage"
-# run is a path to the "run directory". CRIO stores all of its state
-# in this directory.
-runroot = "/var/run/containers/storage"
+# Path to the "run directory". CRI-O stores all of its state in this directory.
+runroot = "/run/containers/storage"
-# storage_driver select which storage driver is used to manage storage
-# of images and containers.
-storage_driver = ""
+# Storage driver used to manage the storage of images and containers. Please
+# refer to containers-storage.conf(5) to see all available storage drivers.
+storage_driver = "overlay"
-# storage_option is used to pass an option to the storage driver.
+# List to pass options to the storage driver. Please refer to
+# containers-storage.conf(5) to see all available storage options.
storage_option = [
+ "overlay.mountopt=nodev",
]
-# The "crio.api" table contains settings for the kubelet/gRPC
-# interface (which is also used by crioctl).
+# The default log directory where all logs will go unless directly specified by
+# the kubelet. The log directory specified must be an absolute directory.
+log_dir = "/var/log/crio/pods"
+
+# Location for CRI-O to lay down the temporary version file.
+# It is used to check if crio wipe should wipe containers, which should
+# always happen on a node reboot
+version_file = "/var/run/crio/version"
+
+# Location for CRI-O to lay down the persistent version file.
+# It is used to check if crio wipe should wipe images, which should
+# only happen when CRI-O has been upgraded
+version_file_persist = "/var/lib/crio/version"
+
+# InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts.
+# If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations.
+internal_wipe = true
+
+# Location for CRI-O to lay down the clean shutdown file.
+# It is used to check whether crio had time to sync before shutting down.
+# If not found, crio wipe will clear the storage directory.
+clean_shutdown_file = "/var/lib/crio/clean.shutdown"
+
+# The crio.api table contains settings for the kubelet/gRPC interface.
[crio.api]
-# listen is the path to the AF_LOCAL socket on which crio will listen.
+# Path to AF_LOCAL socket on which CRI-O will listen.
listen = "/var/run/crio/crio.sock"
-# stream_address is the IP address on which the stream server will listen
-stream_address = ""
+# IP address on which the stream server will listen.
+stream_address = "127.0.0.1"
+
+# The port on which the stream server will listen. If the port is set to "0", then
+# CRI-O will allocate a random free port number.
+stream_port = "0"
+
+# Enable encrypted TLS transport of the stream server.
+stream_enable_tls = false
-# stream_port is the port on which the stream server will listen
-stream_port = "10010"
+# Length of time until open streams terminate due to lack of activity
+stream_idle_timeout = ""
-# file_locking is whether file-based locking will be used instead of
-# in-memory locking
-file_locking = true
+# Path to the x509 certificate file used to serve the encrypted stream. This
+# file can change, and CRI-O will automatically pick up the changes within 5
+# minutes.
+stream_tls_cert = ""
-# The "crio.runtime" table contains settings pertaining to the OCI
-# runtime used and options for how to set up and manage the OCI runtime.
+# Path to the key file used to serve the encrypted stream. This file can
+# change and CRI-O will automatically pick up the changes within 5 minutes.
+stream_tls_key = ""
+
+# Path to the x509 CA(s) file used to verify and authenticate client
+# communication with the encrypted stream. This file can change and CRI-O will
+# automatically pick up the changes within 5 minutes.
+stream_tls_ca = ""
+
+# Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 16 * 1024 * 1024.
+grpc_max_send_msg_size = 83886080
+
+# Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 16 * 1024 * 1024.
+grpc_max_recv_msg_size = 83886080
+
+# The crio.runtime table contains settings pertaining to the OCI runtime used
+# and options for how to set up and manage the OCI runtime.
[crio.runtime]
-# runtime is the OCI compatible runtime used for trusted container workloads.
-# This is a mandatory setting as this runtime will be the default one
-# and will also be used for untrusted container workloads if
-# runtime_untrusted_workload is not set.
-runtime = "/usr/bin/runc"
-
-# runtime_untrusted_workload is the OCI compatible runtime used for untrusted
-# container workloads. This is an optional setting, except if
-# default_container_trust is set to "untrusted".
-runtime_untrusted_workload = ""
-
-# default_workload_trust is the default level of trust crio puts in container
-# workloads. It can either be "trusted" or "untrusted", and the default
-# is "trusted".
-# Containers can be run through different container runtimes, depending on
-# the trust hints we receive from kubelet:
-# - If kubelet tags a container workload as untrusted, crio will try first to
-# run it through the untrusted container workload runtime. If it is not set,
-# crio will use the trusted runtime.
-# - If kubelet does not provide any information about the container workload trust
-# level, the selected runtime will depend on the default_container_trust setting.
-# If it is set to "untrusted", then all containers except for the host privileged
-# ones, will be run by the runtime_untrusted_workload runtime. Host privileged
-# containers are by definition trusted and will always use the trusted container
-# runtime. If default_container_trust is set to "trusted", crio will use the trusted
-# container runtime for all containers.
-default_workload_trust = "trusted"
-
-# conmon is the path to conmon binary, used for managing the runtime.
-conmon = "/usr/bin/conmon"
-
-# conmon_env is the environment variable list for conmon process,
-# used for passing necessary environment variable to conmon or runtime.
+# A list of ulimits to be set in containers by default, specified as
+# "<ulimit name>=<soft limit>:<hard limit>", for example:
+# "nofile=1024:2048"
+# If nothing is set here, settings will be inherited from the CRI-O daemon
+default_ulimits = [
+]
+
+# If true, the runtime will not use pivot_root, but instead use MS_MOVE.
+no_pivot = false
+
+# decryption_keys_path is the path where the keys required for
+# image decryption are stored. This option supports live configuration reload.
+decryption_keys_path = "/etc/crio/keys/"
+
+# Path to the conmon binary, used for monitoring the OCI runtime.
+# Will be searched for using $PATH if empty.
+conmon = ""
+
+# Cgroup setting for conmon
+conmon_cgroup = "system.slice"
+
+# Environment variable list for the conmon process, used for passing necessary
+# environment variables to conmon or the runtime.
conmon_env = [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
]
-# selinux indicates whether or not SELinux will be used for pod
-# separation on the host. If you enable this flag, SELinux must be running
-# on the host.
-selinux = false
+# Additional environment variables to set for all the
+# containers. These are overridden if set in the
+# container image spec or in the container runtime configuration.
+default_env = [
+]
-# seccomp_profile is the seccomp json profile path which is used as the
-# default for the runtime.
-seccomp_profile = "/etc/crio/seccomp.json"
+# If true, SELinux will be used for pod separation on the host.
+selinux = false
-# apparmor_profile is the apparmor profile name which is used as the
-# default for the runtime.
+# Path to the seccomp.json profile which is used as the default seccomp profile
+# for the runtime. If not specified, then the internal default seccomp profile
+# will be used. This option supports live configuration reload.
+seccomp_profile = ""
+
+# Changes the meaning of an empty seccomp profile. By default
+# (and according to CRI spec), an empty profile means unconfined.
+# This option tells CRI-O to treat an empty profile as the default profile,
+# which might increase security.
+seccomp_use_default_when_empty = false
+
+# Used to change the name of the default AppArmor profile of CRI-O. The default
+# profile name is "crio-default". This profile only takes effect if the user
+# does not specify a profile via the Kubernetes Pod's metadata annotation. If
+# the profile is set to "unconfined", then this equals to disabling AppArmor.
+# This option supports live configuration reload.
apparmor_profile = "crio-default"
-# cgroup_manager is the cgroup management implementation to be used
-# for the runtime.
-cgroup_manager = "cgroupfs"
+# Path to the blockio class configuration file for configuring
+# the cgroup blockio controller.
+blockio_config_file = ""
+
+# Used to change irqbalance service config file path which is used for configuring
+# irqbalance daemon.
+irqbalance_config_file = "/etc/sysconfig/irqbalance"
+
+# Path to the RDT configuration file for configuring the resctrl pseudo-filesystem.
+# This option supports live configuration reload.
+rdt_config_file = ""
+
+# Cgroup management implementation used for the runtime.
+cgroup_manager = "systemd"
+
+# Specify whether the image pull must be performed in a separate cgroup.
+separate_pull_cgroup = ""
+
+# List of default capabilities for containers. If it is empty or commented out,
+# only the capabilities defined in the containers json file by the user/kube
+# will be added.
+default_capabilities = [
+ "CHOWN",
+ "DAC_OVERRIDE",
+ "FSETID",
+ "FOWNER",
+ "SETGID",
+ "SETUID",
+ "SETPCAP",
+ "NET_BIND_SERVICE",
+ "KILL",
+]
+
+# List of default sysctls. If it is empty or commented out, only the sysctls
+# defined in the container json file by the user/kube will be added.
+default_sysctls = [
+]
+
+# List of devices on the host that a
+# user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation.
+allowed_devices = [
+ "/dev/fuse",
+]
+
+# List of additional devices. specified as
+# "<device-on-host>:<device-on-container>:<permissions>", for example: "--device=/dev/sdc:/dev/xvdc:rwm".
+# If it is empty or commented out, only the devices
+# defined in the container json file by the user/kube will be added.
+additional_devices = [
+]
+
+# Change the default behavior of setting container devices uid/gid from CRI's
+# SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid.
+# Defaults to false.
+device_ownership_from_security_context = false
-# hooks_dir_path is the oci hooks directory for automatically executed hooks
-hooks_dir_path = "/usr/share/containers/oci/hooks.d"
+# Path to OCI hooks directories for automatically executed hooks. If one of the
+# directories does not exist, then CRI-O will automatically skip them.
+hooks_dir = [
+ "/usr/share/containers/oci/hooks.d",
+]
-# pids_limit is the number of processes allowed in a container
+# Path to the file specifying the defaults mounts for each container. The
+# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
+# its default mounts from the following two files:
+#
+# 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the
+# override file, where users can either add in their own default mounts, or
+# override the default mounts shipped with the package.
+#
+# 2) /usr/share/containers/mounts.conf: This is the default file read for
+# mounts. If you want CRI-O to read from a different, specific mounts file,
+# you can change the default_mounts_file. Note, if this is done, CRI-O will
+# only add mounts it finds in this file.
+#
+default_mounts_file = ""
+
+# Maximum number of processes allowed in a container.
pids_limit = 1024
-# The "crio.image" table contains settings pertaining to the
-# management of OCI images.
+# Maximum size allowed for the container log file. Negative numbers indicate
+# that no size limit is imposed. If it is positive, it must be >= 8192 to
+# match/exceed conmon's read buffer. The file is truncated and re-opened so the
+# limit is never exceeded.
+log_size_max = -1
+
+# Whether container output should be logged to journald in addition to the kubernetes log file
+log_to_journald = false
+
+# Path to directory in which container exit files are written to by conmon.
+container_exits_dir = "/var/run/crio/exits"
+
+# Path to directory for container attach sockets.
+container_attach_socket_dir = "/var/run/crio"
+
+# The prefix to use for the source of the bind mounts.
+bind_mount_prefix = ""
+
+# If set to true, all containers will run in read-only mode.
+read_only = false
+
+# Changes the verbosity of the logs based on the level it is set to. Options
+# are fatal, panic, error, warn, info, debug and trace. This option supports
+# live configuration reload.
+log_level = "info"
+
+# Filter the log messages by the provided regular expression.
+# This option supports live configuration reload.
+log_filter = ""
+
+# The UID mappings for the user namespace of each container. A range is
+# specified in the form containerUID:HostUID:Size. Multiple ranges must be
+# separated by comma.
+uid_mappings = ""
+
+# The GID mappings for the user namespace of each container. A range is
+# specified in the form containerGID:HostGID:Size. Multiple ranges must be
+# separated by comma.
+gid_mappings = ""
+
+# If set, CRI-O will reject any attempt to map host UIDs below this value
+# into user namespaces. A negative value indicates that no minimum is set,
+# so specifying mappings will only be allowed for pods that run as UID 0.
+minimum_mappable_uid = -1
+
+# If set, CRI-O will reject any attempt to map host GIDs below this value
+# into user namespaces. A negative value indicates that no minimum is set,
+# so specifying mappings will only be allowed for pods that run as UID 0.
+minimum_mappable_gid = -1
+
+# The minimal amount of time in seconds to wait before issuing a timeout
+# regarding the proper termination of the container. The lowest possible
+# value is 30s, whereas lower values are not considered by CRI-O.
+ctr_stop_timeout = 30
+
+# drop_infra_ctr determines whether CRI-O drops the infra container
+# when a pod does not have a private PID namespace, and does not use
+# a kernel separating runtime (like kata).
+# It requires manage_ns_lifecycle to be true.
+drop_infra_ctr = true
+
+# infra_ctr_cpuset determines what CPUs will be used to run infra containers.
+# You can use linux CPU list format to specify desired CPUs.
+# To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus.
+infra_ctr_cpuset = ""
+
+# The directory where the state of the managed namespaces gets tracked.
+# Only used when manage_ns_lifecycle is true.
+namespaces_dir = "/var/run"
+
+# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
+pinns_path = ""
+
+# default_runtime is the _name_ of the OCI runtime to be used as the default.
+# The name is matched against the runtimes map below. If this value is changed,
+# the corresponding existing entry from the runtimes map below will be ignored.
+default_runtime = "runc"
+
+# A list of paths that, when absent from the host,
+# will cause a container creation to fail (as opposed to the current behavior being created as a directory).
+# This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose
+# creation as a file is not desired either.
+# An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because
+# the hostname is being managed dynamically.
+absent_mount_sources_to_reject = [
+]
+
+# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
+# The runtime to use is picked based on the runtime handler provided by the CRI.
+# If no runtime handler is provided, the runtime will be picked based on the level
+# of trust of the workload. Each entry in the table should follow the format:
+#
+#[crio.runtime.runtimes.runtime-handler]
+# runtime_path = "/path/to/the/executable"
+# runtime_type = "oci"
+# runtime_root = "/path/to/the/root"
+# privileged_without_host_devices = false
+# allowed_annotations = []
+# Where:
+# - runtime-handler: name used to identify the runtime
+# - runtime_path (optional, string): absolute path to the runtime executable in
+# the host filesystem. If omitted, the runtime-handler identifier should match
+# the runtime executable name, and the runtime executable should be placed
+# in $PATH.
+# - runtime_type (optional, string): type of runtime, one of: "oci", "vm". If
+# omitted, an "oci" runtime is assumed.
+# - runtime_root (optional, string): root directory for storage of containers
+# state.
+# - runtime_config_path (optional, string): the path for the runtime configuration
+# file. This can only be used with when using the VM runtime_type.
+# - privileged_without_host_devices (optional, bool): an option for restricting
+# host devices from being passed to privileged containers.
+# - allowed_annotations (optional, array of strings): an option for specifying
+# a list of experimental annotations that this runtime handler is allowed to process.
+# The currently recognized values are:
+# "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
+# "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true".
+# "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
+# "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
+# "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container.
+# "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook.
+# "io.kubernetes.cri.rdt-class" for setting the RDT class of a container
+
+
+[crio.runtime.runtimes.runc]
+runtime_path = ""
+runtime_type = "oci"
+runtime_root = "/run/runc"
+runtime_config_path = ""
+
+
+allowed_annotations = [
+ "io.containers.trace-syscall",
+]
+
+
+
+# crun is a fast and lightweight fully featured OCI runtime and C library for
+# running containers
+#[crio.runtime.runtimes.crun]
+
+# Kata Containers is an OCI runtime, where containers are run inside lightweight
+# VMs. Kata provides additional isolation towards the host, minimizing the host attack
+# surface and mitigating the consequences of containers breakout.
+
+# Kata Containers with the default configured VMM
+#[crio.runtime.runtimes.kata-runtime]
+
+# Kata Containers with the QEMU VMM
+#[crio.runtime.runtimes.kata-qemu]
+
+# Kata Containers with the Firecracker VMM
+#[crio.runtime.runtimes.kata-fc]
+
+# The workloads table defines ways to customize containers with different resources
+# that work based on annotations, rather than the CRI.
+# Note, the behavior of this table is EXPERIMENTAL and may change at any time.
+# Each workload has a name, activation_annotation, annotation_prefix and set of resources it supports mutating.
+# The currently supported resources are "cpu" (to configure the cpu shares) and "cpuset" to configure the cpuset.
+# Each resource can have a default value specified, or be empty.
+# For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored).
+# To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified
+# signifying for that resource type to override the default value.
+# If the annotation_prefix is not present, every container in the pod will be given the default values.
+# Example:
+# [crio.runtime.workloads.workload-type]
+# activation_annotation = "io.crio/workload"
+# annotation_prefix = "io.crio.workload-type"
+# [crio.runtime.workloads.workload-type.resources]
+# cpuset = 0
+# cpushares = "0-1"
+# Where:
+# The workload name is workload-type.
+# To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match).
+# This workload supports setting cpuset and cpu resources.
+# annotation_prefix is used to customize the different resources.
+# To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation:
+# "io.crio.workload-type/$container_name = {"cpushares": "value"}"
+
+
+# The crio.image table contains settings pertaining to the management of OCI images.
+#
+# CRI-O reads its configured registries defaults from the system wide
+# containers-registries.conf(5) located in /etc/containers/registries.conf. If
+# you want to modify just CRI-O, you can change the registries configuration in
+# this file. Otherwise, leave insecure_registries and registries commented out to
+# use the system's defaults from /etc/containers/registries.conf.
[crio.image]
-# default_transport is the prefix we try prepending to an image name if the
-# image name as we receive it can't be parsed as a valid source reference
+# Default transport for pulling images from a remote container storage.
default_transport = "docker://"
-# pause_image is the image which we use to instantiate infra containers.
-pause_image = "kubernetes/pause"
+# The path to a file containing credentials necessary for pulling images from
+# secure registries. The file is similar to that of /var/lib/kubelet/config.json
+global_auth_file = ""
-# pause_command is the command to run in a pause_image to have a container just
-# sit there. If the image contains the necessary information, this value need
-# not be specified.
+# The image used to instantiate infra containers.
+# This option supports live configuration reload.
+pause_image = "k8s.gcr.io/pause:3.6"
+
+# The path to a file containing credentials specific for pulling the pause_image from
+# above. The file is similar to that of /var/lib/kubelet/config.json
+# This option supports live configuration reload.
+pause_image_auth_file = ""
+
+# The command to run to have a container stay in the paused state.
+# When explicitly set to "", it will fallback to the entrypoint and command
+# specified in the pause image. When commented out, it will fallback to the
+# default: "/pause". This option supports live configuration reload.
pause_command = "/pause"
-# signature_policy is the name of the file which decides what sort of policy we
-# use when deciding whether or not to trust an image that we've pulled.
-# Outside of testing situations, it is strongly advised that this be left
-# unspecified so that the default system-wide policy will be used.
+# Path to the file which decides what sort of policy we use when deciding
+# whether or not to trust an image that we've pulled. It is not recommended that
+# this option be used, as the default behavior of using the system-wide default
+# policy (i.e., /etc/containers/policy.json) is most often preferred. Please
+# refer to containers-policy.json(5) for more details.
signature_policy = ""
-# image_volumes controls how image volumes are handled.
-# The valid values are mkdir and ignore.
-image_volumes = "mkdir"
-
-# insecure_registries is used to skip TLS verification when pulling images.
+# List of registries to skip TLS verification for pulling images. Please
+# consider configuring the registries via /etc/containers/registries.conf before
+# changing them here.
insecure_registries = [
]
-# registries is used to specify a comma separated list of registries to be used
-# when pulling an unqualified image (e.g. fedora:rawhide).
-registries = ['docker.io', 'registry.fedoraproject.org', 'registry.access.redhat.com']
+# Controls how image volumes are handled. The valid values are mkdir, bind and
+# ignore; the latter will ignore volumes entirely.
+image_volumes = "mkdir"
+
+# Temporary directory to use for storing big files
+big_files_temporary_dir = ""
-# The "crio.network" table contains settings pertaining to the
-# management of CNI plugins.
+# The crio.network table contains settings pertaining to the management of
+# CNI plugins.
[crio.network]
-# network_dir is is where CNI network configuration
-# files are stored.
+# The default CNI network name to be selected. If not set or "", then
+# CRI-O will pick-up the first one found in network_dir.
+# cni_default_network = ""
+
+# Path to the directory where CNI configuration files are located.
network_dir = "/etc/cni/net.d/"
-# plugin_dir is is where CNI plugin binaries are stored.
-plugin_dir = "/opt/cni/bin"
+# Paths to directories where CNI plugin binaries are located.
+plugin_dirs = [
+ "/opt/cni/bin/",
+]
+
+# A necessary configuration for Prometheus based metrics retrieval
+[crio.metrics]
+
+# Globally enable or disable metrics support.
+enable_metrics = false
+
+# Specify enabled metrics collectors.
+# Per default all metrics are enabled.
+# It is possible to prefix the metrics with "container_runtime_" and "crio_".
+# For example, the metrics collector "operations" would be treated in the same
+# way as "crio_operations" and "container_runtime_crio_operations".
+metrics_collectors = [
+ "operations",
+ "operations_latency_microseconds_total",
+ "operations_latency_microseconds",
+ "operations_errors",
+ "image_pulls_by_digest",
+ "image_pulls_by_name",
+ "image_pulls_by_name_skipped",
+ "image_pulls_failures",
+ "image_pulls_successes",
+ "image_pulls_layer_size",
+ "image_layer_reuse",
+ "containers_oom_total",
+ "containers_oom",
+ "processes_defunct",
+ "operations_total",
+ "operations_latency_seconds",
+ "operations_latency_seconds_total",
+ "operations_errors_total",
+ "image_pulls_bytes_total",
+ "image_pulls_skipped_bytes_total",
+ "image_pulls_failure_total",
+ "image_pulls_success_total",
+ "image_layer_reuse_total",
+ "containers_oom_count_total",
+]
+
+# The port on which the metrics server will listen.
+metrics_port = 9090
+
+# Local socket path to bind the metrics server to
+metrics_socket = ""
+
+# The certificate for the secure metrics server.
+# If the certificate is not available on disk, then CRI-O will generate a
+# self-signed one. CRI-O also watches for changes of this path and reloads the
+# certificate on any modification event.
+metrics_cert = ""
+
+# The certificate key for the secure metrics server.
+# Behaves in the same way as the metrics_cert.
+metrics_key = ""
+
+# A necessary configuration for OpenTelemetry trace data exporting
+[crio.tracing]
+
+# Globally enable or disable exporting OpenTelemetry traces.
+enable_tracing = false
+
+# Address on which the gRPC trace collector listens on.
+tracing_endpoint = "0.0.0.0:4317"
+
+# Number of samples to collect per million spans.
+tracing_sampling_rate_per_million = 0
+
+# Necessary information pertaining to container and pod stats reporting.
+[crio.stats]
+
+# The number of seconds between collecting pod and container stats.
+# If set to 0, the stats are collected on-demand instead.
+stats_collection_period = 0
+
diff --git a/recipes-containers/criu/criu_git.bb b/recipes-containers/criu/criu_git.bb
index 77a12abf..46401f9a 100644
--- a/recipes-containers/criu/criu_git.bb
+++ b/recipes-containers/criu/criu_git.bb
@@ -17,9 +17,9 @@ SRCREV = "4f8f295e57e68740699479d12c1ad251e6dd859f"
PV = "3.17+git${SRCPV}"
SRC_URI = "git://github.com/checkpoint-restore/criu.git;branch=master;protocol=https \
- file://0002-criu-Skip-documentation-install.patch \
- file://0001-criu-Change-libraries-install-directory.patch \
- file://lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch \
+ file://0001-criu-Skip-documentation-install.patch \
+ file://0002-criu-Change-libraries-install-directory.patch \
+ file://0003-lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch \
"
COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
diff --git a/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch b/recipes-containers/criu/files/0001-criu-Skip-documentation-install.patch
index af45db73..43e27044 100644
--- a/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch
+++ b/recipes-containers/criu/files/0001-criu-Skip-documentation-install.patch
@@ -1,22 +1,21 @@
-From 45d74ae8a314c481398ba91a3697ffbd074cd98b Mon Sep 17 00:00:00 2001
+From 485e957a4c3289d105dd6203af31c0e4e1438ac6 Mon Sep 17 00:00:00 2001
From: Jianchuan Wang <jianchuan.wang@windriver.com>
Date: Tue, 16 Aug 2016 09:42:24 +0800
-Subject: [PATCH] criu: Skip documentation install
+Subject: [PATCH 1/3] criu: Skip documentation install
asciidoc is needed to generate CRIU documentation, so skip it in install.
Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
-
---
Makefile.install | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile.install b/Makefile.install
-index 3987bcc..1def3cf 100644
+index aafb95469..1b02b70af 100644
--- a/Makefile.install
+++ b/Makefile.install
-@@ -29,7 +29,7 @@ export PREFIX BINDIR SBINDIR MANDIR RUNDIR
- export LIBDIR INCLUDEDIR LIBEXECDIR
+@@ -30,7 +30,7 @@ export PREFIX BINDIR SBINDIR MANDIR RUNDIR
+ export LIBDIR INCLUDEDIR LIBEXECDIR PLUGINDIR
install-man:
- $(Q) $(MAKE) -C Documentation install
@@ -24,3 +23,6 @@ index 3987bcc..1def3cf 100644
.PHONY: install-man
install-lib: lib
+--
+2.25.1
+
diff --git a/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch b/recipes-containers/criu/files/0002-criu-Change-libraries-install-directory.patch
index afb1332d..453be131 100644
--- a/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
+++ b/recipes-containers/criu/files/0002-criu-Change-libraries-install-directory.patch
@@ -1,25 +1,25 @@
-From f64fbca70e6049dad3c404d871f2383d97725d2d Mon Sep 17 00:00:00 2001
+From dcbf7f8ad1b07ff718eac2ce79ed522ac1cee189 Mon Sep 17 00:00:00 2001
From: Mark Asselstine <mark.asselstine@windriver.com>
Date: Fri, 8 Sep 2017 15:11:31 -0400
-Subject: [PATCH] criu: Change libraries install directory
+Subject: [PATCH 2/3] criu: Change libraries install directory
Install the libraries into /usr/lib(or /usr/lib64)
Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
-
---
Makefile.install | 13 -------------
1 file changed, 13 deletions(-)
diff --git a/Makefile.install b/Makefile.install
-index 1def3cf..d020eef 100644
+index 1b02b70af..2839ef5fe 100644
--- a/Makefile.install
+++ b/Makefile.install
@@ -9,19 +9,6 @@ LIBEXECDIR ?= $(PREFIX)/libexec
RUNDIR ?= /run
+ PLUGINDIR ?= /var/lib/criu
- #
+-#
-# For recent Debian/Ubuntu with multiarch support.
-DEB_HOST_MULTIARCH := $(shell dpkg-architecture -qDEB_HOST_MULTIARCH 2>/dev/null)
-ifneq "$(DEB_HOST_MULTIARCH)" ""
@@ -32,7 +32,9 @@ index 1def3cf..d020eef 100644
- endif
-endif
-
--#
+ #
# LIBDIR falls back to the standard path.
LIBDIR ?= $(PREFIX)/lib
-
+--
+2.25.1
+
diff --git a/recipes-containers/criu/files/lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch b/recipes-containers/criu/files/0003-lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch
index 70ccb287..210fbe7c 100644
--- a/recipes-containers/criu/files/lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch
+++ b/recipes-containers/criu/files/0003-lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch
@@ -1,23 +1,22 @@
-From 6caf90592d61c8c45b32cb7ff76709f9326030e2 Mon Sep 17 00:00:00 2001
+From 0a04c5bc80319485e17e9a86e799fe2c5bfa3d38 Mon Sep 17 00:00:00 2001
From: Mark Asselstine <mark.asselstine@windriver.com>
Date: Fri, 8 Sep 2017 15:40:49 -0400
-Subject: [PATCH] lib/Makefile: overwrite install-lib, to allow multiarch
+Subject: [PATCH 3/3] lib/Makefile: overwrite install-lib, to allow multiarch
I am not sure why Yocto installs python modules in arch specific
/usr/libXX directories but it does. Allow the recipe to pass this via
INSTALL_LIB.
Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
-
---
lib/Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/Makefile b/lib/Makefile
-index b1bb057..06f5c5d 100644
+index 575a7bad3..f503d430b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
-@@ -53,7 +53,7 @@ install: lib-c lib-py crit/crit lib/c/criu.pc.in
+@@ -59,7 +59,7 @@ install: lib-c lib-a lib-py crit/crit lib/c/criu.pc.in
$(Q) sed -e 's,@version@,$(CRIU_VERSION),' -e 's,@libdir@,$(LIBDIR),' -e 's,@includedir@,$(dir $(INCLUDEDIR)/criu/),' lib/c/criu.pc.in > lib/c/criu.pc
$(Q) install -m 644 lib/c/criu.pc $(DESTDIR)$(LIBDIR)/pkgconfig
$(E) " INSTALL " crit
@@ -26,3 +25,6 @@ index b1bb057..06f5c5d 100644
.PHONY: install
uninstall:
+--
+2.25.1
+
diff --git a/recipes-containers/criu/files/fix-building-on-newest-glibc-and-kernel.patch b/recipes-containers/criu/files/fix-building-on-newest-glibc-and-kernel.patch
deleted file mode 100644
index 9361adc2..00000000
--- a/recipes-containers/criu/files/fix-building-on-newest-glibc-and-kernel.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From b59947007362b53e9f41f1e5a33071dedf1c59ac Mon Sep 17 00:00:00 2001
-From: Adrian Reber <areber@redhat.com>
-Date: Thu, 28 Sep 2017 09:13:33 +0000
-Subject: [PATCH] fix building on newest glibc and kernel
-
-On Fedora rawhide with kernel-headers-4.14.0-0.rc2.git0.1.fc28.x86_64
-glibc-devel-2.26.90-15.fc28.x86_64 criu does not build any more:
-
-In file included from /usr/include/linux/aio_abi.h:31:0,
- from criu/cr-check.c:24:
-/usr/include/sys/mount.h:35:3: error: expected identifier before numeric constant
- MS_RDONLY = 1, /* Mount read-only. */
- ^
-make[2]: *** [/builddir/build/BUILD/criu-3.5/scripts/nmk/scripts/build.mk:111: criu/cr-check.o] Error 1
-make[1]: *** [criu/Makefile:73: criu/built-in.o] Error 2
-make: *** [Makefile:233: criu] Error 2
-
-This simple re-ordering of includes fixes it for me.
-
-Signed-off-by: Adrian Reber <areber@redhat.com>
-Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
-
-Upstream-Status: Backport
-[https://github.com/checkpoint-restore/criu/commit/f41e386d4d40e3e26b0cfdc85a812b7edb337f1d#diff-cc847b1cc975358c6582595be92d48db]
-
-Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
-
----
- criu/cr-check.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/criu/cr-check.c b/criu/cr-check.c
-index 1dd887a..93df2ab 100644
---- a/criu/cr-check.c
-+++ b/criu/cr-check.c
-@@ -21,8 +21,8 @@
- #include <netinet/in.h>
- #include <sys/prctl.h>
- #include <sched.h>
--#include <linux/aio_abi.h>
- #include <sys/mount.h>
-+#include <linux/aio_abi.h>
-
- #include "../soccr/soccr.h"
-
diff --git a/recipes-containers/docker-distribution/docker-distribution_git.bb b/recipes-containers/docker-distribution/docker-distribution_git.bb
index 93c2067e..f8981a88 100644
--- a/recipes-containers/docker-distribution/docker-distribution_git.bb
+++ b/recipes-containers/docker-distribution/docker-distribution_git.bb
@@ -7,6 +7,7 @@ SRCREV_distribution= "b5ca020cfbe998e5af3457fda087444cf5116496"
SRC_URI = "git://github.com/docker/distribution.git;branch=release/2.8;name=distribution;destsuffix=git/src/github.com/docker/distribution;protocol=https \
file://docker-registry.service \
file://0001-build-use-to-use-cross-go-compiler.patch \
+ file://0001-Fix-runaway-allocation-on-v2-_catalog.patch \
"
PACKAGES =+ "docker-registry"
diff --git a/recipes-containers/docker-distribution/files/0001-Fix-runaway-allocation-on-v2-_catalog.patch b/recipes-containers/docker-distribution/files/0001-Fix-runaway-allocation-on-v2-_catalog.patch
new file mode 100644
index 00000000..69da7054
--- /dev/null
+++ b/recipes-containers/docker-distribution/files/0001-Fix-runaway-allocation-on-v2-_catalog.patch
@@ -0,0 +1,669 @@
+From 521ea3d973cb0c7089ebbcdd4ccadc34be941f54 Mon Sep 17 00:00:00 2001
+From: "Jose D. Gomez R" <jose.gomez@suse.com>
+Date: Mon, 24 Apr 2023 18:52:27 +0200
+Subject: [PATCH] Fix runaway allocation on /v2/_catalog
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Introduced a Catalog entry in the configuration struct. With it,
+it's possible to control the maximum amount of entries returned
+by /v2/catalog (`GetCatalog` in registry/handlers/catalog.go).
+
+It's set to a default value of 1000.
+
+`GetCatalog` returns 100 entries by default if no `n` is
+provided. When provided it will be validated to be between `0`
+and `MaxEntries` defined in Configuration. When `n` is outside
+the aforementioned boundary, ErrorCodePaginationNumberInvalid is
+returned.
+
+`GetCatalog` now handles `n=0` gracefully with an empty response
+as well.
+
+Signed-off-by: José D. Gómez R. <1josegomezr@gmail.com>
+Co-authored-by: Cory Snider <corhere@gmail.com>
+
+CVE: CVE-2023-2253
+
+Upstream-Status: Backport [https://github.com/distribution/distribution/commit/521ea3d973cb0c7089ebbcdd4ccadc34be941f54]
+
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+---
+ configuration/configuration.go | 18 +-
+ configuration/configuration_test.go | 4 +
+ registry/api/v2/descriptors.go | 17 ++
+ registry/api/v2/errors.go | 9 +
+ registry/handlers/api_test.go | 316 +++++++++++++++++++++++++---
+ registry/handlers/catalog.go | 54 +++--
+ 6 files changed, 376 insertions(+), 42 deletions(-)
+
+diff --git a/configuration/configuration.go b/configuration/configuration.go
+index dd315485..1e696613 100644
+--- a/configuration/configuration.go
++++ b/configuration/configuration.go
+@@ -193,7 +193,8 @@ type Configuration struct {
+ } `yaml:"pool,omitempty"`
+ } `yaml:"redis,omitempty"`
+
+- Health Health `yaml:"health,omitempty"`
++ Health Health `yaml:"health,omitempty"`
++ Catalog Catalog `yaml:"catalog,omitempty"`
+
+ Proxy Proxy `yaml:"proxy,omitempty"`
+
+@@ -244,6 +245,16 @@ type Configuration struct {
+ } `yaml:"policy,omitempty"`
+ }
+
++// Catalog is composed of MaxEntries.
++// Catalog endpoint (/v2/_catalog) configuration, it provides the configuration
++// options to control the maximum number of entries returned by the catalog endpoint.
++type Catalog struct {
++ // Max number of entries returned by the catalog endpoint. Requesting n entries
++ // to the catalog endpoint will return at most MaxEntries entries.
++ // An empty or a negative value will set a default of 1000 maximum entries by default.
++ MaxEntries int `yaml:"maxentries,omitempty"`
++}
++
+ // LogHook is composed of hook Level and Type.
+ // After hooks configuration, it can execute the next handling automatically,
+ // when defined levels of log message emitted.
+@@ -670,6 +681,11 @@ func Parse(rd io.Reader) (*Configuration, error) {
+ if v0_1.Loglevel != Loglevel("") {
+ v0_1.Loglevel = Loglevel("")
+ }
++
++ if v0_1.Catalog.MaxEntries <= 0 {
++ v0_1.Catalog.MaxEntries = 1000
++ }
++
+ if v0_1.Storage.Type() == "" {
+ return nil, errors.New("no storage configuration provided")
+ }
+diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go
+index 0d6136e1..48cc9980 100644
+--- a/configuration/configuration_test.go
++++ b/configuration/configuration_test.go
+@@ -71,6 +71,9 @@ var configStruct = Configuration{
+ },
+ },
+ },
++ Catalog: Catalog{
++ MaxEntries: 1000,
++ },
+ HTTP: struct {
+ Addr string `yaml:"addr,omitempty"`
+ Net string `yaml:"net,omitempty"`
+@@ -524,6 +527,7 @@ func copyConfig(config Configuration) *Configuration {
+ configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor())
+ configCopy.Loglevel = config.Loglevel
+ configCopy.Log = config.Log
++ configCopy.Catalog = config.Catalog
+ configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields))
+ for k, v := range config.Log.Fields {
+ configCopy.Log.Fields[k] = v
+diff --git a/registry/api/v2/descriptors.go b/registry/api/v2/descriptors.go
+index a9616c58..c3bf90f7 100644
+--- a/registry/api/v2/descriptors.go
++++ b/registry/api/v2/descriptors.go
+@@ -134,6 +134,19 @@ var (
+ },
+ }
+
++ invalidPaginationResponseDescriptor = ResponseDescriptor{
++ Name: "Invalid pagination number",
++ Description: "The received parameter n was invalid in some way, as described by the error code. The client should resolve the issue and retry the request.",
++ StatusCode: http.StatusBadRequest,
++ Body: BodyDescriptor{
++ ContentType: "application/json",
++ Format: errorsBody,
++ },
++ ErrorCodes: []errcode.ErrorCode{
++ ErrorCodePaginationNumberInvalid,
++ },
++ }
++
+ repositoryNotFoundResponseDescriptor = ResponseDescriptor{
+ Name: "No Such Repository Error",
+ StatusCode: http.StatusNotFound,
+@@ -490,6 +503,7 @@ var routeDescriptors = []RouteDescriptor{
+ },
+ },
+ Failures: []ResponseDescriptor{
++ invalidPaginationResponseDescriptor,
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+@@ -1578,6 +1592,9 @@ var routeDescriptors = []RouteDescriptor{
+ },
+ },
+ },
++ Failures: []ResponseDescriptor{
++ invalidPaginationResponseDescriptor,
++ },
+ },
+ },
+ },
+diff --git a/registry/api/v2/errors.go b/registry/api/v2/errors.go
+index 97d6923a..87e9f3c1 100644
+--- a/registry/api/v2/errors.go
++++ b/registry/api/v2/errors.go
+@@ -133,4 +133,13 @@ var (
+ longer proceed.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
++
++ ErrorCodePaginationNumberInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
++ Value: "PAGINATION_NUMBER_INVALID",
++ Message: "invalid number of results requested",
++ Description: `Returned when the "n" parameter (number of results
++ to return) is not an integer, "n" is negative or "n" is bigger than
++ the maximum allowed.`,
++ HTTPStatusCode: http.StatusBadRequest,
++ })
+ )
+diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go
+index 2d3edc74..bf037d45 100644
+--- a/registry/handlers/api_test.go
++++ b/registry/handlers/api_test.go
+@@ -81,21 +81,23 @@ func TestCheckAPI(t *testing.T) {
+
+ // TestCatalogAPI tests the /v2/_catalog endpoint
+ func TestCatalogAPI(t *testing.T) {
+- chunkLen := 2
+ env := newTestEnv(t, false)
+ defer env.Shutdown()
+
+- values := url.Values{
+- "last": []string{""},
+- "n": []string{strconv.Itoa(chunkLen)}}
++ maxEntries := env.config.Catalog.MaxEntries
++ allCatalog := []string{
++ "foo/aaaa", "foo/bbbb", "foo/cccc", "foo/dddd", "foo/eeee", "foo/ffff",
++ }
+
+- catalogURL, err := env.builder.BuildCatalogURL(values)
++ chunkLen := maxEntries - 1
++
++ catalogURL, err := env.builder.BuildCatalogURL()
+ if err != nil {
+ t.Fatalf("unexpected error building catalog url: %v", err)
+ }
+
+ // -----------------------------------
+- // try to get an empty catalog
++ // Case No. 1: Empty catalog
+ resp, err := http.Get(catalogURL)
+ if err != nil {
+ t.Fatalf("unexpected error issuing request: %v", err)
+@@ -113,23 +115,22 @@ func TestCatalogAPI(t *testing.T) {
+ t.Fatalf("error decoding fetched manifest: %v", err)
+ }
+
+- // we haven't pushed anything to the registry yet
++ // No images pushed = no image returned
+ if len(ctlg.Repositories) != 0 {
+- t.Fatalf("repositories has unexpected values")
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", 0, len(ctlg.Repositories))
+ }
+
++ // No pagination should be returned
+ if resp.Header.Get("Link") != "" {
+ t.Fatalf("repositories has more data when none expected")
+ }
+
+- // -----------------------------------
+- // push something to the registry and try again
+- images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"}
+-
+- for _, image := range images {
++ for _, image := range allCatalog {
+ createRepository(env, t, image, "sometag")
+ }
+
++ // -----------------------------------
++ // Case No. 2: Catalog populated & n is not provided nil (n internally will be min(100, maxEntries))
+ resp, err = http.Get(catalogURL)
+ if err != nil {
+ t.Fatalf("unexpected error issuing request: %v", err)
+@@ -143,27 +144,60 @@ func TestCatalogAPI(t *testing.T) {
+ t.Fatalf("error decoding fetched manifest: %v", err)
+ }
+
+- if len(ctlg.Repositories) != chunkLen {
+- t.Fatalf("repositories has unexpected values")
++ // it must match max entries
++ if len(ctlg.Repositories) != maxEntries {
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", maxEntries, len(ctlg.Repositories))
+ }
+
+- for _, image := range images[:chunkLen] {
++ // it must return the first maxEntries entries from the catalog
++ for _, image := range allCatalog[:maxEntries] {
+ if !contains(ctlg.Repositories, image) {
+ t.Fatalf("didn't find our repository '%s' in the catalog", image)
+ }
+ }
+
++ // fail if there's no pagination
+ link := resp.Header.Get("Link")
+ if link == "" {
+ t.Fatalf("repositories has less data than expected")
+ }
++ // -----------------------------------
++ // Case No. 2.1: Second page (n internally will be min(100, maxEntries))
++
++ // build pagination link
++ values := checkLink(t, link, maxEntries, ctlg.Repositories[len(ctlg.Repositories)-1])
++
++ catalogURL, err = env.builder.BuildCatalogURL(values)
++ if err != nil {
++ t.Fatalf("unexpected error building catalog url: %v", err)
++ }
+
+- newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1])
++ resp, err = http.Get(catalogURL)
++ if err != nil {
++ t.Fatalf("unexpected error issuing request: %v", err)
++ }
++ defer resp.Body.Close()
++
++ checkResponse(t, "issuing catalog api check", resp, http.StatusOK)
++
++ dec = json.NewDecoder(resp.Body)
++ if err = dec.Decode(&ctlg); err != nil {
++ t.Fatalf("error decoding fetched manifest: %v", err)
++ }
++
++ expectedRemainder := len(allCatalog) - maxEntries
++ if len(ctlg.Repositories) != expectedRemainder {
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", expectedRemainder, len(ctlg.Repositories))
++ }
+
+ // -----------------------------------
+- // get the last chunk of data
++ // Case No. 3: request n = maxentries
++ values = url.Values{
++ "last": []string{""},
++ "n": []string{strconv.Itoa(maxEntries)},
++ }
+
+- catalogURL, err = env.builder.BuildCatalogURL(newValues)
++ catalogURL, err = env.builder.BuildCatalogURL(values)
+ if err != nil {
+ t.Fatalf("unexpected error building catalog url: %v", err)
+ }
+@@ -181,18 +215,239 @@ func TestCatalogAPI(t *testing.T) {
+ t.Fatalf("error decoding fetched manifest: %v", err)
+ }
+
+- if len(ctlg.Repositories) != 1 {
+- t.Fatalf("repositories has unexpected values")
++ if len(ctlg.Repositories) != maxEntries {
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", maxEntries, len(ctlg.Repositories))
+ }
+
+- lastImage := images[len(images)-1]
+- if !contains(ctlg.Repositories, lastImage) {
+- t.Fatalf("didn't find our repository '%s' in the catalog", lastImage)
++ // fail if there's no pagination
++ link = resp.Header.Get("Link")
++ if link == "" {
++ t.Fatalf("repositories has less data than expected")
++ }
++
++ // -----------------------------------
++ // Case No. 3.1: Second (last) page
++
++ // build pagination link
++ values = checkLink(t, link, maxEntries, ctlg.Repositories[len(ctlg.Repositories)-1])
++
++ catalogURL, err = env.builder.BuildCatalogURL(values)
++ if err != nil {
++ t.Fatalf("unexpected error building catalog url: %v", err)
+ }
+
++ resp, err = http.Get(catalogURL)
++ if err != nil {
++ t.Fatalf("unexpected error issuing request: %v", err)
++ }
++ defer resp.Body.Close()
++
++ checkResponse(t, "issuing catalog api check", resp, http.StatusOK)
++
++ dec = json.NewDecoder(resp.Body)
++ if err = dec.Decode(&ctlg); err != nil {
++ t.Fatalf("error decoding fetched manifest: %v", err)
++ }
++
++ expectedRemainder = len(allCatalog) - maxEntries
++ if len(ctlg.Repositories) != expectedRemainder {
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", expectedRemainder, len(ctlg.Repositories))
++ }
++
++ // -----------------------------------
++ // Case No. 4: request n < maxentries
++ values = url.Values{
++ "n": []string{strconv.Itoa(chunkLen)},
++ }
++
++ catalogURL, err = env.builder.BuildCatalogURL(values)
++ if err != nil {
++ t.Fatalf("unexpected error building catalog url: %v", err)
++ }
++
++ resp, err = http.Get(catalogURL)
++ if err != nil {
++ t.Fatalf("unexpected error issuing request: %v", err)
++ }
++ defer resp.Body.Close()
++
++ checkResponse(t, "issuing catalog api check", resp, http.StatusOK)
++
++ dec = json.NewDecoder(resp.Body)
++ if err = dec.Decode(&ctlg); err != nil {
++ t.Fatalf("error decoding fetched manifest: %v", err)
++ }
++
++ // returns the requested amount
++ if len(ctlg.Repositories) != chunkLen {
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", expectedRemainder, len(ctlg.Repositories))
++ }
++
++ // fail if there's no pagination
+ link = resp.Header.Get("Link")
+- if link != "" {
+- t.Fatalf("catalog has unexpected data")
++ if link == "" {
++ t.Fatalf("repositories has less data than expected")
++ }
++
++ // -----------------------------------
++ // Case No. 4.1: request n < maxentries (second page)
++
++ // build pagination link
++ values = checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1])
++
++ catalogURL, err = env.builder.BuildCatalogURL(values)
++ if err != nil {
++ t.Fatalf("unexpected error building catalog url: %v", err)
++ }
++
++ resp, err = http.Get(catalogURL)
++ if err != nil {
++ t.Fatalf("unexpected error issuing request: %v", err)
++ }
++ defer resp.Body.Close()
++
++ checkResponse(t, "issuing catalog api check", resp, http.StatusOK)
++
++ dec = json.NewDecoder(resp.Body)
++ if err = dec.Decode(&ctlg); err != nil {
++ t.Fatalf("error decoding fetched manifest: %v", err)
++ }
++
++ expectedRemainder = len(allCatalog) - chunkLen
++ if len(ctlg.Repositories) != expectedRemainder {
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", expectedRemainder, len(ctlg.Repositories))
++ }
++
++ // -----------------------------------
++ // Case No. 5: request n > maxentries | return err: ErrorCodePaginationNumberInvalid
++ values = url.Values{
++ "n": []string{strconv.Itoa(maxEntries + 10)},
++ }
++
++ catalogURL, err = env.builder.BuildCatalogURL(values)
++ if err != nil {
++ t.Fatalf("unexpected error building catalog url: %v", err)
++ }
++
++ resp, err = http.Get(catalogURL)
++ if err != nil {
++ t.Fatalf("unexpected error issuing request: %v", err)
++ }
++ defer resp.Body.Close()
++
++ checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest)
++ checkBodyHasErrorCodes(t, "invalid number of results requested", resp, v2.ErrorCodePaginationNumberInvalid)
++
++ // -----------------------------------
++ // Case No. 6: request n > maxentries but <= total catalog | return err: ErrorCodePaginationNumberInvalid
++ values = url.Values{
++ "n": []string{strconv.Itoa(len(allCatalog))},
++ }
++
++ catalogURL, err = env.builder.BuildCatalogURL(values)
++ if err != nil {
++ t.Fatalf("unexpected error building catalog url: %v", err)
++ }
++
++ resp, err = http.Get(catalogURL)
++ if err != nil {
++ t.Fatalf("unexpected error issuing request: %v", err)
++ }
++ defer resp.Body.Close()
++
++ checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest)
++ checkBodyHasErrorCodes(t, "invalid number of results requested", resp, v2.ErrorCodePaginationNumberInvalid)
++
++ // -----------------------------------
++ // Case No. 7: n = 0 | n is set to max(0, min(defaultEntries, maxEntries))
++ values = url.Values{
++ "n": []string{"0"},
++ }
++
++ catalogURL, err = env.builder.BuildCatalogURL(values)
++ if err != nil {
++ t.Fatalf("unexpected error building catalog url: %v", err)
++ }
++
++ resp, err = http.Get(catalogURL)
++ if err != nil {
++ t.Fatalf("unexpected error issuing request: %v", err)
++ }
++ defer resp.Body.Close()
++
++ checkResponse(t, "issuing catalog api check", resp, http.StatusOK)
++
++ dec = json.NewDecoder(resp.Body)
++ if err = dec.Decode(&ctlg); err != nil {
++ t.Fatalf("error decoding fetched manifest: %v", err)
++ }
++
++ // it must be empty
++ if len(ctlg.Repositories) != 0 {
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", 0, len(ctlg.Repositories))
++ }
++
++ // -----------------------------------
++ // Case No. 8: n = -1 | n is set to max(0, min(defaultEntries, maxEntries))
++ values = url.Values{
++ "n": []string{"-1"},
++ }
++
++ catalogURL, err = env.builder.BuildCatalogURL(values)
++ if err != nil {
++ t.Fatalf("unexpected error building catalog url: %v", err)
++ }
++
++ resp, err = http.Get(catalogURL)
++ if err != nil {
++ t.Fatalf("unexpected error issuing request: %v", err)
++ }
++ defer resp.Body.Close()
++
++ checkResponse(t, "issuing catalog api check", resp, http.StatusOK)
++
++ dec = json.NewDecoder(resp.Body)
++ if err = dec.Decode(&ctlg); err != nil {
++ t.Fatalf("error decoding fetched manifest: %v", err)
++ }
++
++ // it must match max entries
++ if len(ctlg.Repositories) != maxEntries {
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", expectedRemainder, len(ctlg.Repositories))
++ }
++
++ // -----------------------------------
++ // Case No. 9: n = 5, max = 5, total catalog = 4
++ values = url.Values{
++ "n": []string{strconv.Itoa(maxEntries)},
++ }
++
++ envWithLessImages := newTestEnv(t, false)
++ for _, image := range allCatalog[0:(maxEntries - 1)] {
++ createRepository(envWithLessImages, t, image, "sometag")
++ }
++
++ catalogURL, err = envWithLessImages.builder.BuildCatalogURL(values)
++ if err != nil {
++ t.Fatalf("unexpected error building catalog url: %v", err)
++ }
++
++ resp, err = http.Get(catalogURL)
++ if err != nil {
++ t.Fatalf("unexpected error issuing request: %v", err)
++ }
++ defer resp.Body.Close()
++
++ checkResponse(t, "issuing catalog api check", resp, http.StatusOK)
++
++ dec = json.NewDecoder(resp.Body)
++ if err = dec.Decode(&ctlg); err != nil {
++ t.Fatalf("error decoding fetched manifest: %v", err)
++ }
++
++ // it must match max entries
++ if len(ctlg.Repositories) != maxEntries-1 {
++ t.Fatalf("repositories returned unexpected entries (expected: %d, returned: %d)", maxEntries-1, len(ctlg.Repositories))
+ }
+ }
+
+@@ -207,7 +462,7 @@ func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Val
+ urlValues := linkURL.Query()
+
+ if urlValues.Get("n") != strconv.Itoa(numEntries) {
+- t.Fatalf("Catalog link entry size is incorrect")
++ t.Fatalf("Catalog link entry size is incorrect (expected: %v, returned: %v)", urlValues.Get("n"), strconv.Itoa(numEntries))
+ }
+
+ if urlValues.Get("last") != last {
+@@ -2023,6 +2278,9 @@ func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv {
+ Proxy: configuration.Proxy{
+ RemoteURL: "http://example.com",
+ },
++ Catalog: configuration.Catalog{
++ MaxEntries: 5,
++ },
+ }
+ config.Compatibility.Schema1.Enabled = true
+
+@@ -2039,6 +2297,9 @@ func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv {
+ "enabled": false,
+ }},
+ },
++ Catalog: configuration.Catalog{
++ MaxEntries: 5,
++ },
+ }
+
+ config.Compatibility.Schema1.Enabled = true
+@@ -2291,7 +2552,6 @@ func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus
+ if resp.StatusCode != expectedStatus {
+ t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus)
+ maybeDumpResponse(t, resp)
+-
+ t.FailNow()
+ }
+
+diff --git a/registry/handlers/catalog.go b/registry/handlers/catalog.go
+index eca98468..83ec0a9c 100644
+--- a/registry/handlers/catalog.go
++++ b/registry/handlers/catalog.go
+@@ -9,11 +9,13 @@ import (
+ "strconv"
+
+ "github.com/docker/distribution/registry/api/errcode"
++ v2 "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/distribution/registry/storage/driver"
++
+ "github.com/gorilla/handlers"
+ )
+
+-const maximumReturnedEntries = 100
++const defaultReturnedEntries = 100
+
+ func catalogDispatcher(ctx *Context, r *http.Request) http.Handler {
+ catalogHandler := &catalogHandler{
+@@ -38,29 +40,55 @@ func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) {
+
+ q := r.URL.Query()
+ lastEntry := q.Get("last")
+- maxEntries, err := strconv.Atoi(q.Get("n"))
+- if err != nil || maxEntries < 0 {
+- maxEntries = maximumReturnedEntries
++
++ entries := defaultReturnedEntries
++ maximumConfiguredEntries := ch.App.Config.Catalog.MaxEntries
++
++ // parse n, if n unparseable, or negative assign it to defaultReturnedEntries
++ if n := q.Get("n"); n != "" {
++ parsedMax, err := strconv.Atoi(n)
++ if err == nil {
++ if parsedMax > maximumConfiguredEntries {
++ ch.Errors = append(ch.Errors, v2.ErrorCodePaginationNumberInvalid.WithDetail(map[string]int{"n": parsedMax}))
++ return
++ } else if parsedMax >= 0 {
++ entries = parsedMax
++ }
++ }
+ }
+
+- repos := make([]string, maxEntries)
++ // then enforce entries to be between 0 & maximumConfiguredEntries
++ // max(0, min(entries, maximumConfiguredEntries))
++ if entries < 0 || entries > maximumConfiguredEntries {
++ entries = maximumConfiguredEntries
++ }
+
+- filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry)
+- _, pathNotFound := err.(driver.PathNotFoundError)
++ repos := make([]string, entries)
++ filled := 0
+
+- if err == io.EOF || pathNotFound {
++ // entries is guaranteed to be >= 0 and < maximumConfiguredEntries
++ if entries == 0 {
+ moreEntries = false
+- } else if err != nil {
+- ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+- return
++ } else {
++ returnedRepositories, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry)
++ if err != nil {
++ _, pathNotFound := err.(driver.PathNotFoundError)
++ if err != io.EOF && !pathNotFound {
++ ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
++ return
++ }
++ // err is either io.EOF or not PathNotFoundError
++ moreEntries = false
++ }
++ filled = returnedRepositories
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+
+ // Add a link header if there are more entries to retrieve
+ if moreEntries {
+- lastEntry = repos[len(repos)-1]
+- urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry)
++ lastEntry = repos[filled-1]
++ urlStr, err := createLinkEntry(r.URL.String(), entries, lastEntry)
+ if err != nil {
+ ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+ return
+--
+2.40.0
diff --git a/recipes-containers/docker/README b/recipes-containers/docker/README
new file mode 100644
index 00000000..565e3501
--- /dev/null
+++ b/recipes-containers/docker/README
@@ -0,0 +1,7 @@
+if containerd is starting docker, and it is interfering with standalone
+docker operation, you may need to kill the running daemon and restart
+it:
+
+ % ps axf | grep docker | grep -v grep | awk '{print "kill -9 " $1}' | sh
+ % systemctl stop docker
+ % systemctl start docker
diff --git a/recipes-containers/docker/docker-ce_git.bb b/recipes-containers/docker/docker-ce_git.bb
index 12bc73cb..7142060f 100644
--- a/recipes-containers/docker/docker-ce_git.bb
+++ b/recipes-containers/docker/docker-ce_git.bb
@@ -31,19 +31,24 @@ DESCRIPTION = "Linux container runtime \
# so we get that tag, and make it our SRCREVS:
#
-SRCREV_docker = "906f57ff5b7100013dfef066ea8fe367706468df"
-SRCREV_libnetwork = "64b7a4574d1426139437d20e81c0b6d391130ec8"
-SRCREV_cli = "62eae52c2a76f4c1dcf79dfc7b5ea3bf5eebab8b"
+SRCREV_docker = "791d8ab87747169b4cbfcdf2fd57c81952bae6d5"
+SRCREV_libnetwork = "dcdf8f176d1e13ad719e913e796fb698d846de98"
+SRCREV_cli = "911449ca245308472a3d34a7f1a98b918e65c8c3"
SRCREV_FORMAT = "docker_libnetwork"
SRC_URI = "\
- git://github.com/docker/docker.git;branch=20.10;name=docker;protocol=https \
- git://github.com/docker/libnetwork.git;branch=master;name=libnetwork;destsuffix=git/libnetwork;protocol=https \
- git://github.com/docker/cli;branch=20.10;name=cli;destsuffix=git/cli;protocol=https \
- file://0001-libnetwork-use-GO-instead-of-go.patch \
- file://docker.init \
- file://0001-dynbinary-use-go-cross-compiler.patch \
- file://0001-cli-use-external-GO111MODULE-and-cross-compiler.patch \
- "
+ git://github.com/docker/docker.git;branch=20.10;name=docker;protocol=https \
+ git://github.com/docker/libnetwork.git;branch=master;name=libnetwork;destsuffix=git/libnetwork;protocol=https \
+ git://github.com/docker/cli;branch=20.10;name=cli;destsuffix=git/cli;protocol=https \
+ file://docker.init \
+ file://0001-libnetwork-use-GO-instead-of-go.patch \
+ file://0001-cli-use-external-GO111MODULE-and-cross-compiler.patch \
+ file://0001-dynbinary-use-go-cross-compiler.patch \
+ file://0001-Revert-go-updates-from-1.19.12-to-1.18.7.patch;patchdir=src/import \
+ file://0002-Revert-go-updates-from-1.18.7-to-1.17.13.patch;patchdir=src/import \
+ file://0003-builder.go-avoid-using-strings.Cut-from-go-1.18.patch;patchdir=src/import \
+"
+
+DOCKER_COMMIT = "${SRCREV_docker}"
require docker.inc
@@ -51,7 +56,10 @@ require docker.inc
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=4859e97a9c7780e77972d989f0823f28"
-DOCKER_VERSION = "20.10.12-ce"
+# 58 commits after v20.10.25 to include the fixes for go compatibility after
+# https://lists.openembedded.org/g/openembedded-core/message/185082
+# https://github.com/moby/moby/compare/v20.10.25...791d8ab87747169b4cbfcdf2fd57c81952bae6d5
+DOCKER_VERSION = "20.10.25-ce"
PV = "${DOCKER_VERSION}+git${SRCREV_docker}"
-CVE_PRODUCT = "docker"
+CVE_PRODUCT = "docker mobyproject:moby"
diff --git a/recipes-containers/docker/docker-moby_git.bb b/recipes-containers/docker/docker-moby_git.bb
index 0a0ffd60..5f8de075 100644
--- a/recipes-containers/docker/docker-moby_git.bb
+++ b/recipes-containers/docker/docker-moby_git.bb
@@ -34,19 +34,24 @@ DESCRIPTION = "Linux container runtime \
# - The common components of this recipe and docker-ce do need to be moved
# to a docker.inc recipe
-SRCREV_moby = "906f57ff5b7100013dfef066ea8fe367706468df"
-SRCREV_libnetwork = "64b7a4574d1426139437d20e81c0b6d391130ec8"
-SRCREV_cli = "a224086349269551becacce16e5842ceeb2a98d6"
+SRCREV_moby = "791d8ab87747169b4cbfcdf2fd57c81952bae6d5"
+SRCREV_libnetwork = "dcdf8f176d1e13ad719e913e796fb698d846de98"
+SRCREV_cli = "911449ca245308472a3d34a7f1a98b918e65c8c3"
SRCREV_FORMAT = "moby_libnetwork"
SRC_URI = "\
- git://github.com/moby/moby.git;branch=20.10;name=moby;protocol=https \
- git://github.com/docker/libnetwork.git;branch=master;name=libnetwork;destsuffix=git/libnetwork;protocol=https \
- git://github.com/docker/cli;branch=20.10;name=cli;destsuffix=git/cli;protocol=https \
- file://docker.init \
- file://0001-libnetwork-use-GO-instead-of-go.patch \
- file://0001-cli-use-external-GO111MODULE-and-cross-compiler.patch \
- file://0001-dynbinary-use-go-cross-compiler.patch \
- "
+ git://github.com/moby/moby.git;branch=20.10;name=moby;protocol=https \
+ git://github.com/docker/libnetwork.git;branch=master;name=libnetwork;destsuffix=git/libnetwork;protocol=https \
+ git://github.com/docker/cli;branch=20.10;name=cli;destsuffix=git/cli;protocol=https \
+ file://docker.init \
+ file://0001-libnetwork-use-GO-instead-of-go.patch \
+ file://0001-cli-use-external-GO111MODULE-and-cross-compiler.patch \
+ file://0001-dynbinary-use-go-cross-compiler.patch \
+ file://0001-Revert-go-updates-from-1.19.12-to-1.18.7.patch;patchdir=src/import \
+ file://0002-Revert-go-updates-from-1.18.7-to-1.17.13.patch;patchdir=src/import \
+ file://0003-builder.go-avoid-using-strings.Cut-from-go-1.18.patch;patchdir=src/import \
+"
+
+DOCKER_COMMIT = "${SRCREV_moby}"
require docker.inc
@@ -54,7 +59,10 @@ require docker.inc
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=4859e97a9c7780e77972d989f0823f28"
-DOCKER_VERSION = "20.10.12"
+# 58 commits after v20.10.25 to include the fixes for go compatibility after
+# https://lists.openembedded.org/g/openembedded-core/message/185082
+# https://github.com/moby/moby/compare/v20.10.25...791d8ab87747169b4cbfcdf2fd57c81952bae6d5
+DOCKER_VERSION = "20.10.25"
PV = "${DOCKER_VERSION}+git${SRCREV_moby}"
-CVE_PRODUCT = "docker"
+CVE_PRODUCT = "docker mobyproject:moby"
diff --git a/recipes-containers/docker/docker.inc b/recipes-containers/docker/docker.inc
index 40a3642c..2487456e 100644
--- a/recipes-containers/docker/docker.inc
+++ b/recipes-containers/docker/docker.inc
@@ -41,7 +41,7 @@ RPROVIDES:${PN}-dev += "docker-dev"
RPROVIDES:${PN}-contrip += "docker-dev"
inherit pkgconfig
-PACKAGECONFIG ??= "docker-init"
+PACKAGECONFIG ??= "docker-init seccomp"
PACKAGECONFIG[seccomp] = "seccomp,,libseccomp"
PACKAGECONFIG[docker-init] = ",,,docker-init"
PACKAGECONFIG[transient-config] = "transient-config"
@@ -58,6 +58,9 @@ inherit pkgconfig
do_configure[noexec] = "1"
+# Export for possible use in Makefiles, default value comes from go.bbclass
+export GO_LINKSHARED
+
DOCKER_PKG="github.com/docker/docker"
# in order to exclude devicemapper and btrfs - https://github.com/docker/docker/issues/14056
BUILD_TAGS ?= "exclude_graphdriver_btrfs exclude_graphdriver_devicemapper"
@@ -94,14 +97,14 @@ do_compile() {
# this is the unsupported built structure
# that doesn't rely on an existing docker
# to build this:
- VERSION="${DOCKER_VERSION}" DOCKER_GITCOMMIT="${SRCREV_moby}" ./hack/make.sh dynbinary
+ VERSION="${DOCKER_VERSION}" DOCKER_GITCOMMIT="${DOCKER_COMMIT}" ./hack/make.sh dynbinary
# build the cli
cd ${S}/src/import/.gopath/src/github.com/docker/cli
export CFLAGS=""
export LDFLAGS=""
export DOCKER_VERSION=${DOCKER_VERSION}
- VERSION="${DOCKER_VERSION}" DOCKER_GITCOMMIT="${SRCREV_moby}" make dynbinary
+ VERSION="${DOCKER_VERSION}" DOCKER_GITCOMMIT="${DOCKER_COMMIT}" make dynbinary
# build the proxy
cd ${S}/src/import/.gopath/src/github.com/docker/libnetwork
@@ -120,8 +123,7 @@ do_install() {
# replaces one copied from above with one that uses the local registry for a mirror
install -m 644 ${S}/src/import/contrib/init/systemd/docker.service ${D}/${systemd_unitdir}/system
rm -f ${D}/${systemd_unitdir}/system/docker.service.rpm
- fi
- if ${@bb.utils.contains('DISTRO_FEATURES','sysvinit','true','false',d)}; then
+ else
install -d ${D}${sysconfdir}/init.d
install -m 0755 ${WORKDIR}/docker.init ${D}${sysconfdir}/init.d/docker.init
fi
@@ -142,8 +144,10 @@ SYSTEMD_PACKAGES = "${@bb.utils.contains('DISTRO_FEATURES','systemd','${PN}','',
SYSTEMD_SERVICE:${PN} = "${@bb.utils.contains('DISTRO_FEATURES','systemd','docker.socket','',d)}"
SYSTEMD_AUTO_ENABLE:${PN} = "enable"
-INITSCRIPT_PACKAGES += "${@bb.utils.contains('DISTRO_FEATURES','sysvinit','${PN}','',d)}"
-INITSCRIPT_NAME:${PN} = "${@bb.utils.contains('DISTRO_FEATURES','sysvinit','docker.init','',d)}"
+# inverted logic warning. We only want the sysvinit init script to be installed
+# if systemd is NOT in the distro features
+INITSCRIPT_PACKAGES += "${@bb.utils.contains('DISTRO_FEATURES','systemd','', '${PN}',d)}"
+INITSCRIPT_NAME:${PN} = "${@bb.utils.contains('DISTRO_FEATURES','systemd','', 'docker.init',d)}"
INITSCRIPT_PARAMS:${PN} = "defaults"
inherit useradd
diff --git a/recipes-containers/docker/files/0001-Revert-go-updates-from-1.19.12-to-1.18.7.patch b/recipes-containers/docker/files/0001-Revert-go-updates-from-1.19.12-to-1.18.7.patch
new file mode 100644
index 00000000..2a3f3b97
--- /dev/null
+++ b/recipes-containers/docker/files/0001-Revert-go-updates-from-1.19.12-to-1.18.7.patch
@@ -0,0 +1,144 @@
+From 2cc349a336cd3cb4fa33554216a99dbce3879d29 Mon Sep 17 00:00:00 2001
+From: Martin Jansa <Martin.Jansa@gmail.com>
+Date: Mon, 14 Aug 2023 16:02:30 +0200
+Subject: [PATCH] Revert go updates from 1.19.12 to 1.18.7
+
+To continue using 1.17 version as implemented for some reason in:
+https://git.yoctoproject.org/meta-virtualization/commit/?h=kirkstone&id=927537108bcf2b98859512ce3eae59a73439994d
+with 0001-revert-go-1.8-update.patch
+
+Revert "update to go1.19.12"
+This reverts commit c42d7c7f6d0dccfe5d85f0126da2c8ec77573616.
+Revert "[20.10] update go to go1.19.11"
+This reverts commit 43fe787c232c54b53bda21301bbf3a463c4e4056.
+Revert "[20.10] update go to go1.19.10"
+This reverts commit 99f10dec91409c4f6ed2c4867638f4e5ea678f0a.
+Revert "Dockerfile: temporarily skip CRIU stage"
+This reverts commit af0477880cc8c0197517c0bc8de0cbd6cb9bd9a9.
+Revert "[20.10] update go to go1.19.9"
+This reverts commit 7f91a52b8969f5604fdca36ee30475cc9600be5b.
+Revert "[20.10] update go to go1.19.8"
+This reverts commit a09b3e9cf9de906438a6300760754fcb087f166a.
+Revert "update to go1.19.7"
+This reverts commit 9aa5d55a8ba8725133a6fbb5ac98d1fab341fdc7.
+Revert "update to go1.19.6"
+This reverts commit 98c9e3f43833bf87c21e02aca77c75c7a475ecd6.
+Revert "update to go1.19.5"
+This reverts commit 5b48f300dd0c6a2bbfc942408bc7e3fbc39609f0.
+Revert "update to go1.19.4"
+This reverts commit 82b0ac1166cc553e2757c5d490ce69078064ef6e.
+Revert "Update to Go 1.19.3 to address CVE-2022-41716"
+This reverts commit 4701ca9f719f5386d2ca2417b566b5950aa8a929.
+Revert "Update to go 1.19.2 to address CVE-2022-2879, CVE-2022-2880, CVE-2022-41715"
+This reverts commit 1c8c16524f94ae69eb33c5c9000e87615ce973a6.
+Revert "Update to go 1.19.1 to address CVE-2022-27664, CVE-2022-32190"
+This reverts commit 6cc1ef32a28e7e6e74383d4775bd178d36495181.
+Revert "update to golang 1.19"
+This reverts commit 5091f13a5d027b785084028aedb03beb5b0bd9a6.
+Revert "Dockerfile: configure code dir as "safe" directory"
+This reverts commit 0312e468da5c99267654d5c6b62785e29ffe6e0d.
+Revert "[20.10] update to go1.18.10"
+This reverts commit 625903f3fda862c69492256016b386628954b3a4.
+Revert "[20.10] update gotestsum to v1.8.2"
+This reverts commit edca413033cea5252f69bfc740e9450e5d0a0ef9.
+Revert "[20.10] update to go1.18.9"
+This reverts commit f8b0d77bfe109c5248ba6eb6c6db7f32e99a2e75.
+Revert "[20.10] update to Go 1.18.8 to address CVE-2022-41716"
+This reverts commit 0211f9e44dba888fa62dc2ba59ea573ad70e9708.
+
+Upstream-Status: Inappropriate [OE specific: pins docker 20.10 build to the go 1.17 toolchain used by this layer]
+---
+ Dockerfile | 12 ++----------
+ Dockerfile.e2e | 2 +-
+ Dockerfile.simple | 2 +-
+ Dockerfile.windows | 4 ++--
+ hack/dockerfile/install/gotestsum.installer | 2 +-
+ 5 files changed, 7 insertions(+), 15 deletions(-)
+
+diff --git a/Dockerfile b/Dockerfile
+index 4ee4d8dd2c..9472c512a6 100644
+--- a/Dockerfile
++++ b/Dockerfile
+@@ -3,7 +3,7 @@
+ ARG CROSS="false"
+ ARG SYSTEMD="false"
+ # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
+-ARG GO_VERSION=1.19.12
++ARG GO_VERSION=1.18.7
+ ARG DEBIAN_FRONTEND=noninteractive
+ ARG VPNKIT_VERSION=0.5.0
+ ARG DOCKER_BUILDTAGS="apparmor seccomp"
+@@ -267,9 +267,6 @@ RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
+ RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
+ RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
+ RUN ldconfig
+-# Set dev environment as safe git directory to prevent "dubious ownership" errors
+-# when bind-mounting the source into the dev-container. See https://github.com/moby/moby/pull/44930
+-RUN git config --global --add safe.directory $GOPATH/src/github.com/docker/docker
+ # This should only install packages that are specifically needed for the dev environment and nothing else
+ # Do you really need to add another package here? Can it be done in a different build stage?
+ RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
+@@ -316,12 +313,7 @@ COPY --from=swagger /build/ /usr/local/bin/
+ COPY --from=tomlv /build/ /usr/local/bin/
+ COPY --from=tini /build/ /usr/local/bin/
+ COPY --from=registry /build/ /usr/local/bin/
+-
+-# Skip the CRIU stage for now, as the opensuse package repository is sometimes
+-# unstable, and we're currently not using it in CI.
+-#
+-# FIXME(thaJeztah): re-enable this stage when https://github.com/moby/moby/issues/38963 is resolved (see https://github.com/moby/moby/pull/38984)
+-# COPY --from=criu /build/ /usr/local/
++COPY --from=criu /build/ /usr/local/
+ COPY --from=vndr /build/ /usr/local/bin/
+ COPY --from=gotestsum /build/ /usr/local/bin/
+ COPY --from=golangci_lint /build/ /usr/local/bin/
+diff --git a/Dockerfile.e2e b/Dockerfile.e2e
+index 31f836fc16..f92bec85b0 100644
+--- a/Dockerfile.e2e
++++ b/Dockerfile.e2e
+@@ -1,4 +1,4 @@
+-ARG GO_VERSION=1.19.12
++ARG GO_VERSION=1.18.7
+
+ FROM golang:${GO_VERSION}-alpine AS base
+ ENV GO111MODULE=off
+diff --git a/Dockerfile.simple b/Dockerfile.simple
+index af5fc13c25..8aa6d7ff94 100644
+--- a/Dockerfile.simple
++++ b/Dockerfile.simple
+@@ -5,7 +5,7 @@
+
+ # This represents the bare minimum required to build and test Docker.
+
+-ARG GO_VERSION=1.19.12
++ARG GO_VERSION=1.18.7
+
+ FROM golang:${GO_VERSION}-buster
+ ENV GO111MODULE=off
+diff --git a/Dockerfile.windows b/Dockerfile.windows
+index 7c2fe66389..6f8242decc 100644
+--- a/Dockerfile.windows
++++ b/Dockerfile.windows
+@@ -165,8 +165,8 @@ FROM microsoft/windowsservercore
+ # Use PowerShell as the default shell
+ SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
+
+-ARG GO_VERSION=1.19.12
+-ARG GOTESTSUM_VERSION=v1.8.2
++ARG GO_VERSION=1.18.7
++ARG GOTESTSUM_VERSION=v1.7.0
+
+ # Environment variable notes:
+ # - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
+diff --git a/hack/dockerfile/install/gotestsum.installer b/hack/dockerfile/install/gotestsum.installer
+index 8e88fec77b..5024179958 100755
+--- a/hack/dockerfile/install/gotestsum.installer
++++ b/hack/dockerfile/install/gotestsum.installer
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-: ${GOTESTSUM_VERSION:=v1.8.2}
++: ${GOTESTSUM_VERSION:=v1.7.0}
+
+ install_gotestsum() (
+ set -e
diff --git a/recipes-containers/docker/files/0001-cli-use-external-GO111MODULE-and-cross-compiler.patch b/recipes-containers/docker/files/0001-cli-use-external-GO111MODULE-and-cross-compiler.patch
index dc322612..d68de1cf 100644
--- a/recipes-containers/docker/files/0001-cli-use-external-GO111MODULE-and-cross-compiler.patch
+++ b/recipes-containers/docker/files/0001-cli-use-external-GO111MODULE-and-cross-compiler.patch
@@ -8,20 +8,17 @@ Signed-off-by: Bruce Ashfield <bruce.ashfield@gmail.com>
git/cli/scripts/build/binary | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
-diff --git git/cli/scripts/build/binary git/cli/scripts/build/binary
-index e4c5e12a6b..7c47b75c2f 100755
---- git/cli/scripts/build/binary
+Index: git/cli/scripts/build/binary
+===================================================================
+--- git.orig/cli/scripts/build/binary
+++ git/cli/scripts/build/binary
-@@ -73,8 +73,7 @@ fi
+@@ -73,8 +73,7 @@
echo "Building $GO_LINKMODE $(basename "${TARGET}")"
-export GO111MODULE=auto
--go build -o "${TARGET}" -tags "${GO_BUILDTAGS}" --ldflags "${LDFLAGS}" ${GO_BUILDMODE} "${SOURCE}"
-+${GO} build -o "${TARGET}" -tags "${GO_BUILDTAGS}" --ldflags "${LDFLAGS}" ${GO_BUILDMODE} "${SOURCE}"
+-go build -o "${TARGET}" -tags "${GO_BUILDTAGS}" --ldflags "${GO_LDFLAGS}" ${GO_BUILDMODE} "${SOURCE}"
++${GO} build -trimpath -o "${TARGET}" -tags "${GO_BUILDTAGS}" --ldflags "${GO_LDFLAGS}" ${GO_BUILDMODE} "${SOURCE}"
ln -sf "$(basename "${TARGET}")" "$(dirname "${TARGET}")/docker"
---
-2.19.1
-
diff --git a/recipes-containers/docker/files/0001-dynbinary-use-go-cross-compiler.patch b/recipes-containers/docker/files/0001-dynbinary-use-go-cross-compiler.patch
index 971c60d7..c6edaf46 100644
--- a/recipes-containers/docker/files/0001-dynbinary-use-go-cross-compiler.patch
+++ b/recipes-containers/docker/files/0001-dynbinary-use-go-cross-compiler.patch
@@ -17,7 +17,7 @@ Index: git/src/import/hack/make/.binary
echo "Building: $DEST/$BINARY_FULLNAME"
echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\""
- go build \
-+ ${GO} build \
++ ${GO} build -trimpath \
-o "$DEST/$BINARY_FULLNAME" \
"${BUILDFLAGS[@]}" \
-ldflags "
diff --git a/recipes-containers/docker/files/0001-libnetwork-use-GO-instead-of-go.patch b/recipes-containers/docker/files/0001-libnetwork-use-GO-instead-of-go.patch
index c623b260..b9b41de5 100644
--- a/recipes-containers/docker/files/0001-libnetwork-use-GO-instead-of-go.patch
+++ b/recipes-containers/docker/files/0001-libnetwork-use-GO-instead-of-go.patch
@@ -22,9 +22,9 @@ Index: git/libnetwork/Makefile
- go build -tags experimental -o "bin/dnet" ./cmd/dnet
- go build -o "bin/docker-proxy" ./cmd/proxy
- CGO_ENABLED=0 go build -o "bin/diagnosticClient" ./cmd/diagnostic
-+ $(GO) build -tags experimental -o "bin/dnet" ./cmd/dnet
-+ $(GO) build -o "bin/proxy" ./cmd/proxy
-+ CGO_ENABLED=0 $(GO) build -o "bin/diagnosticClient" ./cmd/diagnostic
++ $(GO) build -trimpath -tags experimental -o "bin/dnet" ./cmd/dnet
++ $(GO) build -trimpath -o "bin/proxy" ./cmd/proxy
++ CGO_ENABLED=0 $(GO) build -trimpath -o "bin/diagnosticClient" ./cmd/diagnostic
CGO_ENABLED=0 go build -o "bin/testMain" ./cmd/networkdb-test/testMain.go
build-images:
@@ -34,8 +34,8 @@ Index: git/libnetwork/Makefile
@echo "🐳 $@"
- go build -o "bin/dnet-$$GOOS-$$GOARCH" ./cmd/dnet
- go build -o "bin/docker-proxy-$$GOOS-$$GOARCH" ./cmd/proxy
-+ @$(GO) build -linkshared $(GOBUILDFLAGS) -o "bin/docker-proxy-$$GOOS-$$GOARCH" ./cmd/proxy
-+ @$(GO) build -linkshared $(GOBUILDFLAGS) -o "bin/dnet-$$GOOS-$$GOARCH" ./cmd/dnet
++ @$(GO) build -trimpath $(GO_LINKSHARED) $(GOBUILDFLAGS) -o "bin/docker-proxy-$$GOOS-$$GOARCH" ./cmd/proxy
++ @$(GO) build -trimpath $(GO_LINKSHARED) $(GOBUILDFLAGS) -o "bin/dnet-$$GOOS-$$GOARCH" ./cmd/dnet
# Rebuild protocol buffers.
# These may need to be rebuilt after vendoring updates, so .proto files are declared .PHONY so they are always rebuilt.
diff --git a/recipes-containers/docker/files/0002-Revert-go-updates-from-1.18.7-to-1.17.13.patch b/recipes-containers/docker/files/0002-Revert-go-updates-from-1.18.7-to-1.17.13.patch
new file mode 100644
index 00000000..7cacccb8
--- /dev/null
+++ b/recipes-containers/docker/files/0002-Revert-go-updates-from-1.18.7-to-1.17.13.patch
@@ -0,0 +1,1201 @@
+From 575302e9c6567b8547b308b2b0c6a07b27e3be3b Mon Sep 17 00:00:00 2001
+From: Adrian Freihofer <adrian.freihofer@siemens.com>
+Date: Sun, 4 Dec 2022 18:02:54 +0100
+Subject: [PATCH] Revert go updates from 1.18.7 to 1.17.13
+
+Upstream-Status: Inappropriate [OE specific: pins docker 20.10 build to the go 1.17 toolchain used by this layer]
+
+Updating this patch:
+ git revert -m 1 7d4cc78c0289edbb4727e3d50d4b130ce0f9c47e
+
+This reverts commit 7d4cc78c0289edbb4727e3d50d4b130ce0f9c47e, reversing
+changes made to 32debe0986f4516bfe17bf9122447f0c735e61b4.
+---
+ Dockerfile | 2 +-
+ Dockerfile.e2e | 2 +-
+ Dockerfile.simple | 2 +-
+ Dockerfile.windows | 2 +-
+ daemon/logger/templates/templates.go | 2 +-
+ pkg/plugins/pluginrpc-gen/template.go | 2 +-
+ vendor/archive/tar/common.go | 40 ++++----
+ vendor/archive/tar/format.go | 138 +++++++++++++-------------
+ vendor/archive/tar/fuzz_test.go | 80 ---------------
+ vendor/archive/tar/reader.go | 102 ++++++++++---------
+ vendor/archive/tar/reader_test.go | 30 +++---
+ vendor/archive/tar/stat_actime1.go | 1 +
+ vendor/archive/tar/stat_actime2.go | 1 +
+ vendor/archive/tar/strconv.go | 43 +++++---
+ vendor/archive/tar/tar_test.go | 2 +-
+ vendor/archive/tar/writer.go | 89 ++++++++---------
+ vendor/archive/tar/writer_test.go | 24 +++--
+ 17 files changed, 250 insertions(+), 312 deletions(-)
+ delete mode 100644 vendor/archive/tar/fuzz_test.go
+
+diff --git a/Dockerfile b/Dockerfile
+index 9472c512a6..f3f7956414 100644
+--- a/Dockerfile
++++ b/Dockerfile
+@@ -3,7 +3,7 @@
+ ARG CROSS="false"
+ ARG SYSTEMD="false"
+ # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
+-ARG GO_VERSION=1.18.7
++ARG GO_VERSION=1.17.13
+ ARG DEBIAN_FRONTEND=noninteractive
+ ARG VPNKIT_VERSION=0.5.0
+ ARG DOCKER_BUILDTAGS="apparmor seccomp"
+diff --git a/Dockerfile.e2e b/Dockerfile.e2e
+index f92bec85b0..d0f0b08acd 100644
+--- a/Dockerfile.e2e
++++ b/Dockerfile.e2e
+@@ -1,4 +1,4 @@
+-ARG GO_VERSION=1.18.7
++ARG GO_VERSION=1.17.13
+
+ FROM golang:${GO_VERSION}-alpine AS base
+ ENV GO111MODULE=off
+diff --git a/Dockerfile.simple b/Dockerfile.simple
+index 8aa6d7ff94..1db20c1e35 100644
+--- a/Dockerfile.simple
++++ b/Dockerfile.simple
+@@ -5,7 +5,7 @@
+
+ # This represents the bare minimum required to build and test Docker.
+
+-ARG GO_VERSION=1.18.7
++ARG GO_VERSION=1.17.13
+
+ FROM golang:${GO_VERSION}-buster
+ ENV GO111MODULE=off
+diff --git a/Dockerfile.windows b/Dockerfile.windows
+index 6f8242decc..b0ee068aab 100644
+--- a/Dockerfile.windows
++++ b/Dockerfile.windows
+@@ -165,7 +165,7 @@ FROM microsoft/windowsservercore
+ # Use PowerShell as the default shell
+ SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
+
+-ARG GO_VERSION=1.18.7
++ARG GO_VERSION=1.17.13
+ ARG GOTESTSUM_VERSION=v1.7.0
+
+ # Environment variable notes:
+diff --git a/daemon/logger/templates/templates.go b/daemon/logger/templates/templates.go
+index d8b4ce5d85..ab76d0f1c2 100644
+--- a/daemon/logger/templates/templates.go
++++ b/daemon/logger/templates/templates.go
+@@ -20,7 +20,7 @@ var basicFunctions = template.FuncMap{
+ },
+ "split": strings.Split,
+ "join": strings.Join,
+- "title": strings.Title, //nolint:staticcheck // SA1019: strings.Title is deprecated: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.
++ "title": strings.Title,
+ "lower": strings.ToLower,
+ "upper": strings.ToUpper,
+ "pad": padWithSpace,
+diff --git a/pkg/plugins/pluginrpc-gen/template.go b/pkg/plugins/pluginrpc-gen/template.go
+index c34a5add11..50ed9293c1 100644
+--- a/pkg/plugins/pluginrpc-gen/template.go
++++ b/pkg/plugins/pluginrpc-gen/template.go
+@@ -64,7 +64,7 @@ func title(s string) string {
+ if strings.ToLower(s) == "id" {
+ return "ID"
+ }
+- return strings.Title(s) //nolint:staticcheck // SA1019: strings.Title is deprecated: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.
++ return strings.Title(s)
+ }
+
+ var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(`
+diff --git a/vendor/archive/tar/common.go b/vendor/archive/tar/common.go
+index f6d701d925..8706ede431 100644
+--- a/vendor/archive/tar/common.go
++++ b/vendor/archive/tar/common.go
+@@ -319,10 +319,10 @@ func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
+ // fileState tracks the number of logical (includes sparse holes) and physical
+ // (actual in tar archive) bytes remaining for the current file.
+ //
+-// Invariant: logicalRemaining >= physicalRemaining
++// Invariant: LogicalRemaining >= PhysicalRemaining
+ type fileState interface {
+- logicalRemaining() int64
+- physicalRemaining() int64
++ LogicalRemaining() int64
++ PhysicalRemaining() int64
+ }
+
+ // allowedFormats determines which formats can be used.
+@@ -416,22 +416,22 @@ func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err
+
+ // Check basic fields.
+ var blk block
+- v7 := blk.toV7()
+- ustar := blk.toUSTAR()
+- gnu := blk.toGNU()
+- verifyString(h.Name, len(v7.name()), "Name", paxPath)
+- verifyString(h.Linkname, len(v7.linkName()), "Linkname", paxLinkpath)
+- verifyString(h.Uname, len(ustar.userName()), "Uname", paxUname)
+- verifyString(h.Gname, len(ustar.groupName()), "Gname", paxGname)
+- verifyNumeric(h.Mode, len(v7.mode()), "Mode", paxNone)
+- verifyNumeric(int64(h.Uid), len(v7.uid()), "Uid", paxUid)
+- verifyNumeric(int64(h.Gid), len(v7.gid()), "Gid", paxGid)
+- verifyNumeric(h.Size, len(v7.size()), "Size", paxSize)
+- verifyNumeric(h.Devmajor, len(ustar.devMajor()), "Devmajor", paxNone)
+- verifyNumeric(h.Devminor, len(ustar.devMinor()), "Devminor", paxNone)
+- verifyTime(h.ModTime, len(v7.modTime()), "ModTime", paxMtime)
+- verifyTime(h.AccessTime, len(gnu.accessTime()), "AccessTime", paxAtime)
+- verifyTime(h.ChangeTime, len(gnu.changeTime()), "ChangeTime", paxCtime)
++ v7 := blk.V7()
++ ustar := blk.USTAR()
++ gnu := blk.GNU()
++ verifyString(h.Name, len(v7.Name()), "Name", paxPath)
++ verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath)
++ verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname)
++ verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname)
++ verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone)
++ verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid)
++ verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid)
++ verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize)
++ verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone)
++ verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone)
++ verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime)
++ verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime)
++ verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime)
+
+ // Check for header-only types.
+ var whyOnlyPAX, whyOnlyGNU string
+@@ -541,7 +541,7 @@ type headerFileInfo struct {
+ func (fi headerFileInfo) Size() int64 { return fi.h.Size }
+ func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+ func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
+-func (fi headerFileInfo) Sys() any { return fi.h }
++func (fi headerFileInfo) Sys() interface{} { return fi.h }
+
+ // Name returns the base name of the file.
+ func (fi headerFileInfo) Name() string {
+diff --git a/vendor/archive/tar/format.go b/vendor/archive/tar/format.go
+index 8898c438b5..6642364de1 100644
+--- a/vendor/archive/tar/format.go
++++ b/vendor/archive/tar/format.go
+@@ -160,28 +160,28 @@ var zeroBlock block
+ type block [blockSize]byte
+
+ // Convert block to any number of formats.
+-func (b *block) toV7() *headerV7 { return (*headerV7)(b) }
+-func (b *block) toGNU() *headerGNU { return (*headerGNU)(b) }
+-func (b *block) toSTAR() *headerSTAR { return (*headerSTAR)(b) }
+-func (b *block) toUSTAR() *headerUSTAR { return (*headerUSTAR)(b) }
+-func (b *block) toSparse() sparseArray { return sparseArray(b[:]) }
++func (b *block) V7() *headerV7 { return (*headerV7)(b) }
++func (b *block) GNU() *headerGNU { return (*headerGNU)(b) }
++func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) }
++func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) }
++func (b *block) Sparse() sparseArray { return sparseArray(b[:]) }
+
+ // GetFormat checks that the block is a valid tar header based on the checksum.
+ // It then attempts to guess the specific format based on magic values.
+ // If the checksum fails, then FormatUnknown is returned.
+-func (b *block) getFormat() Format {
++func (b *block) GetFormat() Format {
+ // Verify checksum.
+ var p parser
+- value := p.parseOctal(b.toV7().chksum())
+- chksum1, chksum2 := b.computeChecksum()
++ value := p.parseOctal(b.V7().Chksum())
++ chksum1, chksum2 := b.ComputeChecksum()
+ if p.err != nil || (value != chksum1 && value != chksum2) {
+ return FormatUnknown
+ }
+
+ // Guess the magic values.
+- magic := string(b.toUSTAR().magic())
+- version := string(b.toUSTAR().version())
+- trailer := string(b.toSTAR().trailer())
++ magic := string(b.USTAR().Magic())
++ version := string(b.USTAR().Version())
++ trailer := string(b.STAR().Trailer())
+ switch {
+ case magic == magicUSTAR && trailer == trailerSTAR:
+ return formatSTAR
+@@ -194,23 +194,23 @@ func (b *block) getFormat() Format {
+ }
+ }
+
+-// setFormat writes the magic values necessary for specified format
++// SetFormat writes the magic values necessary for specified format
+ // and then updates the checksum accordingly.
+-func (b *block) setFormat(format Format) {
++func (b *block) SetFormat(format Format) {
+ // Set the magic values.
+ switch {
+ case format.has(formatV7):
+ // Do nothing.
+ case format.has(FormatGNU):
+- copy(b.toGNU().magic(), magicGNU)
+- copy(b.toGNU().version(), versionGNU)
++ copy(b.GNU().Magic(), magicGNU)
++ copy(b.GNU().Version(), versionGNU)
+ case format.has(formatSTAR):
+- copy(b.toSTAR().magic(), magicUSTAR)
+- copy(b.toSTAR().version(), versionUSTAR)
+- copy(b.toSTAR().trailer(), trailerSTAR)
++ copy(b.STAR().Magic(), magicUSTAR)
++ copy(b.STAR().Version(), versionUSTAR)
++ copy(b.STAR().Trailer(), trailerSTAR)
+ case format.has(FormatUSTAR | FormatPAX):
+- copy(b.toUSTAR().magic(), magicUSTAR)
+- copy(b.toUSTAR().version(), versionUSTAR)
++ copy(b.USTAR().Magic(), magicUSTAR)
++ copy(b.USTAR().Version(), versionUSTAR)
+ default:
+ panic("invalid format")
+ }
+@@ -218,17 +218,17 @@ func (b *block) setFormat(format Format) {
+ // Update checksum.
+ // This field is special in that it is terminated by a NULL then space.
+ var f formatter
+- field := b.toV7().chksum()
+- chksum, _ := b.computeChecksum() // Possible values are 256..128776
++ field := b.V7().Chksum()
++ chksum, _ := b.ComputeChecksum() // Possible values are 256..128776
+ f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
+ field[7] = ' '
+ }
+
+-// computeChecksum computes the checksum for the header block.
++// ComputeChecksum computes the checksum for the header block.
+ // POSIX specifies a sum of the unsigned byte values, but the Sun tar used
+ // signed byte values.
+ // We compute and return both.
+-func (b *block) computeChecksum() (unsigned, signed int64) {
++func (b *block) ComputeChecksum() (unsigned, signed int64) {
+ for i, c := range b {
+ if 148 <= i && i < 156 {
+ c = ' ' // Treat the checksum field itself as all spaces.
+@@ -240,68 +240,68 @@ func (b *block) computeChecksum() (unsigned, signed int64) {
+ }
+
+ // Reset clears the block with all zeros.
+-func (b *block) reset() {
++func (b *block) Reset() {
+ *b = block{}
+ }
+
+ type headerV7 [blockSize]byte
+
+-func (h *headerV7) name() []byte { return h[000:][:100] }
+-func (h *headerV7) mode() []byte { return h[100:][:8] }
+-func (h *headerV7) uid() []byte { return h[108:][:8] }
+-func (h *headerV7) gid() []byte { return h[116:][:8] }
+-func (h *headerV7) size() []byte { return h[124:][:12] }
+-func (h *headerV7) modTime() []byte { return h[136:][:12] }
+-func (h *headerV7) chksum() []byte { return h[148:][:8] }
+-func (h *headerV7) typeFlag() []byte { return h[156:][:1] }
+-func (h *headerV7) linkName() []byte { return h[157:][:100] }
++func (h *headerV7) Name() []byte { return h[000:][:100] }
++func (h *headerV7) Mode() []byte { return h[100:][:8] }
++func (h *headerV7) UID() []byte { return h[108:][:8] }
++func (h *headerV7) GID() []byte { return h[116:][:8] }
++func (h *headerV7) Size() []byte { return h[124:][:12] }
++func (h *headerV7) ModTime() []byte { return h[136:][:12] }
++func (h *headerV7) Chksum() []byte { return h[148:][:8] }
++func (h *headerV7) TypeFlag() []byte { return h[156:][:1] }
++func (h *headerV7) LinkName() []byte { return h[157:][:100] }
+
+ type headerGNU [blockSize]byte
+
+-func (h *headerGNU) v7() *headerV7 { return (*headerV7)(h) }
+-func (h *headerGNU) magic() []byte { return h[257:][:6] }
+-func (h *headerGNU) version() []byte { return h[263:][:2] }
+-func (h *headerGNU) userName() []byte { return h[265:][:32] }
+-func (h *headerGNU) groupName() []byte { return h[297:][:32] }
+-func (h *headerGNU) devMajor() []byte { return h[329:][:8] }
+-func (h *headerGNU) devMinor() []byte { return h[337:][:8] }
+-func (h *headerGNU) accessTime() []byte { return h[345:][:12] }
+-func (h *headerGNU) changeTime() []byte { return h[357:][:12] }
+-func (h *headerGNU) sparse() sparseArray { return sparseArray(h[386:][:24*4+1]) }
+-func (h *headerGNU) realSize() []byte { return h[483:][:12] }
++func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) }
++func (h *headerGNU) Magic() []byte { return h[257:][:6] }
++func (h *headerGNU) Version() []byte { return h[263:][:2] }
++func (h *headerGNU) UserName() []byte { return h[265:][:32] }
++func (h *headerGNU) GroupName() []byte { return h[297:][:32] }
++func (h *headerGNU) DevMajor() []byte { return h[329:][:8] }
++func (h *headerGNU) DevMinor() []byte { return h[337:][:8] }
++func (h *headerGNU) AccessTime() []byte { return h[345:][:12] }
++func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] }
++func (h *headerGNU) Sparse() sparseArray { return sparseArray(h[386:][:24*4+1]) }
++func (h *headerGNU) RealSize() []byte { return h[483:][:12] }
+
+ type headerSTAR [blockSize]byte
+
+-func (h *headerSTAR) v7() *headerV7 { return (*headerV7)(h) }
+-func (h *headerSTAR) magic() []byte { return h[257:][:6] }
+-func (h *headerSTAR) version() []byte { return h[263:][:2] }
+-func (h *headerSTAR) userName() []byte { return h[265:][:32] }
+-func (h *headerSTAR) groupName() []byte { return h[297:][:32] }
+-func (h *headerSTAR) devMajor() []byte { return h[329:][:8] }
+-func (h *headerSTAR) devMinor() []byte { return h[337:][:8] }
+-func (h *headerSTAR) prefix() []byte { return h[345:][:131] }
+-func (h *headerSTAR) accessTime() []byte { return h[476:][:12] }
+-func (h *headerSTAR) changeTime() []byte { return h[488:][:12] }
+-func (h *headerSTAR) trailer() []byte { return h[508:][:4] }
++func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) }
++func (h *headerSTAR) Magic() []byte { return h[257:][:6] }
++func (h *headerSTAR) Version() []byte { return h[263:][:2] }
++func (h *headerSTAR) UserName() []byte { return h[265:][:32] }
++func (h *headerSTAR) GroupName() []byte { return h[297:][:32] }
++func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] }
++func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] }
++func (h *headerSTAR) Prefix() []byte { return h[345:][:131] }
++func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] }
++func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] }
++func (h *headerSTAR) Trailer() []byte { return h[508:][:4] }
+
+ type headerUSTAR [blockSize]byte
+
+-func (h *headerUSTAR) v7() *headerV7 { return (*headerV7)(h) }
+-func (h *headerUSTAR) magic() []byte { return h[257:][:6] }
+-func (h *headerUSTAR) version() []byte { return h[263:][:2] }
+-func (h *headerUSTAR) userName() []byte { return h[265:][:32] }
+-func (h *headerUSTAR) groupName() []byte { return h[297:][:32] }
+-func (h *headerUSTAR) devMajor() []byte { return h[329:][:8] }
+-func (h *headerUSTAR) devMinor() []byte { return h[337:][:8] }
+-func (h *headerUSTAR) prefix() []byte { return h[345:][:155] }
++func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) }
++func (h *headerUSTAR) Magic() []byte { return h[257:][:6] }
++func (h *headerUSTAR) Version() []byte { return h[263:][:2] }
++func (h *headerUSTAR) UserName() []byte { return h[265:][:32] }
++func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] }
++func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] }
++func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] }
++func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] }
+
+ type sparseArray []byte
+
+-func (s sparseArray) entry(i int) sparseElem { return sparseElem(s[i*24:]) }
+-func (s sparseArray) isExtended() []byte { return s[24*s.maxEntries():][:1] }
+-func (s sparseArray) maxEntries() int { return len(s) / 24 }
++func (s sparseArray) Entry(i int) sparseElem { return sparseElem(s[i*24:]) }
++func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] }
++func (s sparseArray) MaxEntries() int { return len(s) / 24 }
+
+ type sparseElem []byte
+
+-func (s sparseElem) offset() []byte { return s[00:][:12] }
+-func (s sparseElem) length() []byte { return s[12:][:12] }
++func (s sparseElem) Offset() []byte { return s[00:][:12] }
++func (s sparseElem) Length() []byte { return s[12:][:12] }
+diff --git a/vendor/archive/tar/fuzz_test.go b/vendor/archive/tar/fuzz_test.go
+deleted file mode 100644
+index e73e0d2609..0000000000
+--- a/vendor/archive/tar/fuzz_test.go
++++ /dev/null
+@@ -1,80 +0,0 @@
+-// Copyright 2021 The Go Authors. All rights reserved.
+-// Use of this source code is governed by a BSD-style
+-// license that can be found in the LICENSE file.
+-
+-package tar
+-
+-import (
+- "bytes"
+- "io"
+- "testing"
+-)
+-
+-func FuzzReader(f *testing.F) {
+- b := bytes.NewBuffer(nil)
+- w := NewWriter(b)
+- inp := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.")
+- err := w.WriteHeader(&Header{
+- Name: "lorem.txt",
+- Mode: 0600,
+- Size: int64(len(inp)),
+- })
+- if err != nil {
+- f.Fatalf("failed to create writer: %s", err)
+- }
+- _, err = w.Write(inp)
+- if err != nil {
+- f.Fatalf("failed to write file to archive: %s", err)
+- }
+- if err := w.Close(); err != nil {
+- f.Fatalf("failed to write archive: %s", err)
+- }
+- f.Add(b.Bytes())
+-
+- f.Fuzz(func(t *testing.T, b []byte) {
+- r := NewReader(bytes.NewReader(b))
+- type file struct {
+- header *Header
+- content []byte
+- }
+- files := []file{}
+- for {
+- hdr, err := r.Next()
+- if err == io.EOF {
+- break
+- }
+- if err != nil {
+- return
+- }
+- buf := bytes.NewBuffer(nil)
+- if _, err := io.Copy(buf, r); err != nil {
+- continue
+- }
+- files = append(files, file{header: hdr, content: buf.Bytes()})
+- }
+-
+- // If we were unable to read anything out of the archive don't
+- // bother trying to roundtrip it.
+- if len(files) == 0 {
+- return
+- }
+-
+- out := bytes.NewBuffer(nil)
+- w := NewWriter(out)
+- for _, f := range files {
+- if err := w.WriteHeader(f.header); err != nil {
+- t.Fatalf("unable to write previously parsed header: %s", err)
+- }
+- if _, err := w.Write(f.content); err != nil {
+- t.Fatalf("unable to write previously parsed content: %s", err)
+- }
+- }
+- if err := w.Close(); err != nil {
+- t.Fatalf("Unable to write archive: %s", err)
+- }
+-
+- // TODO: We may want to check if the archive roundtrips. This would require
+- // taking into account addition of the two zero trailer blocks that Writer.Close
+- // appends.
+- })
+-}
+diff --git a/vendor/archive/tar/reader.go b/vendor/archive/tar/reader.go
+index 45848304ed..ca32bdea1f 100644
+--- a/vendor/archive/tar/reader.go
++++ b/vendor/archive/tar/reader.go
+@@ -65,7 +65,7 @@ func (tr *Reader) next() (*Header, error) {
+ format := FormatUSTAR | FormatPAX | FormatGNU
+ for {
+ // Discard the remainder of the file and any padding.
+- if err := discard(tr.r, tr.curr.physicalRemaining()); err != nil {
++ if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil {
+ return nil, err
+ }
+ if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
+@@ -355,7 +355,7 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
+ }
+
+ // Verify the header matches a known format.
+- format := tr.blk.getFormat()
++ format := tr.blk.GetFormat()
+ if format == FormatUnknown {
+ return nil, nil, ErrHeader
+ }
+@@ -364,30 +364,30 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
+ hdr := new(Header)
+
+ // Unpack the V7 header.
+- v7 := tr.blk.toV7()
+- hdr.Typeflag = v7.typeFlag()[0]
+- hdr.Name = p.parseString(v7.name())
+- hdr.Linkname = p.parseString(v7.linkName())
+- hdr.Size = p.parseNumeric(v7.size())
+- hdr.Mode = p.parseNumeric(v7.mode())
+- hdr.Uid = int(p.parseNumeric(v7.uid()))
+- hdr.Gid = int(p.parseNumeric(v7.gid()))
+- hdr.ModTime = time.Unix(p.parseNumeric(v7.modTime()), 0)
++ v7 := tr.blk.V7()
++ hdr.Typeflag = v7.TypeFlag()[0]
++ hdr.Name = p.parseString(v7.Name())
++ hdr.Linkname = p.parseString(v7.LinkName())
++ hdr.Size = p.parseNumeric(v7.Size())
++ hdr.Mode = p.parseNumeric(v7.Mode())
++ hdr.Uid = int(p.parseNumeric(v7.UID()))
++ hdr.Gid = int(p.parseNumeric(v7.GID()))
++ hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)
+
+ // Unpack format specific fields.
+ if format > formatV7 {
+- ustar := tr.blk.toUSTAR()
+- hdr.Uname = p.parseString(ustar.userName())
+- hdr.Gname = p.parseString(ustar.groupName())
+- hdr.Devmajor = p.parseNumeric(ustar.devMajor())
+- hdr.Devminor = p.parseNumeric(ustar.devMinor())
++ ustar := tr.blk.USTAR()
++ hdr.Uname = p.parseString(ustar.UserName())
++ hdr.Gname = p.parseString(ustar.GroupName())
++ hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
++ hdr.Devminor = p.parseNumeric(ustar.DevMinor())
+
+ var prefix string
+ switch {
+ case format.has(FormatUSTAR | FormatPAX):
+ hdr.Format = format
+- ustar := tr.blk.toUSTAR()
+- prefix = p.parseString(ustar.prefix())
++ ustar := tr.blk.USTAR()
++ prefix = p.parseString(ustar.Prefix())
+
+ // For Format detection, check if block is properly formatted since
+ // the parser is more liberal than what USTAR actually permits.
+@@ -396,23 +396,23 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
+ hdr.Format = FormatUnknown // Non-ASCII characters in block.
+ }
+ nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
+- if !(nul(v7.size()) && nul(v7.mode()) && nul(v7.uid()) && nul(v7.gid()) &&
+- nul(v7.modTime()) && nul(ustar.devMajor()) && nul(ustar.devMinor())) {
++ if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
++ nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
+ hdr.Format = FormatUnknown // Numeric fields must end in NUL
+ }
+ case format.has(formatSTAR):
+- star := tr.blk.toSTAR()
+- prefix = p.parseString(star.prefix())
+- hdr.AccessTime = time.Unix(p.parseNumeric(star.accessTime()), 0)
+- hdr.ChangeTime = time.Unix(p.parseNumeric(star.changeTime()), 0)
++ star := tr.blk.STAR()
++ prefix = p.parseString(star.Prefix())
++ hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
++ hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
+ case format.has(FormatGNU):
+ hdr.Format = format
+ var p2 parser
+- gnu := tr.blk.toGNU()
+- if b := gnu.accessTime(); b[0] != 0 {
++ gnu := tr.blk.GNU()
++ if b := gnu.AccessTime(); b[0] != 0 {
+ hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
+ }
+- if b := gnu.changeTime(); b[0] != 0 {
++ if b := gnu.ChangeTime(); b[0] != 0 {
+ hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
+ }
+
+@@ -439,8 +439,8 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
+ // See https://golang.org/issues/21005
+ if p2.err != nil {
+ hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
+- ustar := tr.blk.toUSTAR()
+- if s := p.parseString(ustar.prefix()); isASCII(s) {
++ ustar := tr.blk.USTAR()
++ if s := p.parseString(ustar.Prefix()); isASCII(s) {
+ prefix = s
+ }
+ hdr.Format = FormatUnknown // Buggy file is not GNU
+@@ -465,38 +465,38 @@ func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, err
+ // Make sure that the input format is GNU.
+ // Unfortunately, the STAR format also has a sparse header format that uses
+ // the same type flag but has a completely different layout.
+- if blk.getFormat() != FormatGNU {
++ if blk.GetFormat() != FormatGNU {
+ return nil, ErrHeader
+ }
+ hdr.Format.mayOnlyBe(FormatGNU)
+
+ var p parser
+- hdr.Size = p.parseNumeric(blk.toGNU().realSize())
++ hdr.Size = p.parseNumeric(blk.GNU().RealSize())
+ if p.err != nil {
+ return nil, p.err
+ }
+- s := blk.toGNU().sparse()
+- spd := make(sparseDatas, 0, s.maxEntries())
++ s := blk.GNU().Sparse()
++ spd := make(sparseDatas, 0, s.MaxEntries())
+ for {
+- for i := 0; i < s.maxEntries(); i++ {
++ for i := 0; i < s.MaxEntries(); i++ {
+ // This termination condition is identical to GNU and BSD tar.
+- if s.entry(i).offset()[0] == 0x00 {
++ if s.Entry(i).Offset()[0] == 0x00 {
+ break // Don't return, need to process extended headers (even if empty)
+ }
+- offset := p.parseNumeric(s.entry(i).offset())
+- length := p.parseNumeric(s.entry(i).length())
++ offset := p.parseNumeric(s.Entry(i).Offset())
++ length := p.parseNumeric(s.Entry(i).Length())
+ if p.err != nil {
+ return nil, p.err
+ }
+ spd = append(spd, sparseEntry{Offset: offset, Length: length})
+ }
+
+- if s.isExtended()[0] > 0 {
++ if s.IsExtended()[0] > 0 {
+ // There are more entries. Read an extension header and parse its entries.
+ if _, err := mustReadFull(tr.r, blk[:]); err != nil {
+ return nil, err
+ }
+- s = blk.toSparse()
++ s = blk.Sparse()
+ continue
+ }
+ return spd, nil // Done
+@@ -678,13 +678,11 @@ func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
+ return io.Copy(w, struct{ io.Reader }{fr})
+ }
+
+-// logicalRemaining implements fileState.logicalRemaining.
+-func (fr regFileReader) logicalRemaining() int64 {
++func (fr regFileReader) LogicalRemaining() int64 {
+ return fr.nb
+ }
+
+-// logicalRemaining implements fileState.physicalRemaining.
+-func (fr regFileReader) physicalRemaining() int64 {
++func (fr regFileReader) PhysicalRemaining() int64 {
+ return fr.nb
+ }
+
+@@ -696,9 +694,9 @@ type sparseFileReader struct {
+ }
+
+ func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
+- finished := int64(len(b)) >= sr.logicalRemaining()
++ finished := int64(len(b)) >= sr.LogicalRemaining()
+ if finished {
+- b = b[:sr.logicalRemaining()]
++ b = b[:sr.LogicalRemaining()]
+ }
+
+ b0 := b
+@@ -726,7 +724,7 @@ func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
+ return n, errMissData // Less data in dense file than sparse file
+ case err != nil:
+ return n, err
+- case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
++ case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
+ return n, errUnrefData // More data in dense file than sparse file
+ case finished:
+ return n, io.EOF
+@@ -748,7 +746,7 @@ func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
+
+ var writeLastByte bool
+ pos0 := sr.pos
+- for sr.logicalRemaining() > 0 && !writeLastByte && err == nil {
++ for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
+ var nf int64 // Size of fragment
+ holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
+ if sr.pos < holeStart { // In a data fragment
+@@ -756,7 +754,7 @@ func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
+ nf, err = io.CopyN(ws, sr.fr, nf)
+ } else { // In a hole fragment
+ nf = holeEnd - sr.pos
+- if sr.physicalRemaining() == 0 {
++ if sr.PhysicalRemaining() == 0 {
+ writeLastByte = true
+ nf--
+ }
+@@ -781,18 +779,18 @@ func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
+ return n, errMissData // Less data in dense file than sparse file
+ case err != nil:
+ return n, err
+- case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
++ case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
+ return n, errUnrefData // More data in dense file than sparse file
+ default:
+ return n, nil
+ }
+ }
+
+-func (sr sparseFileReader) logicalRemaining() int64 {
++func (sr sparseFileReader) LogicalRemaining() int64 {
+ return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
+ }
+-func (sr sparseFileReader) physicalRemaining() int64 {
+- return sr.fr.physicalRemaining()
++func (sr sparseFileReader) PhysicalRemaining() int64 {
++ return sr.fr.PhysicalRemaining()
+ }
+
+ type zeroReader struct{}
+diff --git a/vendor/archive/tar/reader_test.go b/vendor/archive/tar/reader_test.go
+index 140c736429..5a644a43a4 100644
+--- a/vendor/archive/tar/reader_test.go
++++ b/vendor/archive/tar/reader_test.go
+@@ -1030,12 +1030,12 @@ func TestParsePAX(t *testing.T) {
+
+ func TestReadOldGNUSparseMap(t *testing.T) {
+ populateSparseMap := func(sa sparseArray, sps []string) []string {
+- for i := 0; len(sps) > 0 && i < sa.maxEntries(); i++ {
+- copy(sa.entry(i), sps[0])
++ for i := 0; len(sps) > 0 && i < sa.MaxEntries(); i++ {
++ copy(sa.Entry(i), sps[0])
+ sps = sps[1:]
+ }
+ if len(sps) > 0 {
+- copy(sa.isExtended(), "\x80")
++ copy(sa.IsExtended(), "\x80")
+ }
+ return sps
+ }
+@@ -1043,19 +1043,19 @@ func TestReadOldGNUSparseMap(t *testing.T) {
+ makeInput := func(format Format, size string, sps ...string) (out []byte) {
+ // Write the initial GNU header.
+ var blk block
+- gnu := blk.toGNU()
+- sparse := gnu.sparse()
+- copy(gnu.realSize(), size)
++ gnu := blk.GNU()
++ sparse := gnu.Sparse()
++ copy(gnu.RealSize(), size)
+ sps = populateSparseMap(sparse, sps)
+ if format != FormatUnknown {
+- blk.setFormat(format)
++ blk.SetFormat(format)
+ }
+ out = append(out, blk[:]...)
+
+ // Write extended sparse blocks.
+ for len(sps) > 0 {
+ var blk block
+- sps = populateSparseMap(blk.toSparse(), sps)
++ sps = populateSparseMap(blk.Sparse(), sps)
+ out = append(out, blk[:]...)
+ }
+ return out
+@@ -1368,11 +1368,11 @@ func TestFileReader(t *testing.T) {
+ wantCnt int64
+ wantErr error
+ }
+- testRemaining struct { // logicalRemaining() == wantLCnt, physicalRemaining() == wantPCnt
++ testRemaining struct { // LogicalRemaining() == wantLCnt, PhysicalRemaining() == wantPCnt
+ wantLCnt int64
+ wantPCnt int64
+ }
+- testFnc any // testRead | testWriteTo | testRemaining
++ testFnc interface{} // testRead | testWriteTo | testRemaining
+ )
+
+ type (
+@@ -1385,7 +1385,7 @@ func TestFileReader(t *testing.T) {
+ spd sparseDatas
+ size int64
+ }
+- fileMaker any // makeReg | makeSparse
++ fileMaker interface{} // makeReg | makeSparse
+ )
+
+ vectors := []struct {
+@@ -1605,11 +1605,11 @@ func TestFileReader(t *testing.T) {
+ t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops))
+ }
+ case testRemaining:
+- if got := fr.logicalRemaining(); got != tf.wantLCnt {
+- t.Errorf("test %d.%d, logicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
++ if got := fr.LogicalRemaining(); got != tf.wantLCnt {
++ t.Errorf("test %d.%d, LogicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
+ }
+- if got := fr.physicalRemaining(); got != tf.wantPCnt {
+- t.Errorf("test %d.%d, physicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
++ if got := fr.PhysicalRemaining(); got != tf.wantPCnt {
++ t.Errorf("test %d.%d, PhysicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
+ }
+ default:
+ t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf)
+diff --git a/vendor/archive/tar/stat_actime1.go b/vendor/archive/tar/stat_actime1.go
+index c4c2480fee..4fdf2a04b3 100644
+--- a/vendor/archive/tar/stat_actime1.go
++++ b/vendor/archive/tar/stat_actime1.go
+@@ -3,6 +3,7 @@
+ // license that can be found in the LICENSE file.
+
+ //go:build aix || linux || dragonfly || openbsd || solaris
++// +build aix linux dragonfly openbsd solaris
+
+ package tar
+
+diff --git a/vendor/archive/tar/stat_actime2.go b/vendor/archive/tar/stat_actime2.go
+index f76d6be220..5a9a35cbb4 100644
+--- a/vendor/archive/tar/stat_actime2.go
++++ b/vendor/archive/tar/stat_actime2.go
+@@ -3,6 +3,7 @@
+ // license that can be found in the LICENSE file.
+
+ //go:build darwin || freebsd || netbsd
++// +build darwin freebsd netbsd
+
+ package tar
+
+diff --git a/vendor/archive/tar/strconv.go b/vendor/archive/tar/strconv.go
+index ac3196370e..fde45c9dbf 100644
+--- a/vendor/archive/tar/strconv.go
++++ b/vendor/archive/tar/strconv.go
+@@ -14,7 +14,7 @@ import (
+
+ // hasNUL reports whether the NUL character exists within s.
+ func hasNUL(s string) bool {
+- return strings.Contains(s, "\x00")
++ return strings.IndexByte(s, 0) >= 0
+ }
+
+ // isASCII reports whether the input is an ASCII C-style string.
+@@ -201,7 +201,10 @@ func parsePAXTime(s string) (time.Time, error) {
+ const maxNanoSecondDigits = 9
+
+ // Split string into seconds and sub-seconds parts.
+- ss, sn, _ := strings.Cut(s, ".")
++ ss, sn := s, ""
++ if pos := strings.IndexByte(s, '.'); pos >= 0 {
++ ss, sn = s[:pos], s[pos+1:]
++ }
+
+ // Parse the seconds.
+ secs, err := strconv.ParseInt(ss, 10, 64)
+@@ -251,32 +254,48 @@ func formatPAXTime(ts time.Time) (s string) {
+ // return the remainder as r.
+ func parsePAXRecord(s string) (k, v, r string, err error) {
+ // The size field ends at the first space.
+- nStr, rest, ok := strings.Cut(s, " ")
+- if !ok {
++ sp := strings.IndexByte(s, ' ')
++ if sp == -1 {
+ return "", "", s, ErrHeader
+ }
+
+ // Parse the first token as a decimal integer.
+- n, perr := strconv.ParseInt(nStr, 10, 0) // Intentionally parse as native int
+- if perr != nil || n < 5 || n > int64(len(s)) {
++ n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
++ if perr != nil || n < 5 || int64(len(s)) < n {
+ return "", "", s, ErrHeader
+ }
+- n -= int64(len(nStr) + 1) // convert from index in s to index in rest
+- if n <= 0 {
++
++ afterSpace := int64(sp + 1)
++ beforeLastNewLine := n - 1
++ // In some cases, "length" was perhaps padded/malformed, and
++ // trying to index past where the space supposedly is goes past
++ // the end of the actual record.
++ // For example:
++ // "0000000000000000000000000000000030 mtime=1432668921.098285006\n30 ctime=2147483649.15163319"
++ // ^ ^
++ // | |
++ // | afterSpace=35
++ // |
++ // beforeLastNewLine=29
++ // yet indexOf(firstSpace) MUST BE before endOfRecord.
++ //
++ // See https://golang.org/issues/40196.
++ if afterSpace >= beforeLastNewLine {
+ return "", "", s, ErrHeader
+ }
+
+ // Extract everything between the space and the final newline.
+- rec, nl, rem := rest[:n-1], rest[n-1:n], rest[n:]
++ rec, nl, rem := s[afterSpace:beforeLastNewLine], s[beforeLastNewLine:n], s[n:]
+ if nl != "\n" {
+ return "", "", s, ErrHeader
+ }
+
+ // The first equals separates the key from the value.
+- k, v, ok = strings.Cut(rec, "=")
+- if !ok {
++ eq := strings.IndexByte(rec, '=')
++ if eq == -1 {
+ return "", "", s, ErrHeader
+ }
++ k, v = rec[:eq], rec[eq+1:]
+
+ if !validPAXRecord(k, v) {
+ return "", "", s, ErrHeader
+@@ -315,7 +334,7 @@ func formatPAXRecord(k, v string) (string, error) {
+ // for the PAX version of the USTAR string fields.
+ // The key must not contain an '=' character.
+ func validPAXRecord(k, v string) bool {
+- if k == "" || strings.Contains(k, "=") {
++ if k == "" || strings.IndexByte(k, '=') >= 0 {
+ return false
+ }
+ switch k {
+diff --git a/vendor/archive/tar/tar_test.go b/vendor/archive/tar/tar_test.go
+index a476f5eb01..e9fafc7cc7 100644
+--- a/vendor/archive/tar/tar_test.go
++++ b/vendor/archive/tar/tar_test.go
+@@ -23,7 +23,7 @@ import (
+
+ type testError struct{ error }
+
+-type fileOps []any // []T where T is (string | int64)
++type fileOps []interface{} // []T where T is (string | int64)
+
+ // testFile is an io.ReadWriteSeeker where the IO operations performed
+ // on it must match the list of operations in ops.
+diff --git a/vendor/archive/tar/writer.go b/vendor/archive/tar/writer.go
+index 9b2e3e25d4..893eac00ae 100644
+--- a/vendor/archive/tar/writer.go
++++ b/vendor/archive/tar/writer.go
+@@ -50,7 +50,7 @@ func (tw *Writer) Flush() error {
+ if tw.err != nil {
+ return tw.err
+ }
+- if nb := tw.curr.logicalRemaining(); nb > 0 {
++ if nb := tw.curr.LogicalRemaining(); nb > 0 {
+ return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
+ }
+ if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
+@@ -117,8 +117,8 @@ func (tw *Writer) writeUSTARHeader(hdr *Header) error {
+ // Pack the main header.
+ var f formatter
+ blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
+- f.formatString(blk.toUSTAR().prefix(), namePrefix)
+- blk.setFormat(FormatUSTAR)
++ f.formatString(blk.USTAR().Prefix(), namePrefix)
++ blk.SetFormat(FormatUSTAR)
+ if f.err != nil {
+ return f.err // Should never happen since header is validated
+ }
+@@ -211,7 +211,7 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
+ var f formatter // Ignore errors since they are expected
+ fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
+ blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
+- blk.setFormat(FormatPAX)
++ blk.SetFormat(FormatPAX)
+ if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
+ return err
+ }
+@@ -253,10 +253,10 @@ func (tw *Writer) writeGNUHeader(hdr *Header) error {
+ var spb []byte
+ blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
+ if !hdr.AccessTime.IsZero() {
+- f.formatNumeric(blk.toGNU().accessTime(), hdr.AccessTime.Unix())
++ f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix())
+ }
+ if !hdr.ChangeTime.IsZero() {
+- f.formatNumeric(blk.toGNU().changeTime(), hdr.ChangeTime.Unix())
++ f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix())
+ }
+ // TODO(dsnet): Re-enable this when adding sparse support.
+ // See https://golang.org/issue/22735
+@@ -296,7 +296,7 @@ func (tw *Writer) writeGNUHeader(hdr *Header) error {
+ f.formatNumeric(blk.GNU().RealSize(), realSize)
+ }
+ */
+- blk.setFormat(FormatGNU)
++ blk.SetFormat(FormatGNU)
+ if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
+ return err
+ }
+@@ -324,28 +324,28 @@ type (
+ // The block returned is only valid until the next call to
+ // templateV7Plus or writeRawFile.
+ func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
+- tw.blk.reset()
++ tw.blk.Reset()
+
+ modTime := hdr.ModTime
+ if modTime.IsZero() {
+ modTime = time.Unix(0, 0)
+ }
+
+- v7 := tw.blk.toV7()
+- v7.typeFlag()[0] = hdr.Typeflag
+- fmtStr(v7.name(), hdr.Name)
+- fmtStr(v7.linkName(), hdr.Linkname)
+- fmtNum(v7.mode(), hdr.Mode)
+- fmtNum(v7.uid(), int64(hdr.Uid))
+- fmtNum(v7.gid(), int64(hdr.Gid))
+- fmtNum(v7.size(), hdr.Size)
+- fmtNum(v7.modTime(), modTime.Unix())
++ v7 := tw.blk.V7()
++ v7.TypeFlag()[0] = hdr.Typeflag
++ fmtStr(v7.Name(), hdr.Name)
++ fmtStr(v7.LinkName(), hdr.Linkname)
++ fmtNum(v7.Mode(), hdr.Mode)
++ fmtNum(v7.UID(), int64(hdr.Uid))
++ fmtNum(v7.GID(), int64(hdr.Gid))
++ fmtNum(v7.Size(), hdr.Size)
++ fmtNum(v7.ModTime(), modTime.Unix())
+
+- ustar := tw.blk.toUSTAR()
+- fmtStr(ustar.userName(), hdr.Uname)
+- fmtStr(ustar.groupName(), hdr.Gname)
+- fmtNum(ustar.devMajor(), hdr.Devmajor)
+- fmtNum(ustar.devMinor(), hdr.Devminor)
++ ustar := tw.blk.USTAR()
++ fmtStr(ustar.UserName(), hdr.Uname)
++ fmtStr(ustar.GroupName(), hdr.Gname)
++ fmtNum(ustar.DevMajor(), hdr.Devmajor)
++ fmtNum(ustar.DevMinor(), hdr.Devminor)
+
+ return &tw.blk
+ }
+@@ -354,7 +354,7 @@ func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum num
+ // It uses format to encode the header format and will write data as the body.
+ // It uses default values for all of the other fields (as BSD and GNU tar does).
+ func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
+- tw.blk.reset()
++ tw.blk.Reset()
+
+ // Best effort for the filename.
+ name = toASCII(name)
+@@ -364,15 +364,15 @@ func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) erro
+ name = strings.TrimRight(name, "/")
+
+ var f formatter
+- v7 := tw.blk.toV7()
+- v7.typeFlag()[0] = flag
+- f.formatString(v7.name(), name)
+- f.formatOctal(v7.mode(), 0)
+- f.formatOctal(v7.uid(), 0)
+- f.formatOctal(v7.gid(), 0)
+- f.formatOctal(v7.size(), int64(len(data))) // Must be < 8GiB
+- f.formatOctal(v7.modTime(), 0)
+- tw.blk.setFormat(format)
++ v7 := tw.blk.V7()
++ v7.TypeFlag()[0] = flag
++ f.formatString(v7.Name(), name)
++ f.formatOctal(v7.Mode(), 0)
++ f.formatOctal(v7.UID(), 0)
++ f.formatOctal(v7.GID(), 0)
++ f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB
++ f.formatOctal(v7.ModTime(), 0)
++ tw.blk.SetFormat(format)
+ if f.err != nil {
+ return f.err // Only occurs if size condition is violated
+ }
+@@ -514,13 +514,10 @@ func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
+ return io.Copy(struct{ io.Writer }{fw}, r)
+ }
+
+-// logicalRemaining implements fileState.logicalRemaining.
+-func (fw regFileWriter) logicalRemaining() int64 {
++func (fw regFileWriter) LogicalRemaining() int64 {
+ return fw.nb
+ }
+-
+-// logicalRemaining implements fileState.physicalRemaining.
+-func (fw regFileWriter) physicalRemaining() int64 {
++func (fw regFileWriter) PhysicalRemaining() int64 {
+ return fw.nb
+ }
+
+@@ -532,9 +529,9 @@ type sparseFileWriter struct {
+ }
+
+ func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
+- overwrite := int64(len(b)) > sw.logicalRemaining()
++ overwrite := int64(len(b)) > sw.LogicalRemaining()
+ if overwrite {
+- b = b[:sw.logicalRemaining()]
++ b = b[:sw.LogicalRemaining()]
+ }
+
+ b0 := b
+@@ -562,7 +559,7 @@ func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
+ return n, errMissData // Not possible; implies bug in validation logic
+ case err != nil:
+ return n, err
+- case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
++ case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
+ return n, errUnrefData // Not possible; implies bug in validation logic
+ case overwrite:
+ return n, ErrWriteTooLong
+@@ -584,12 +581,12 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
+
+ var readLastByte bool
+ pos0 := sw.pos
+- for sw.logicalRemaining() > 0 && !readLastByte && err == nil {
++ for sw.LogicalRemaining() > 0 && !readLastByte && err == nil {
+ var nf int64 // Size of fragment
+ dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
+ if sw.pos < dataStart { // In a hole fragment
+ nf = dataStart - sw.pos
+- if sw.physicalRemaining() == 0 {
++ if sw.PhysicalRemaining() == 0 {
+ readLastByte = true
+ nf--
+ }
+@@ -619,18 +616,18 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
+ return n, errMissData // Not possible; implies bug in validation logic
+ case err != nil:
+ return n, err
+- case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
++ case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
+ return n, errUnrefData // Not possible; implies bug in validation logic
+ default:
+ return n, ensureEOF(rs)
+ }
+ }
+
+-func (sw sparseFileWriter) logicalRemaining() int64 {
++func (sw sparseFileWriter) LogicalRemaining() int64 {
+ return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
+ }
+-func (sw sparseFileWriter) physicalRemaining() int64 {
+- return sw.fw.physicalRemaining()
++func (sw sparseFileWriter) PhysicalRemaining() int64 {
++ return sw.fw.PhysicalRemaining()
+ }
+
+ // zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
+diff --git a/vendor/archive/tar/writer_test.go b/vendor/archive/tar/writer_test.go
+index 640264984a..4e709e5cac 100644
+--- a/vendor/archive/tar/writer_test.go
++++ b/vendor/archive/tar/writer_test.go
+@@ -67,7 +67,7 @@ func TestWriter(t *testing.T) {
+ testClose struct { // Close() == wantErr
+ wantErr error
+ }
+- testFnc any // testHeader | testWrite | testReadFrom | testClose
++ testFnc interface{} // testHeader | testWrite | testReadFrom | testClose
+ )
+
+ vectors := []struct {
+@@ -987,9 +987,11 @@ func TestIssue12594(t *testing.T) {
+ // The prefix field should never appear in the GNU format.
+ var blk block
+ copy(blk[:], b.Bytes())
+- prefix := string(blk.toUSTAR().prefix())
+- prefix, _, _ = strings.Cut(prefix, "\x00") // Truncate at the NUL terminator
+- if blk.getFormat() == FormatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) {
++ prefix := string(blk.USTAR().Prefix())
++ if i := strings.IndexByte(prefix, 0); i >= 0 {
++ prefix = prefix[:i] // Truncate at the NUL terminator
++ }
++ if blk.GetFormat() == FormatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) {
+ t.Errorf("test %d, found prefix in GNU format: %s", i, prefix)
+ }
+
+@@ -1054,11 +1056,11 @@ func TestFileWriter(t *testing.T) {
+ wantCnt int64
+ wantErr error
+ }
+- testRemaining struct { // logicalRemaining() == wantLCnt, physicalRemaining() == wantPCnt
++ testRemaining struct { // LogicalRemaining() == wantLCnt, PhysicalRemaining() == wantPCnt
+ wantLCnt int64
+ wantPCnt int64
+ }
+- testFnc any // testWrite | testReadFrom | testRemaining
++ testFnc interface{} // testWrite | testReadFrom | testRemaining
+ )
+
+ type (
+@@ -1071,7 +1073,7 @@ func TestFileWriter(t *testing.T) {
+ sph sparseHoles
+ size int64
+ }
+- fileMaker any // makeReg | makeSparse
++ fileMaker interface{} // makeReg | makeSparse
+ )
+
+ vectors := []struct {
+@@ -1317,11 +1319,11 @@ func TestFileWriter(t *testing.T) {
+ t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops))
+ }
+ case testRemaining:
+- if got := fw.logicalRemaining(); got != tf.wantLCnt {
+- t.Errorf("test %d.%d, logicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
++ if got := fw.LogicalRemaining(); got != tf.wantLCnt {
++ t.Errorf("test %d.%d, LogicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
+ }
+- if got := fw.physicalRemaining(); got != tf.wantPCnt {
+- t.Errorf("test %d.%d, physicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
++ if got := fw.PhysicalRemaining(); got != tf.wantPCnt {
++ t.Errorf("test %d.%d, PhysicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
+ }
+ default:
+ t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf)
diff --git a/recipes-containers/docker/files/0003-builder.go-avoid-using-strings.Cut-from-go-1.18.patch b/recipes-containers/docker/files/0003-builder.go-avoid-using-strings.Cut-from-go-1.18.patch
new file mode 100644
index 00000000..0574d9ed
--- /dev/null
+++ b/recipes-containers/docker/files/0003-builder.go-avoid-using-strings.Cut-from-go-1.18.patch
@@ -0,0 +1,32 @@
+From 6867fc1f6bd01596c2d3dc7bc07e26fa98965185 Mon Sep 17 00:00:00 2001
+From: Martin Jansa <Martin.Jansa@gmail.com>
+Date: Mon, 14 Aug 2023 16:41:42 +0200
+Subject: [PATCH] builder.go: avoid using strings.Cut from go-1.18
+
+* we're still using go-1.17
+
+Upstream-Status: Inappropriate
+---
+ builder/builder-next/builder.go | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/builder/builder-next/builder.go b/builder/builder-next/builder.go
+index ee6b9f0fb1..a9bda8c370 100644
+--- a/builder/builder-next/builder.go
++++ b/builder/builder-next/builder.go
+@@ -555,10 +555,13 @@ func toBuildkitExtraHosts(inp []string, hostGatewayIP net.IP) (string, error) {
+ }
+ hosts := make([]string, 0, len(inp))
+ for _, h := range inp {
+- host, ip, ok := strings.Cut(h, ":")
+- if !ok || host == "" || ip == "" {
++ parts := strings.Split(h, ":")
++
++ if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
+ return "", errors.Errorf("invalid host %s", h)
+ }
++ host := parts[0]
++ ip := parts[1]
+ // If the IP Address is a "host-gateway", replace this value with the
+ // IP address stored in the daemon level HostGatewayIP config variable.
+ if ip == opts.HostGatewayName {
diff --git a/recipes-containers/kubernetes/kubernetes/0001-Makefile.generated_files-Fix-race-issue-for-installi.patch b/recipes-containers/kubernetes/kubernetes/0001-Makefile.generated_files-Fix-race-issue-for-installi.patch
index 02bb5e91..1b08b8c3 100644
--- a/recipes-containers/kubernetes/kubernetes/0001-Makefile.generated_files-Fix-race-issue-for-installi.patch
+++ b/recipes-containers/kubernetes/kubernetes/0001-Makefile.generated_files-Fix-race-issue-for-installi.patch
@@ -1,7 +1,7 @@
From 441df8a24a2c80e320f140b5d9bc352c7ce8a64a Mon Sep 17 00:00:00 2001
From: Robert Yang <liezhi.yang@windriver.com>
Date: Thu, 15 Oct 2020 07:27:35 +0000
-Subject: [PATCH] src/import/build/root/Makefile.generated_files: Fix race issue for installing
+Subject: [PATCH] src/import/build/root/Makefile.generated_files: Fix race issue for installing
go2make
The src/import/build/root/Makefile.generated_files are called several times during the build, so the
@@ -25,14 +25,14 @@ Upstream-Status: Pending
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
---
- src/import/build/root/Makefile.generated_files | 4 +++-
+ build/root/Makefile.generated_files | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
-Index: kubernetes-v1.21.1+git45da3fc33872083fb225c1a8c4d03e530d6f7630/src/import/build/root/Makefile.generated_files
-===================================================================
---- kubernetes-v1.21.1+git45da3fc33872083fb225c1a8c4d03e530d6f7630.orig/src/import/build/root/Makefile.generated_files
-+++ kubernetes-v1.21.1+git45da3fc33872083fb225c1a8c4d03e530d6f7630/src/import/build/root/Makefile.generated_files
-@@ -67,7 +67,9 @@
+diff --git a/build/root/Makefile.generated_files b/build/root/Makefile.generated_files
+index d86a90cbb39..19a3d332476 100644
+--- a/build/root/Makefile.generated_files
++++ b/build/root/Makefile.generated_files
+@@ -67,7 +67,9 @@ $(META_DIR)/$(GO_PKGDEPS_FILE): FORCE
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: calculating Go dependencies"; \
fi
@@ -43,3 +43,5 @@ Index: kubernetes-v1.21.1+git45da3fc33872083fb225c1a8c4d03e530d6f7630/src/import
hack/run-in-gopath.sh go2make \
k8s.io/kubernetes/... \
--prune k8s.io/kubernetes/staging \
+--
+2.40.0
diff --git a/recipes-containers/kubernetes/kubernetes/0001-build-golang.sh-convert-remaining-go-calls-to-use.patch b/recipes-containers/kubernetes/kubernetes/0001-build-golang.sh-convert-remaining-go-calls-to-use.patch
index 8adbafb3..00425c7d 100644
--- a/recipes-containers/kubernetes/kubernetes/0001-build-golang.sh-convert-remaining-go-calls-to-use.patch
+++ b/recipes-containers/kubernetes/kubernetes/0001-build-golang.sh-convert-remaining-go-calls-to-use.patch
@@ -8,11 +8,11 @@ Signed-off-by: Bruce Ashfield <bruce.ashfield@gmail.com>
hack/lib/golang.sh | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
-diff --git a/src/import/hack/lib/golang.sh b/src/import/hack/lib/golang.sh
-index e9148ec08fa..71d3c987563 100755
---- a/src/import/hack/lib/golang.sh
-+++ b/src/import/hack/lib/golang.sh
-@@ -651,7 +651,7 @@ kube::golang::build_some_binaries() {
+diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh
+index d0f4b00dadf..cef0c2075a3 100755
+--- a/hack/lib/golang.sh
++++ b/hack/lib/golang.sh
+@@ -654,7 +654,7 @@ kube::golang::build_some_binaries() {
kube::golang::create_coverage_dummy_test "${package}"
kube::util::trap_add "kube::golang::delete_coverage_dummy_test \"${package}\"" EXIT
@@ -21,7 +21,7 @@ index e9148ec08fa..71d3c987563 100755
-covermode count \
-coverpkg k8s.io/...,k8s.io/kubernetes/vendor/k8s.io/... \
"${build_args[@]}" \
-@@ -663,13 +663,13 @@ kube::golang::build_some_binaries() {
+@@ -666,13 +666,13 @@ kube::golang::build_some_binaries() {
done
if [[ "${#uncovered[@]}" != 0 ]]; then
V=2 kube::log::info "Building ${uncovered[*]} without coverage..."
@@ -37,7 +37,7 @@ index e9148ec08fa..71d3c987563 100755
fi
}
-@@ -725,7 +725,7 @@ kube::golang::build_binaries_for_platform() {
+@@ -730,7 +730,7 @@ kube::golang::build_binaries_for_platform() {
testpkg=$(dirname "${test}")
mkdir -p "$(dirname "${outfile}")"
@@ -47,5 +47,5 @@ index e9148ec08fa..71d3c987563 100755
-gcflags "${gogcflags:-}" \
-asmflags "${goasmflags:-}" \
--
-2.19.1
+2.40.0
diff --git a/recipes-containers/kubernetes/kubernetes/0001-cross-don-t-build-tests-by-default.patch b/recipes-containers/kubernetes/kubernetes/0001-cross-don-t-build-tests-by-default.patch
index 659e3013..cd5e46f1 100644
--- a/recipes-containers/kubernetes/kubernetes/0001-cross-don-t-build-tests-by-default.patch
+++ b/recipes-containers/kubernetes/kubernetes/0001-cross-don-t-build-tests-by-default.patch
@@ -15,10 +15,10 @@ Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
hack/make-rules/cross.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
-diff --git a/src/import/hack/make-rules/cross.sh b/hack/make-rules/cross.sh
-index 8e1e938..0898c5c 100755
---- a/src/import/hack/make-rules/cross.sh
-+++ b/src/import/hack/make-rules/cross.sh
+diff --git a/hack/make-rules/cross.sh b/hack/make-rules/cross.sh
+index f8a6d0dbf5e..d22bf52b1cc 100755
+--- a/hack/make-rules/cross.sh
++++ b/hack/make-rules/cross.sh
@@ -33,6 +33,6 @@ make all WHAT="${KUBE_NODE_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_NODE_PLATFO
make all WHAT="${KUBE_CLIENT_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_CLIENT_PLATFORMS[*]}"
@@ -29,5 +29,5 @@ index 8e1e938..0898c5c 100755
-make all WHAT="${KUBE_TEST_SERVER_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_TEST_SERVER_PLATFORMS[*]}"
+#make all WHAT="${KUBE_TEST_SERVER_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_TEST_SERVER_PLATFORMS[*]}"
--
-2.7.4
+2.40.0
diff --git a/recipes-containers/kubernetes/kubernetes/0001-hack-lib-golang.sh-use-CC-from-environment.patch b/recipes-containers/kubernetes/kubernetes/0001-hack-lib-golang.sh-use-CC-from-environment.patch
index 3a22a2ef..8684a94a 100644
--- a/recipes-containers/kubernetes/kubernetes/0001-hack-lib-golang.sh-use-CC-from-environment.patch
+++ b/recipes-containers/kubernetes/kubernetes/0001-hack-lib-golang.sh-use-CC-from-environment.patch
@@ -11,11 +11,11 @@ Signed-off-by: Koen Kooi <koen.kooi@linaro.org>
hack/lib/golang.sh | 4 ----
1 file changed, 4 deletions(-)
-Index: kubernetes-v1.21.1+git45da3fc33872083fb225c1a8c4d03e530d6f7630/src/import/hack/lib/golang.sh
-===================================================================
---- kubernetes-v1.21.1+git45da3fc33872083fb225c1a8c4d03e530d6f7630.orig/src/import/hack/lib/golang.sh
-+++ kubernetes-v1.21.1+git45da3fc33872083fb225c1a8c4d03e530d6f7630/src/import/hack/lib/golang.sh
-@@ -414,19 +414,15 @@
+diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh
+index e16a60d1867..d0f4b00dadf 100755
+--- a/hack/lib/golang.sh
++++ b/hack/lib/golang.sh
+@@ -420,19 +420,15 @@ kube::golang::set_platform_envs() {
;;
"linux/arm")
export CGO_ENABLED=1
@@ -35,3 +35,5 @@ Index: kubernetes-v1.21.1+git45da3fc33872083fb225c1a8c4d03e530d6f7630/src/import
;;
esac
fi
+--
+2.40.0
diff --git a/recipes-containers/kubernetes/kubernetes/CVE-2023-2431.patch b/recipes-containers/kubernetes/kubernetes/CVE-2023-2431.patch
new file mode 100644
index 00000000..56c3a6e1
--- /dev/null
+++ b/recipes-containers/kubernetes/kubernetes/CVE-2023-2431.patch
@@ -0,0 +1,863 @@
+From 73174f870735251e7d4240cdc36983d1bef7db5f Mon Sep 17 00:00:00 2001
+From: Craig Ingram <cjingram@google.com>
+Date: Fri, 24 Feb 2023 15:24:49 -0500
+Subject: [PATCH] Return error for localhost seccomp type with no localhost
+ profile defined
+
+CVE: CVE-2023-2431
+
+Upstream-Status: Backport [https://github.com/kubernetes/kubernetes/commit/73174f870735251e7d4240cdc36983d1bef7db5f]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ pkg/kubelet/kuberuntime/helpers.go | 66 ++--
+ pkg/kubelet/kuberuntime/helpers_test.go | 350 ++++--------------
+ .../kuberuntime_container_linux.go | 16 +-
+ .../kuberuntime_container_linux_test.go | 22 +-
+ pkg/kubelet/kuberuntime/security_context.go | 15 +-
+ 5 files changed, 153 insertions(+), 316 deletions(-)
+
+diff --git a/pkg/kubelet/kuberuntime/helpers.go b/pkg/kubelet/kuberuntime/helpers.go
+index fa580335cf8..b36e01166f8 100644
+--- a/pkg/kubelet/kuberuntime/helpers.go
++++ b/pkg/kubelet/kuberuntime/helpers.go
+@@ -209,28 +209,32 @@ func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.Runtim
+ return &kubecontainer.RuntimeStatus{Conditions: conditions}
+ }
+
+-func fieldProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) string {
++func fieldProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) (string, error) {
+ if scmp == nil {
+ if fallbackToRuntimeDefault {
+- return v1.SeccompProfileRuntimeDefault
++ return v1.SeccompProfileRuntimeDefault, nil
+ }
+- return ""
++ return "", nil
+ }
+ if scmp.Type == v1.SeccompProfileTypeRuntimeDefault {
+- return v1.SeccompProfileRuntimeDefault
+- }
+- if scmp.Type == v1.SeccompProfileTypeLocalhost && scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 {
+- fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile)
+- return v1.SeccompLocalhostProfileNamePrefix + fname
++ return v1.SeccompProfileRuntimeDefault, nil
++ }
++ if scmp.Type == v1.SeccompProfileTypeLocalhost {
++ if scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 {
++ fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile)
++ return v1.SeccompLocalhostProfileNamePrefix + fname, nil
++ } else {
++ return "", fmt.Errorf("localhostProfile must be set if seccompProfile type is Localhost.")
++ }
+ }
+ if scmp.Type == v1.SeccompProfileTypeUnconfined {
+- return v1.SeccompProfileNameUnconfined
++ return v1.SeccompProfileNameUnconfined, nil
+ }
+
+ if fallbackToRuntimeDefault {
+- return v1.SeccompProfileRuntimeDefault
++ return v1.SeccompProfileRuntimeDefault, nil
+ }
+- return ""
++ return "", nil
+ }
+
+ func annotationProfile(profile, profileRootPath string) string {
+@@ -243,7 +247,7 @@ func annotationProfile(profile, profileRootPath string) string {
+ }
+
+ func (m *kubeGenericRuntimeManager) getSeccompProfilePath(annotations map[string]string, containerName string,
+- podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) string {
++ podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) (string, error) {
+ // container fields are applied first
+ if containerSecContext != nil && containerSecContext.SeccompProfile != nil {
+ return fieldProfile(containerSecContext.SeccompProfile, m.seccompProfileRoot, fallbackToRuntimeDefault)
+@@ -252,7 +256,7 @@ func (m *kubeGenericRuntimeManager) getSeccompProfilePath(annotations map[string
+ // if container field does not exist, try container annotation (deprecated)
+ if containerName != "" {
+ if profile, ok := annotations[v1.SeccompContainerAnnotationKeyPrefix+containerName]; ok {
+- return annotationProfile(profile, m.seccompProfileRoot)
++ return annotationProfile(profile, m.seccompProfileRoot), nil
+ }
+ }
+
+@@ -263,46 +267,50 @@ func (m *kubeGenericRuntimeManager) getSeccompProfilePath(annotations map[string
+
+ // as last resort, try to apply pod annotation (deprecated)
+ if profile, ok := annotations[v1.SeccompPodAnnotationKey]; ok {
+- return annotationProfile(profile, m.seccompProfileRoot)
++ return annotationProfile(profile, m.seccompProfileRoot), nil
+ }
+
+ if fallbackToRuntimeDefault {
+- return v1.SeccompProfileRuntimeDefault
++ return v1.SeccompProfileRuntimeDefault, nil
+ }
+
+- return ""
++ return "", nil
+ }
+
+-func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) *runtimeapi.SecurityProfile {
++func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) (*runtimeapi.SecurityProfile, error) {
+ if scmp == nil {
+ if fallbackToRuntimeDefault {
+ return &runtimeapi.SecurityProfile{
+ ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
+- }
++ }, nil
+ }
+ return &runtimeapi.SecurityProfile{
+ ProfileType: runtimeapi.SecurityProfile_Unconfined,
+- }
++ }, nil
+ }
+ if scmp.Type == v1.SeccompProfileTypeRuntimeDefault {
+ return &runtimeapi.SecurityProfile{
+ ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
+- }
++ }, nil
+ }
+- if scmp.Type == v1.SeccompProfileTypeLocalhost && scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 {
+- fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile)
+- return &runtimeapi.SecurityProfile{
+- ProfileType: runtimeapi.SecurityProfile_Localhost,
+- LocalhostRef: fname,
++ if scmp.Type == v1.SeccompProfileTypeLocalhost {
++ if scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 {
++ fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile)
++ return &runtimeapi.SecurityProfile{
++ ProfileType: runtimeapi.SecurityProfile_Localhost,
++ LocalhostRef: fname,
++ }, nil
++ } else {
++ return nil, fmt.Errorf("localhostProfile must be set if seccompProfile type is Localhost.")
+ }
+ }
+ return &runtimeapi.SecurityProfile{
+ ProfileType: runtimeapi.SecurityProfile_Unconfined,
+- }
++ }, nil
+ }
+
+ func (m *kubeGenericRuntimeManager) getSeccompProfile(annotations map[string]string, containerName string,
+- podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) *runtimeapi.SecurityProfile {
++ podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) (*runtimeapi.SecurityProfile, error) {
+ // container fields are applied first
+ if containerSecContext != nil && containerSecContext.SeccompProfile != nil {
+ return fieldSeccompProfile(containerSecContext.SeccompProfile, m.seccompProfileRoot, fallbackToRuntimeDefault)
+@@ -316,12 +324,12 @@ func (m *kubeGenericRuntimeManager) getSeccompProfile(annotations map[string]str
+ if fallbackToRuntimeDefault {
+ return &runtimeapi.SecurityProfile{
+ ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
+- }
++ }, nil
+ }
+
+ return &runtimeapi.SecurityProfile{
+ ProfileType: runtimeapi.SecurityProfile_Unconfined,
+- }
++ }, nil
+ }
+
+ func ipcNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
+diff --git a/pkg/kubelet/kuberuntime/helpers_test.go b/pkg/kubelet/kuberuntime/helpers_test.go
+index 25065f30411..70ad7250ce2 100644
+--- a/pkg/kubelet/kuberuntime/helpers_test.go
++++ b/pkg/kubelet/kuberuntime/helpers_test.go
+@@ -242,17 +242,18 @@ func TestFieldProfile(t *testing.T) {
+ scmpProfile *v1.SeccompProfile
+ rootPath string
+ expectedProfile string
++ expectedError string
+ }{
+ {
+ description: "no seccompProfile should return empty",
+ expectedProfile: "",
+ },
+ {
+- description: "type localhost without profile should return empty",
++ description: "type localhost without profile should return error",
+ scmpProfile: &v1.SeccompProfile{
+ Type: v1.SeccompProfileTypeLocalhost,
+ },
+- expectedProfile: "",
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+ description: "unknown type should return empty",
+@@ -279,7 +280,7 @@ func TestFieldProfile(t *testing.T) {
+ description: "SeccompProfileTypeLocalhost should return localhost",
+ scmpProfile: &v1.SeccompProfile{
+ Type: v1.SeccompProfileTypeLocalhost,
+- LocalhostProfile: utilpointer.StringPtr("profile.json"),
++ LocalhostProfile: utilpointer.String("profile.json"),
+ },
+ rootPath: "/test/",
+ expectedProfile: "localhost//test/profile.json",
+@@ -287,8 +288,13 @@ func TestFieldProfile(t *testing.T) {
+ }
+
+ for i, test := range tests {
+- seccompProfile := fieldProfile(test.scmpProfile, test.rootPath, false)
+- assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ seccompProfile, err := fieldProfile(test.scmpProfile, test.rootPath, false)
++ if test.expectedError != "" {
++ assert.EqualError(t, err, test.expectedError, "TestCase[%d]: %s", i, test.description)
++ } else {
++ assert.NoError(t, err, "TestCase[%d]: %s", i, test.description)
++ assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ }
+ }
+ }
+
+@@ -298,17 +304,18 @@ func TestFieldProfileDefaultSeccomp(t *testing.T) {
+ scmpProfile *v1.SeccompProfile
+ rootPath string
+ expectedProfile string
++ expectedError string
+ }{
+ {
+ description: "no seccompProfile should return runtime/default",
+ expectedProfile: v1.SeccompProfileRuntimeDefault,
+ },
+ {
+- description: "type localhost without profile should return runtime/default",
++ description: "type localhost without profile should return error",
+ scmpProfile: &v1.SeccompProfile{
+ Type: v1.SeccompProfileTypeLocalhost,
+ },
+- expectedProfile: v1.SeccompProfileRuntimeDefault,
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+ description: "unknown type should return runtime/default",
+@@ -335,7 +342,7 @@ func TestFieldProfileDefaultSeccomp(t *testing.T) {
+ description: "SeccompProfileTypeLocalhost should return localhost",
+ scmpProfile: &v1.SeccompProfile{
+ Type: v1.SeccompProfileTypeLocalhost,
+- LocalhostProfile: utilpointer.StringPtr("profile.json"),
++ LocalhostProfile: utilpointer.String("profile.json"),
+ },
+ rootPath: "/test/",
+ expectedProfile: "localhost//test/profile.json",
+@@ -343,8 +350,13 @@ func TestFieldProfileDefaultSeccomp(t *testing.T) {
+ }
+
+ for i, test := range tests {
+- seccompProfile := fieldProfile(test.scmpProfile, test.rootPath, true)
+- assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ seccompProfile, err := fieldProfile(test.scmpProfile, test.rootPath, true)
++ if test.expectedError != "" {
++ assert.EqualError(t, err, test.expectedError, "TestCase[%d]: %s", i, test.description)
++ } else {
++ assert.NoError(t, err, "TestCase[%d]: %s", i, test.description)
++ assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ }
+ }
+ }
+
+@@ -359,6 +371,7 @@ func TestGetSeccompProfilePath(t *testing.T) {
+ containerSc *v1.SecurityContext
+ containerName string
+ expectedProfile string
++ expectedError string
+ }{
+ {
+ description: "no seccomp should return empty",
+@@ -369,91 +382,6 @@ func TestGetSeccompProfilePath(t *testing.T) {
+ containerName: "container1",
+ expectedProfile: "",
+ },
+- {
+- description: "annotations: pod runtime/default seccomp profile should return runtime/default",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileRuntimeDefault,
+- },
+- expectedProfile: "runtime/default",
+- },
+- {
+- description: "annotations: pod docker/default seccomp profile should return docker/default",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.DeprecatedSeccompProfileDockerDefault,
+- },
+- expectedProfile: "docker/default",
+- },
+- {
+- description: "annotations: pod runtime/default seccomp profile with containerName should return runtime/default",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileRuntimeDefault,
+- },
+- containerName: "container1",
+- expectedProfile: "runtime/default",
+- },
+- {
+- description: "annotations: pod docker/default seccomp profile with containerName should return docker/default",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.DeprecatedSeccompProfileDockerDefault,
+- },
+- containerName: "container1",
+- expectedProfile: "docker/default",
+- },
+- {
+- description: "annotations: pod unconfined seccomp profile should return unconfined",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileNameUnconfined,
+- },
+- expectedProfile: "unconfined",
+- },
+- {
+- description: "annotations: pod unconfined seccomp profile with containerName should return unconfined",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileNameUnconfined,
+- },
+- containerName: "container1",
+- expectedProfile: "unconfined",
+- },
+- {
+- description: "annotations: pod localhost seccomp profile should return local profile path",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/chmod.json",
+- },
+- expectedProfile: seccompLocalhostPath("chmod.json"),
+- },
+- {
+- description: "annotations: pod localhost seccomp profile with containerName should return local profile path",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/chmod.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("chmod.json"),
+- },
+- {
+- description: "annotations: container localhost seccomp profile with containerName should return local profile path",
+- annotation: map[string]string{
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/chmod.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("chmod.json"),
+- },
+- {
+- description: "annotations: container localhost seccomp profile should override pod profile",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileNameUnconfined,
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/chmod.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("chmod.json"),
+- },
+- {
+- description: "annotations: container localhost seccomp profile with unmatched containerName should return empty",
+- annotation: map[string]string{
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/chmod.json",
+- },
+- containerName: "container2",
+- expectedProfile: "",
+- },
+ {
+ description: "pod seccomp profile set to unconfined returns unconfined",
+ podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}},
+@@ -480,14 +408,14 @@ func TestGetSeccompProfilePath(t *testing.T) {
+ expectedProfile: seccompLocalhostPath("filename"),
+ },
+ {
+- description: "pod seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns empty",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
+- expectedProfile: "",
++ description: "pod seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns error",
++ podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+- description: "container seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns empty",
+- containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
+- expectedProfile: "",
++ description: "container seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns error",
++ containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+ description: "container seccomp profile set to SeccompProfileTypeLocalhost returns 'localhost/' + LocalhostProfile",
+@@ -500,41 +428,16 @@ func TestGetSeccompProfilePath(t *testing.T) {
+ containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}},
+ expectedProfile: "runtime/default",
+ },
+- {
+- description: "prioritise container field over container annotation, pod field and pod annotation",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-pod-profile.json")}},
+- containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-cont-profile.json")}},
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/annota-pod-profile.json",
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/annota-cont-profile.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("field-cont-profile.json"),
+- },
+- {
+- description: "prioritise container annotation over pod field",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-pod-profile.json")}},
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/annota-pod-profile.json",
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/annota-cont-profile.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("annota-cont-profile.json"),
+- },
+- {
+- description: "prioritise pod field over pod annotation",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-pod-profile.json")}},
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/annota-pod-profile.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("field-pod-profile.json"),
+- },
+ }
+
+ for i, test := range tests {
+- seccompProfile := m.getSeccompProfilePath(test.annotation, test.containerName, test.podSc, test.containerSc, false)
+- assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ seccompProfile, err := m.getSeccompProfilePath(test.annotation, test.containerName, test.podSc, test.containerSc, false)
++ if test.expectedError != "" {
++ assert.EqualError(t, err, test.expectedError, "TestCase[%d]: %s", i, test.description)
++ } else {
++ assert.NoError(t, err, "TestCase[%d]: %s", i, test.description)
++ assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ }
+ }
+ }
+
+@@ -549,6 +452,7 @@ func TestGetSeccompProfilePathDefaultSeccomp(t *testing.T) {
+ containerSc *v1.SecurityContext
+ containerName string
+ expectedProfile string
++ expectedError string
+ }{
+ {
+ description: "no seccomp should return runtime/default",
+@@ -559,91 +463,6 @@ func TestGetSeccompProfilePathDefaultSeccomp(t *testing.T) {
+ containerName: "container1",
+ expectedProfile: v1.SeccompProfileRuntimeDefault,
+ },
+- {
+- description: "annotations: pod runtime/default seccomp profile should return runtime/default",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileRuntimeDefault,
+- },
+- expectedProfile: v1.SeccompProfileRuntimeDefault,
+- },
+- {
+- description: "annotations: pod docker/default seccomp profile should return docker/default",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.DeprecatedSeccompProfileDockerDefault,
+- },
+- expectedProfile: "docker/default",
+- },
+- {
+- description: "annotations: pod runtime/default seccomp profile with containerName should return runtime/default",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileRuntimeDefault,
+- },
+- containerName: "container1",
+- expectedProfile: v1.SeccompProfileRuntimeDefault,
+- },
+- {
+- description: "annotations: pod docker/default seccomp profile with containerName should return docker/default",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.DeprecatedSeccompProfileDockerDefault,
+- },
+- containerName: "container1",
+- expectedProfile: "docker/default",
+- },
+- {
+- description: "annotations: pod unconfined seccomp profile should return unconfined",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileNameUnconfined,
+- },
+- expectedProfile: "unconfined",
+- },
+- {
+- description: "annotations: pod unconfined seccomp profile with containerName should return unconfined",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileNameUnconfined,
+- },
+- containerName: "container1",
+- expectedProfile: "unconfined",
+- },
+- {
+- description: "annotations: pod localhost seccomp profile should return local profile path",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/chmod.json",
+- },
+- expectedProfile: seccompLocalhostPath("chmod.json"),
+- },
+- {
+- description: "annotations: pod localhost seccomp profile with containerName should return local profile path",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/chmod.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("chmod.json"),
+- },
+- {
+- description: "annotations: container localhost seccomp profile with containerName should return local profile path",
+- annotation: map[string]string{
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/chmod.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("chmod.json"),
+- },
+- {
+- description: "annotations: container localhost seccomp profile should override pod profile",
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: v1.SeccompProfileNameUnconfined,
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/chmod.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("chmod.json"),
+- },
+- {
+- description: "annotations: container localhost seccomp profile with unmatched containerName should return runtime/default",
+- annotation: map[string]string{
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/chmod.json",
+- },
+- containerName: "container2",
+- expectedProfile: v1.SeccompProfileRuntimeDefault,
+- },
+ {
+ description: "pod seccomp profile set to unconfined returns unconfined",
+ podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}},
+@@ -670,14 +489,14 @@ func TestGetSeccompProfilePathDefaultSeccomp(t *testing.T) {
+ expectedProfile: seccompLocalhostPath("filename"),
+ },
+ {
+- description: "pod seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns runtime/default",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
+- expectedProfile: v1.SeccompProfileRuntimeDefault,
++ description: "pod seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns error",
++ podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+- description: "container seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns runtime/default",
+- containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
+- expectedProfile: v1.SeccompProfileRuntimeDefault,
++ description: "container seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns error",
++ containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+ description: "container seccomp profile set to SeccompProfileTypeLocalhost returns 'localhost/' + LocalhostProfile",
+@@ -690,41 +509,16 @@ func TestGetSeccompProfilePathDefaultSeccomp(t *testing.T) {
+ containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}},
+ expectedProfile: "runtime/default",
+ },
+- {
+- description: "prioritise container field over container annotation, pod field and pod annotation",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-pod-profile.json")}},
+- containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-cont-profile.json")}},
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/annota-pod-profile.json",
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/annota-cont-profile.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("field-cont-profile.json"),
+- },
+- {
+- description: "prioritise container annotation over pod field",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-pod-profile.json")}},
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/annota-pod-profile.json",
+- v1.SeccompContainerAnnotationKeyPrefix + "container1": "localhost/annota-cont-profile.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("annota-cont-profile.json"),
+- },
+- {
+- description: "prioritise pod field over pod annotation",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-pod-profile.json")}},
+- annotation: map[string]string{
+- v1.SeccompPodAnnotationKey: "localhost/annota-pod-profile.json",
+- },
+- containerName: "container1",
+- expectedProfile: seccompLocalhostPath("field-pod-profile.json"),
+- },
+ }
+
+ for i, test := range tests {
+- seccompProfile := m.getSeccompProfilePath(test.annotation, test.containerName, test.podSc, test.containerSc, true)
+- assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ seccompProfile, err := m.getSeccompProfilePath(test.annotation, test.containerName, test.podSc, test.containerSc, true)
++ if test.expectedError != "" {
++ assert.EqualError(t, err, test.expectedError, "TestCase[%d]: %s", i, test.description)
++ } else {
++ assert.NoError(t, err, "TestCase[%d]: %s", i, test.description)
++ assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ }
+ }
+ }
+
+@@ -747,6 +541,7 @@ func TestGetSeccompProfile(t *testing.T) {
+ containerSc *v1.SecurityContext
+ containerName string
+ expectedProfile *runtimeapi.SecurityProfile
++ expectedError string
+ }{
+ {
+ description: "no seccomp should return unconfined",
+@@ -781,14 +576,14 @@ func TestGetSeccompProfile(t *testing.T) {
+ },
+ },
+ {
+- description: "pod seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns unconfined",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
+- expectedProfile: unconfinedProfile,
++ description: "pod seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns error",
++ podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+- description: "container seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns unconfined",
+- containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
+- expectedProfile: unconfinedProfile,
++ description: "container seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns error",
++ containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+ description: "container seccomp profile set to SeccompProfileTypeLocalhost returns 'localhost/' + LocalhostProfile",
+@@ -817,8 +612,13 @@ func TestGetSeccompProfile(t *testing.T) {
+ }
+
+ for i, test := range tests {
+- seccompProfile := m.getSeccompProfile(test.annotation, test.containerName, test.podSc, test.containerSc, false)
+- assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ seccompProfile, err := m.getSeccompProfile(test.annotation, test.containerName, test.podSc, test.containerSc, false)
++ if test.expectedError != "" {
++ assert.EqualError(t, err, test.expectedError, "TestCase[%d]: %s", i, test.description)
++ } else {
++ assert.NoError(t, err, "TestCase[%d]: %s", i, test.description)
++ assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ }
+ }
+ }
+
+@@ -841,6 +641,7 @@ func TestGetSeccompProfileDefaultSeccomp(t *testing.T) {
+ containerSc *v1.SecurityContext
+ containerName string
+ expectedProfile *runtimeapi.SecurityProfile
++ expectedError string
+ }{
+ {
+ description: "no seccomp should return RuntimeDefault",
+@@ -875,14 +676,14 @@ func TestGetSeccompProfileDefaultSeccomp(t *testing.T) {
+ },
+ },
+ {
+- description: "pod seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns unconfined",
+- podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
+- expectedProfile: unconfinedProfile,
++ description: "pod seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns error",
++ podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+- description: "container seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns unconfined",
+- containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
+- expectedProfile: unconfinedProfile,
++ description: "container seccomp profile set to SeccompProfileTypeLocalhost with empty LocalhostProfile returns error",
++ containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost}},
++ expectedError: "localhostProfile must be set if seccompProfile type is Localhost.",
+ },
+ {
+ description: "container seccomp profile set to SeccompProfileTypeLocalhost returns 'localhost/' + LocalhostProfile",
+@@ -911,8 +712,13 @@ func TestGetSeccompProfileDefaultSeccomp(t *testing.T) {
+ }
+
+ for i, test := range tests {
+- seccompProfile := m.getSeccompProfile(test.annotation, test.containerName, test.podSc, test.containerSc, true)
+- assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ seccompProfile, err := m.getSeccompProfile(test.annotation, test.containerName, test.podSc, test.containerSc, true)
++ if test.expectedError != "" {
++ assert.EqualError(t, err, test.expectedError, "TestCase[%d]: %s", i, test.description)
++ } else {
++ assert.NoError(t, err, "TestCase[%d]: %s", i, test.description)
++ assert.Equal(t, test.expectedProfile, seccompProfile, "TestCase[%d]: %s", i, test.description)
++ }
+ }
+ }
+
+diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
+index 6cb9e54729e..54670673bcd 100644
+--- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
++++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
+@@ -46,15 +46,23 @@ func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config
+ libcontainercgroups.IsCgroup2UnifiedMode() {
+ enforceMemoryQoS = true
+ }
+- config.Linux = m.generateLinuxContainerConfig(container, pod, uid, username, nsTarget, enforceMemoryQoS)
++ cl, err := m.generateLinuxContainerConfig(container, pod, uid, username, nsTarget, enforceMemoryQoS)
++ if err != nil {
++ return err
++ }
++ config.Linux = cl
+ return nil
+ }
+
+ // generateLinuxContainerConfig generates linux container config for kubelet runtime v1.
+-func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID, enforceMemoryQoS bool) *runtimeapi.LinuxContainerConfig {
++func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID, enforceMemoryQoS bool) (*runtimeapi.LinuxContainerConfig, error) {
++ sc, err := m.determineEffectiveSecurityContext(pod, container, uid, username)
++ if err != nil {
++ return nil, err
++ }
+ lc := &runtimeapi.LinuxContainerConfig{
+ Resources: &runtimeapi.LinuxContainerResources{},
+- SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username),
++ SecurityContext: sc,
+ }
+
+ if nsTarget != nil && lc.SecurityContext.NamespaceOptions.Pid == runtimeapi.NamespaceMode_CONTAINER {
+@@ -125,7 +133,7 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
+ }
+ }
+
+- return lc
++ return lc, nil
+ }
+
+ // calculateLinuxResources will create the linuxContainerResources type based on the provided CPU and memory resource requests, limits
+diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
+index 46817e00fb0..98f635cc932 100644
+--- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
++++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
+@@ -47,6 +47,8 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde
+ restartCountUint32 := uint32(restartCount)
+ envs := make([]*runtimeapi.KeyValue, len(opts.Envs))
+
++ l, _ := m.generateLinuxContainerConfig(container, pod, new(int64), "", nil, enforceMemoryQoS)
++
+ expectedConfig := &runtimeapi.ContainerConfig{
+ Metadata: &runtimeapi.ContainerMetadata{
+ Name: container.Name,
+@@ -64,7 +66,7 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde
+ Stdin: container.Stdin,
+ StdinOnce: container.StdinOnce,
+ Tty: container.TTY,
+- Linux: m.generateLinuxContainerConfig(container, pod, new(int64), "", nil, enforceMemoryQoS),
++ Linux: l,
+ Envs: envs,
+ }
+ return expectedConfig
+@@ -215,7 +217,8 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
+ },
+ }
+
+- linuxConfig := m.generateLinuxContainerConfig(&pod.Spec.Containers[0], pod, new(int64), "", nil, false)
++ linuxConfig, err := m.generateLinuxContainerConfig(&pod.Spec.Containers[0], pod, new(int64), "", nil, false)
++ assert.NoError(t, err)
+ assert.Equal(t, test.expected.CpuPeriod, linuxConfig.GetResources().CpuPeriod, test.name)
+ assert.Equal(t, test.expected.CpuQuota, linuxConfig.GetResources().CpuQuota, test.name)
+ assert.Equal(t, test.expected.CpuShares, linuxConfig.GetResources().CpuShares, test.name)
+@@ -329,6 +332,8 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
+ memoryLow int64
+ memoryHigh int64
+ }
++ l1, _ := m.generateLinuxContainerConfig(&pod1.Spec.Containers[0], pod1, new(int64), "", nil, true)
++ l2, _ := m.generateLinuxContainerConfig(&pod2.Spec.Containers[0], pod2, new(int64), "", nil, true)
+ tests := []struct {
+ name string
+ pod *v1.Pod
+@@ -338,7 +343,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
+ name: "Request128MBLimit256MB",
+ pod: pod1,
+ expected: &expectedResult{
+- m.generateLinuxContainerConfig(&pod1.Spec.Containers[0], pod1, new(int64), "", nil, true),
++ l1,
+ 128 * 1024 * 1024,
+ int64(float64(256*1024*1024) * m.memoryThrottlingFactor),
+ },
+@@ -347,7 +352,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
+ name: "Request128MBWithoutLimit",
+ pod: pod2,
+ expected: &expectedResult{
+- m.generateLinuxContainerConfig(&pod2.Spec.Containers[0], pod2, new(int64), "", nil, true),
++ l2,
+ 128 * 1024 * 1024,
+ int64(pod2MemoryHigh),
+ },
+@@ -355,7 +360,8 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
+ }
+
+ for _, test := range tests {
+- linuxConfig := m.generateLinuxContainerConfig(&test.pod.Spec.Containers[0], test.pod, new(int64), "", nil, true)
++ linuxConfig, err := m.generateLinuxContainerConfig(&test.pod.Spec.Containers[0], test.pod, new(int64), "", nil, true)
++ assert.NoError(t, err)
+ assert.Equal(t, test.expected.containerConfig, linuxConfig, test.name)
+ assert.Equal(t, linuxConfig.GetResources().GetUnified()["memory.min"], strconv.FormatInt(test.expected.memoryLow, 10), test.name)
+ assert.Equal(t, linuxConfig.GetResources().GetUnified()["memory.high"], strconv.FormatInt(test.expected.memoryHigh, 10), test.name)
+@@ -578,7 +584,8 @@ func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) {
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+- got := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", tc.target, false)
++ got, err := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", tc.target, false)
++ assert.NoError(t, err)
+ if diff := cmp.Diff(tc.want, got.SecurityContext.NamespaceOptions); diff != "" {
+ t.Errorf("%v: diff (-want +got):\n%v", t.Name(), diff)
+ }
+@@ -669,7 +676,8 @@ func TestGenerateLinuxContainerConfigSwap(t *testing.T) {
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ m.memorySwapBehavior = tc.swapSetting
+- actual := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", nil, false)
++ actual, err := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", nil, false)
++ assert.NoError(t, err)
+ assert.Equal(t, tc.expected, actual.Resources.MemorySwapLimitInBytes, "memory swap config for %s", tc.name)
+ })
+ }
+diff --git a/pkg/kubelet/kuberuntime/security_context.go b/pkg/kubelet/kuberuntime/security_context.go
+index c9d33e44305..3b575c8e974 100644
+--- a/pkg/kubelet/kuberuntime/security_context.go
++++ b/pkg/kubelet/kuberuntime/security_context.go
+@@ -24,7 +24,7 @@ import (
+ )
+
+ // determineEffectiveSecurityContext gets container's security context from v1.Pod and v1.Container.
+-func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container, uid *int64, username string) *runtimeapi.LinuxContainerSecurityContext {
++func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container, uid *int64, username string) (*runtimeapi.LinuxContainerSecurityContext, error) {
+ effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
+ synthesized := convertToRuntimeSecurityContext(effectiveSc)
+ if synthesized == nil {
+@@ -36,9 +36,16 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po
+
+ // TODO: Deprecated, remove after we switch to Seccomp field
+ // set SeccompProfilePath.
+- synthesized.SeccompProfilePath = m.getSeccompProfilePath(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault)
++ var err error
++ synthesized.SeccompProfilePath, err = m.getSeccompProfilePath(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault)
++ if err != nil {
++ return nil, err
++ }
+
+- synthesized.Seccomp = m.getSeccompProfile(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault)
++ synthesized.Seccomp, err = m.getSeccompProfile(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault)
++ if err != nil {
++ return nil, err
++ }
+
+ // set ApparmorProfile.
+ synthesized.ApparmorProfile = apparmor.GetProfileNameFromPodAnnotations(pod.Annotations, container.Name)
+@@ -74,7 +81,7 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po
+ synthesized.MaskedPaths = securitycontext.ConvertToRuntimeMaskedPaths(effectiveSc.ProcMount)
+ synthesized.ReadonlyPaths = securitycontext.ConvertToRuntimeReadonlyPaths(effectiveSc.ProcMount)
+
+- return synthesized
++ return synthesized, nil
+ }
+
+ // convertToRuntimeSecurityContext converts v1.SecurityContext to runtimeapi.SecurityContext.
+--
+2.40.0
diff --git a/recipes-containers/kubernetes/kubernetes/CVE-2023-2727-CVE-2023-2728.patch b/recipes-containers/kubernetes/kubernetes/CVE-2023-2727-CVE-2023-2728.patch
new file mode 100644
index 00000000..2a9e8489
--- /dev/null
+++ b/recipes-containers/kubernetes/kubernetes/CVE-2023-2727-CVE-2023-2728.patch
@@ -0,0 +1,559 @@
+From f754a4dee31455a0d7fc0f51cb85348af9ea5e1f Mon Sep 17 00:00:00 2001
+From: Rita Zhang <rita.z.zhang@gmail.com>
+Date: Tue, 30 May 2023 20:35:33 +0000
+Subject: [PATCH] Add ephemeralcontainer to imagepolicy securityaccount
+ admission plugin
+
+Signed-off-by: Rita Zhang <rita.z.zhang@gmail.com>
+
+CVE: CVE-2023-2727, CVE-2023-2728
+
+Upstream-Status: Backport [https://github.com/kubernetes/kubernetes/commit/f754a4dee31455a0d7fc0f51cb85348af9ea5e1f]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ plugin/pkg/admission/imagepolicy/admission.go | 28 ++--
+ .../admission/imagepolicy/admission_test.go | 148 +++++++++++++++++-
+ .../pkg/admission/serviceaccount/admission.go | 57 ++++++-
+ .../serviceaccount/admission_test.go | 88 +++++++++++
+ 4 files changed, 297 insertions(+), 24 deletions(-)
+
+diff --git a/plugin/pkg/admission/imagepolicy/admission.go b/plugin/pkg/admission/imagepolicy/admission.go
+index aea4f713eb5..3dfcbf95eef 100644
+--- a/plugin/pkg/admission/imagepolicy/admission.go
++++ b/plugin/pkg/admission/imagepolicy/admission.go
+@@ -46,6 +46,7 @@ import (
+
+ // PluginName indicates name of admission plugin.
+ const PluginName = "ImagePolicyWebhook"
++const ephemeralcontainers = "ephemeralcontainers"
+
+ // AuditKeyPrefix is used as the prefix for all audit keys handled by this
+ // pluggin. Some well known suffixes are listed below.
+@@ -132,8 +133,9 @@ func (a *Plugin) webhookError(pod *api.Pod, attributes admission.Attributes, err
+
+ // Validate makes an admission decision based on the request attributes
+ func (a *Plugin) Validate(ctx context.Context, attributes admission.Attributes, o admission.ObjectInterfaces) (err error) {
+- // Ignore all calls to subresources or resources other than pods.
+- if attributes.GetSubresource() != "" || attributes.GetResource().GroupResource() != api.Resource("pods") {
++ // Ignore all calls to subresources other than ephemeralcontainers or calls to resources other than pods.
++ subresource := attributes.GetSubresource()
++ if (subresource != "" && subresource != ephemeralcontainers) || attributes.GetResource().GroupResource() != api.Resource("pods") {
+ return nil
+ }
+
+@@ -144,13 +146,21 @@ func (a *Plugin) Validate(ctx context.Context, attributes admission.Attributes,
+
+ // Build list of ImageReviewContainerSpec
+ var imageReviewContainerSpecs []v1alpha1.ImageReviewContainerSpec
+- containers := make([]api.Container, 0, len(pod.Spec.Containers)+len(pod.Spec.InitContainers))
+- containers = append(containers, pod.Spec.Containers...)
+- containers = append(containers, pod.Spec.InitContainers...)
+- for _, c := range containers {
+- imageReviewContainerSpecs = append(imageReviewContainerSpecs, v1alpha1.ImageReviewContainerSpec{
+- Image: c.Image,
+- })
++ if subresource == "" {
++ containers := make([]api.Container, 0, len(pod.Spec.Containers)+len(pod.Spec.InitContainers))
++ containers = append(containers, pod.Spec.Containers...)
++ containers = append(containers, pod.Spec.InitContainers...)
++ for _, c := range containers {
++ imageReviewContainerSpecs = append(imageReviewContainerSpecs, v1alpha1.ImageReviewContainerSpec{
++ Image: c.Image,
++ })
++ }
++ } else if subresource == ephemeralcontainers {
++ for _, c := range pod.Spec.EphemeralContainers {
++ imageReviewContainerSpecs = append(imageReviewContainerSpecs, v1alpha1.ImageReviewContainerSpec{
++ Image: c.Image,
++ })
++ }
+ }
+ imageReview := v1alpha1.ImageReview{
+ Spec: v1alpha1.ImageReviewSpec{
+diff --git a/plugin/pkg/admission/imagepolicy/admission_test.go b/plugin/pkg/admission/imagepolicy/admission_test.go
+index d1f81d51950..a9188462fb9 100644
+--- a/plugin/pkg/admission/imagepolicy/admission_test.go
++++ b/plugin/pkg/admission/imagepolicy/admission_test.go
+@@ -37,7 +37,6 @@ import (
+ api "k8s.io/kubernetes/pkg/apis/core"
+
+ "fmt"
+- "io/ioutil"
+ "os"
+ "path/filepath"
+ "text/template"
+@@ -67,7 +66,7 @@ imagePolicy:
+ `
+
+ func TestNewFromConfig(t *testing.T) {
+- dir, err := ioutil.TempDir("", "")
++ dir, err := os.MkdirTemp("", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+@@ -92,7 +91,7 @@ func TestNewFromConfig(t *testing.T) {
+ {data.Key, clientKey},
+ }
+ for _, file := range files {
+- if err := ioutil.WriteFile(file.name, file.data, 0400); err != nil {
++ if err := os.WriteFile(file.name, file.data, 0400); err != nil {
+ t.Fatal(err)
+ }
+ }
+@@ -196,7 +195,7 @@ current-context: default
+ // Use a closure so defer statements trigger between loop iterations.
+ t.Run(tt.msg, func(t *testing.T) {
+ err := func() error {
+- tempfile, err := ioutil.TempFile("", "")
++ tempfile, err := os.CreateTemp("", "")
+ if err != nil {
+ return err
+ }
+@@ -211,7 +210,7 @@ current-context: default
+ return fmt.Errorf("failed to execute test template: %v", err)
+ }
+
+- tempconfigfile, err := ioutil.TempFile("", "")
++ tempconfigfile, err := os.CreateTemp("", "")
+ if err != nil {
+ return err
+ }
+@@ -359,7 +358,7 @@ func (m *mockService) HTTPStatusCode() int { return m.statusCode }
+ // newImagePolicyWebhook creates a temporary kubeconfig file from the provided arguments and attempts to load
+ // a new newImagePolicyWebhook from it.
+ func newImagePolicyWebhook(callbackURL string, clientCert, clientKey, ca []byte, cacheTime time.Duration, defaultAllow bool) (*Plugin, error) {
+- tempfile, err := ioutil.TempFile("", "")
++ tempfile, err := os.CreateTemp("", "")
+ if err != nil {
+ return nil, err
+ }
+@@ -381,7 +380,7 @@ func newImagePolicyWebhook(callbackURL string, clientCert, clientKey, ca []byte,
+ return nil, err
+ }
+
+- tempconfigfile, err := ioutil.TempFile("", "")
++ tempconfigfile, err := os.CreateTemp("", "")
+ if err != nil {
+ return nil, err
+ }
+@@ -595,17 +594,23 @@ func TestContainerCombinations(t *testing.T) {
+ test string
+ pod *api.Pod
+ wantAllowed, wantErr bool
++ subresource string
++ operation admission.Operation
+ }{
+ {
+ test: "Single container allowed",
+ pod: goodPod("good"),
+ wantAllowed: true,
++ subresource: "",
++ operation: admission.Create,
+ },
+ {
+ test: "Single container denied",
+ pod: goodPod("bad"),
+ wantAllowed: false,
+ wantErr: true,
++ subresource: "",
++ operation: admission.Create,
+ },
+ {
+ test: "One good container, one bad",
+@@ -627,6 +632,8 @@ func TestContainerCombinations(t *testing.T) {
+ },
+ wantAllowed: false,
+ wantErr: true,
++ subresource: "",
++ operation: admission.Create,
+ },
+ {
+ test: "Multiple good containers",
+@@ -648,6 +655,8 @@ func TestContainerCombinations(t *testing.T) {
+ },
+ wantAllowed: true,
+ wantErr: false,
++ subresource: "",
++ operation: admission.Create,
+ },
+ {
+ test: "Multiple bad containers",
+@@ -669,6 +678,8 @@ func TestContainerCombinations(t *testing.T) {
+ },
+ wantAllowed: false,
+ wantErr: true,
++ subresource: "",
++ operation: admission.Create,
+ },
+ {
+ test: "Good container, bad init container",
+@@ -692,6 +703,8 @@ func TestContainerCombinations(t *testing.T) {
+ },
+ wantAllowed: false,
+ wantErr: true,
++ subresource: "",
++ operation: admission.Create,
+ },
+ {
+ test: "Bad container, good init container",
+@@ -715,6 +728,8 @@ func TestContainerCombinations(t *testing.T) {
+ },
+ wantAllowed: false,
+ wantErr: true,
++ subresource: "",
++ operation: admission.Create,
+ },
+ {
+ test: "Good container, good init container",
+@@ -738,6 +753,123 @@ func TestContainerCombinations(t *testing.T) {
+ },
+ wantAllowed: true,
+ wantErr: false,
++ subresource: "",
++ operation: admission.Create,
++ },
++ {
++ test: "Good container, good init container, bad ephemeral container when updating ephemeralcontainers subresource",
++ pod: &api.Pod{
++ Spec: api.PodSpec{
++ ServiceAccountName: "default",
++ SecurityContext: &api.PodSecurityContext{},
++ Containers: []api.Container{
++ {
++ Image: "good",
++ SecurityContext: &api.SecurityContext{},
++ },
++ },
++ InitContainers: []api.Container{
++ {
++ Image: "good",
++ SecurityContext: &api.SecurityContext{},
++ },
++ },
++ EphemeralContainers: []api.EphemeralContainer{
++ {
++ EphemeralContainerCommon: api.EphemeralContainerCommon{
++ Image: "bad",
++ SecurityContext: &api.SecurityContext{},
++ },
++ },
++ },
++ },
++ },
++ wantAllowed: false,
++ wantErr: true,
++ subresource: "ephemeralcontainers",
++ operation: admission.Update,
++ },
++ {
++ test: "Good container, good init container, bad ephemeral container when updating subresource=='' which sets initContainer and container only",
++ pod: &api.Pod{
++ Spec: api.PodSpec{
++ ServiceAccountName: "default",
++ SecurityContext: &api.PodSecurityContext{},
++ Containers: []api.Container{
++ {
++ Image: "good",
++ SecurityContext: &api.SecurityContext{},
++ },
++ },
++ InitContainers: []api.Container{
++ {
++ Image: "good",
++ SecurityContext: &api.SecurityContext{},
++ },
++ },
++ EphemeralContainers: []api.EphemeralContainer{
++ {
++ EphemeralContainerCommon: api.EphemeralContainerCommon{
++ Image: "bad",
++ SecurityContext: &api.SecurityContext{},
++ },
++ },
++ },
++ },
++ },
++ wantAllowed: true,
++ wantErr: false,
++ subresource: "",
++ operation: admission.Update,
++ },
++
++ {
++ test: "Bad container, good ephemeral container when updating subresource=='ephemeralcontainers' which sets ephemeralcontainers only",
++ pod: &api.Pod{
++ Spec: api.PodSpec{
++ ServiceAccountName: "default",
++ SecurityContext: &api.PodSecurityContext{},
++ Containers: []api.Container{
++ {
++ Image: "bad",
++ SecurityContext: &api.SecurityContext{},
++ },
++ },
++ EphemeralContainers: []api.EphemeralContainer{
++ {
++ EphemeralContainerCommon: api.EphemeralContainerCommon{
++ Image: "good",
++ SecurityContext: &api.SecurityContext{},
++ },
++ },
++ },
++ },
++ },
++ wantAllowed: true,
++ wantErr: false,
++ subresource: "ephemeralcontainers",
++ operation: admission.Update,
++ },
++ {
++ test: "Good ephemeral container",
++ pod: &api.Pod{
++ Spec: api.PodSpec{
++ ServiceAccountName: "default",
++ SecurityContext: &api.PodSecurityContext{},
++ EphemeralContainers: []api.EphemeralContainer{
++ {
++ EphemeralContainerCommon: api.EphemeralContainerCommon{
++ Image: "good",
++ SecurityContext: &api.SecurityContext{},
++ },
++ },
++ },
++ },
++ },
++ wantAllowed: true,
++ wantErr: false,
++ subresource: "ephemeralcontainers",
++ operation: admission.Update,
+ },
+ }
+ for _, tt := range tests {
+@@ -759,7 +891,7 @@ func TestContainerCombinations(t *testing.T) {
+ return
+ }
+
+- attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{})
++ attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), tt.subresource, tt.operation, &metav1.CreateOptions{}, false, &user.DefaultInfo{})
+
+ err = wh.Validate(context.TODO(), attr, nil)
+ if tt.wantAllowed {
+diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go
+index 035d54ea8ea..f6e25f3c19d 100644
+--- a/plugin/pkg/admission/serviceaccount/admission.go
++++ b/plugin/pkg/admission/serviceaccount/admission.go
+@@ -100,7 +100,7 @@ var _ = genericadmissioninitializer.WantsExternalKubeInformerFactory(&Plugin{})
+ // 5. If MountServiceAccountToken is true, it adds a VolumeMount with the pod's ServiceAccount's api token secret to containers
+ func NewServiceAccount() *Plugin {
+ return &Plugin{
+- Handler: admission.NewHandler(admission.Create),
++ Handler: admission.NewHandler(admission.Create, admission.Update),
+ // TODO: enable this once we've swept secret usage to account for adding secret references to service accounts
+ LimitSecretReferences: false,
+ // Auto mount service account API token secrets
+@@ -140,7 +140,10 @@ func (s *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
+ if shouldIgnore(a) {
+ return nil
+ }
+-
++ if a.GetOperation() != admission.Create {
++ // we only mutate pods during create requests
++ return nil
++ }
+ pod := a.GetObject().(*api.Pod)
+
+ // Don't modify the spec of mirror pods.
+@@ -157,7 +160,7 @@ func (s *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
+
+ serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName)
+ if err != nil {
+- return admission.NewForbidden(a, fmt.Errorf("error looking up service account %s/%s: %v", a.GetNamespace(), pod.Spec.ServiceAccountName, err))
++ return admission.NewForbidden(a, fmt.Errorf("error looking up service account %s/%s: %w", a.GetNamespace(), pod.Spec.ServiceAccountName, err))
+ }
+ if s.MountServiceAccountToken && shouldAutomount(serviceAccount, pod) {
+ s.mountServiceAccountToken(serviceAccount, pod)
+@@ -180,6 +183,15 @@ func (s *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi
+
+ pod := a.GetObject().(*api.Pod)
+
++ if a.GetOperation() == admission.Update && a.GetSubresource() == "ephemeralcontainers" {
++ return s.limitEphemeralContainerSecretReferences(pod, a)
++ }
++
++ if a.GetOperation() != admission.Create {
++ // we only validate pod specs during create requests
++ return nil
++ }
++
+ // Mirror pods have restrictions on what they can reference
+ if _, isMirrorPod := pod.Annotations[api.MirrorPodAnnotationKey]; isMirrorPod {
+ if len(pod.Spec.ServiceAccountName) != 0 {
+@@ -205,6 +217,10 @@ func (s *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi
+ return nil
+ }
+
++ // Require container pods to have service accounts
++ if len(pod.Spec.ServiceAccountName) == 0 {
++ return admission.NewForbidden(a, fmt.Errorf("no service account specified for pod %s/%s", a.GetNamespace(), pod.Name))
++ }
+ // Ensure the referenced service account exists
+ serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName)
+ if err != nil {
+@@ -221,10 +237,7 @@ func (s *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi
+ }
+
+ func shouldIgnore(a admission.Attributes) bool {
+- if a.GetResource().GroupResource() != api.Resource("pods") {
+- return true
+- }
+- if a.GetSubresource() != "" {
++ if a.GetResource().GroupResource() != api.Resource("pods") || (a.GetSubresource() != "" && a.GetSubresource() != "ephemeralcontainers") {
+ return true
+ }
+ obj := a.GetObject()
+@@ -350,6 +363,36 @@ func (s *Plugin) limitSecretReferences(serviceAccount *corev1.ServiceAccount, po
+ return nil
+ }
+
++func (s *Plugin) limitEphemeralContainerSecretReferences(pod *api.Pod, a admission.Attributes) error {
++ // Require ephemeral container pods to have service accounts
++ if len(pod.Spec.ServiceAccountName) == 0 {
++ return admission.NewForbidden(a, fmt.Errorf("no service account specified for pod %s/%s", a.GetNamespace(), pod.Name))
++ }
++ // Ensure the referenced service account exists
++ serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName)
++ if err != nil {
++ return admission.NewForbidden(a, fmt.Errorf("error looking up service account %s/%s: %w", a.GetNamespace(), pod.Spec.ServiceAccountName, err))
++ }
++ if !s.enforceMountableSecrets(serviceAccount) {
++ return nil
++ }
++ // Ensure all secrets the ephemeral containers reference are allowed by the service account
++ mountableSecrets := sets.NewString()
++ for _, s := range serviceAccount.Secrets {
++ mountableSecrets.Insert(s.Name)
++ }
++ for _, container := range pod.Spec.EphemeralContainers {
++ for _, env := range container.Env {
++ if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil {
++ if !mountableSecrets.Has(env.ValueFrom.SecretKeyRef.Name) {
++ return fmt.Errorf("ephemeral container %s with envVar %s referencing secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", container.Name, env.Name, env.ValueFrom.SecretKeyRef.Name, serviceAccount.Name)
++ }
++ }
++ }
++ }
++ return nil
++}
++
+ func (s *Plugin) mountServiceAccountToken(serviceAccount *corev1.ServiceAccount, pod *api.Pod) {
+ // Find the volume and volume name for the ServiceAccountTokenSecret if it already exists
+ tokenVolumeName := ""
+diff --git a/plugin/pkg/admission/serviceaccount/admission_test.go b/plugin/pkg/admission/serviceaccount/admission_test.go
+index ca43abf9c3f..f5359253985 100644
+--- a/plugin/pkg/admission/serviceaccount/admission_test.go
++++ b/plugin/pkg/admission/serviceaccount/admission_test.go
+@@ -545,6 +545,34 @@ func TestAllowsReferencedSecret(t *testing.T) {
+ if err := admissiontesting.WithReinvocationTesting(t, admit).Admit(context.TODO(), attrs, nil); err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
++
++ pod2 = &api.Pod{
++ Spec: api.PodSpec{
++ ServiceAccountName: DefaultServiceAccountName,
++ EphemeralContainers: []api.EphemeralContainer{
++ {
++ EphemeralContainerCommon: api.EphemeralContainerCommon{
++ Name: "container-2",
++ Env: []api.EnvVar{
++ {
++ Name: "env-1",
++ ValueFrom: &api.EnvVarSource{
++ SecretKeyRef: &api.SecretKeySelector{
++ LocalObjectReference: api.LocalObjectReference{Name: "foo"},
++ },
++ },
++ },
++ },
++ },
++ },
++ },
++ },
++ }
++ // validate enforces restrictions on secret mounts when operation==create and subresource=='' or operation==update and subresource==ephemeralcontainers"
++ attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "ephemeralcontainers", admission.Update, &metav1.UpdateOptions{}, false, nil)
++ if err := admit.Validate(context.TODO(), attrs, nil); err != nil {
++ t.Errorf("Unexpected error: %v", err)
++ }
+ }
+
+ func TestRejectsUnreferencedSecretVolumes(t *testing.T) {
+@@ -622,6 +650,66 @@ func TestRejectsUnreferencedSecretVolumes(t *testing.T) {
+ if err := admissiontesting.WithReinvocationTesting(t, admit).Admit(context.TODO(), attrs, nil); err == nil || !strings.Contains(err.Error(), "with envVar") {
+ t.Errorf("Unexpected error: %v", err)
+ }
++
++ pod2 = &api.Pod{
++ Spec: api.PodSpec{
++ ServiceAccountName: DefaultServiceAccountName,
++ InitContainers: []api.Container{
++ {
++ Name: "container-1",
++ Env: []api.EnvVar{
++ {
++ Name: "env-1",
++ ValueFrom: &api.EnvVarSource{
++ SecretKeyRef: &api.SecretKeySelector{
++ LocalObjectReference: api.LocalObjectReference{Name: "foo"},
++ },
++ },
++ },
++ },
++ },
++ },
++ },
++ }
++ attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil)
++ if err := admissiontesting.WithReinvocationTesting(t, admit).Admit(context.TODO(), attrs, nil); err != nil {
++ t.Errorf("admit only enforces restrictions on secret mounts when operation==create. Unexpected error: %v", err)
++ }
++ attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil)
++ if err := admit.Validate(context.TODO(), attrs, nil); err == nil || !strings.Contains(err.Error(), "with envVar") {
++ t.Errorf("validate only enforces restrictions on secret mounts when operation==create and subresource==''. Unexpected error: %v", err)
++ }
++
++ pod2 = &api.Pod{
++ Spec: api.PodSpec{
++ ServiceAccountName: DefaultServiceAccountName,
++ EphemeralContainers: []api.EphemeralContainer{
++ {
++ EphemeralContainerCommon: api.EphemeralContainerCommon{
++ Name: "container-2",
++ Env: []api.EnvVar{
++ {
++ Name: "env-1",
++ ValueFrom: &api.EnvVarSource{
++ SecretKeyRef: &api.SecretKeySelector{
++ LocalObjectReference: api.LocalObjectReference{Name: "foo"},
++ },
++ },
++ },
++ },
++ },
++ },
++ },
++ },
++ }
++ attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil)
++ if err := admissiontesting.WithReinvocationTesting(t, admit).Admit(context.TODO(), attrs, nil); err != nil {
++ t.Errorf("admit only enforces restrictions on secret mounts when operation==create and subresource==''. Unexpected error: %v", err)
++ }
++ attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "ephemeralcontainers", admission.Update, &metav1.UpdateOptions{}, false, nil)
++ if err := admit.Validate(context.TODO(), attrs, nil); err == nil || !strings.Contains(err.Error(), "with envVar") {
++ t.Errorf("validate enforces restrictions on secret mounts when operation==update and subresource==ephemeralcontainers. Unexpected error: %v", err)
++ }
+ }
+
+ func TestAllowUnreferencedSecretVolumesForPermissiveSAs(t *testing.T) {
+--
+2.40.0
diff --git a/recipes-containers/kubernetes/kubernetes_git.bb b/recipes-containers/kubernetes/kubernetes_git.bb
index 96cae5c6..b0c87c47 100644
--- a/recipes-containers/kubernetes/kubernetes_git.bb
+++ b/recipes-containers/kubernetes/kubernetes_git.bb
@@ -5,15 +5,15 @@ applications across multiple hosts, providing basic mechanisms for deployment, \
maintenance, and scaling of applications. \
"
-PV = "v1.23.6+git${SRCREV_kubernetes}"
-SRCREV_kubernetes = "fbcfa33018159c033aee77b0d5456df6771aa9b5"
+PV = "v1.23.17+git${SRCREV_kubernetes}"
+SRCREV_kubernetes = "953be8927218ec8067e1af2641e540238ffd7576"
SRCREV_kubernetes-release = "7c1aa83dac555de6f05500911467b70aca4949f0"
PE = "1"
BBCLASSEXTEND = "devupstream:target"
LIC_FILES_CHKSUM:class-devupstream = "file://src/import/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
DEFAULT_PREFERENCE:class-devupstream = "-1"
-SRC_URI:classedevupstream = "git://github.com/kubernetes/kubernetes.git;branch=master;name=kubernetes;protocol=https \
+SRC_URI:class-devupstream = "git://github.com/kubernetes/kubernetes.git;branch=master;name=kubernetes;protocol=https \
git://github.com/kubernetes/release;branch=master;name=kubernetes-release;destsuffix=git/release;protocol=https \
"
SRCREV_kubernetes:class-devupstream = "d2f6eb6339de25cef04850b6d9be8335d52324cd"
@@ -26,10 +26,12 @@ SRC_URI = "git://github.com/kubernetes/kubernetes.git;branch=release-1.23;name=k
git://github.com/kubernetes/release;branch=master;name=kubernetes-release;destsuffix=git/release;protocol=https"
SRC_URI:append = " \
- file://0001-hack-lib-golang.sh-use-CC-from-environment.patch \
- file://0001-cross-don-t-build-tests-by-default.patch \
- file://0001-build-golang.sh-convert-remaining-go-calls-to-use.patch \
- file://0001-Makefile.generated_files-Fix-race-issue-for-installi.patch \
+ file://0001-hack-lib-golang.sh-use-CC-from-environment.patch;patchdir=src/import \
+ file://0001-cross-don-t-build-tests-by-default.patch;patchdir=src/import \
+ file://0001-build-golang.sh-convert-remaining-go-calls-to-use.patch;patchdir=src/import \
+ file://0001-Makefile.generated_files-Fix-race-issue-for-installi.patch;patchdir=src/import \
+ file://CVE-2023-2431.patch;patchdir=src/import \
+ file://CVE-2023-2727-CVE-2023-2728.patch;patchdir=src/import \
file://cni-containerd-net.conflist \
file://k8s-init \
file://99-kubernetes.conf \
@@ -142,7 +144,7 @@ FILES:kube-proxy = "${bindir}/kube-proxy"
FILES:${PN}-misc = "${bindir} ${sysconfdir}/sysctl.d"
ALLOW_EMPTY:${PN}-host = "1"
-FILE:${PN}-host = "${BIN_PREFIX}/bin/k8s-init"
+FILES:${PN}-host = "${BIN_PREFIX}/bin/k8s-init"
RDEPENDS:${PN}-host = "${PN}"
RRECOMMENDS:${PN} = "\
diff --git a/recipes-containers/lxc/files/0001-Patching-an-incoming-CVE-CVE-2022-47952.patch b/recipes-containers/lxc/files/0001-Patching-an-incoming-CVE-CVE-2022-47952.patch
new file mode 100644
index 00000000..d5a02f40
--- /dev/null
+++ b/recipes-containers/lxc/files/0001-Patching-an-incoming-CVE-CVE-2022-47952.patch
@@ -0,0 +1,76 @@
+From 1b0469530d7a38b8f8990e114b52530d1bf7f3b8 Mon Sep 17 00:00:00 2001
+From: Maher Azzouzi <maherazz04@gmail.com>
+Date: Sun, 25 Dec 2022 13:50:25 +0100
+Subject: [PATCH] Patching an incoming CVE (CVE-2022-47952)
+
+lxc-user-nic in lxc through 5.0.1 is installed setuid root, and may
+allow local users to infer whether any file exists, even within a
+protected directory tree, because "Failed to open" often indicates
+that a file does not exist, whereas "does not refer to a network
+namespace path" often indicates that a file exists. NOTE: this is
+different from CVE-2018-6556 because the CVE-2018-6556 fix design was
+based on the premise that "we will report back to the user that the
+open() failed but the user has no way of knowing why it failed";
+however, in many realistic cases, there are no plausible reasons for
+failing except that the file does not exist.
+
+PoC:
+> % ls /l
+> ls: cannot open directory '/l': Permission denied
+> % /usr/lib/x86_64-linux-gnu/lxc/lxc-user-nic delete lol lol /l/h/tt h h
+> cmd/lxc_user_nic.c: 1096: main: Failed to open "/l/h/tt" <----- file does not exist.
+> % /usr/lib/x86_64-linux-gnu/lxc/lxc-user-nic delete lol lol /l/h/t h h
+> cmd/lxc_user_nic.c: 1101: main: Path "/l/h/t" does not refer to a network namespace path <---- file exist!
+
+Upstream-Status: Backport from https://github.com/lxc/lxc/commit/1b0469530d7a38b8f8990e114b52530d1bf7f3b8
+CVE: CVE-2022-47952
+
+Signed-off-by: MaherAzzouzi <maherazz04@gmail.com>
+Acked-by: Serge Hallyn <serge@hallyn.com>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+---
+ src/lxc/cmd/lxc_user_nic.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+diff --git a/src/lxc/cmd/lxc_user_nic.c b/src/lxc/cmd/lxc_user_nic.c
+index a91e2259d..69bc6f17d 100644
+--- a/src/lxc/cmd/lxc_user_nic.c
++++ b/src/lxc/cmd/lxc_user_nic.c
+@@ -1085,20 +1085,17 @@ int main(int argc, char *argv[])
+ } else if (request == LXC_USERNIC_DELETE) {
+ char opath[LXC_PROC_PID_FD_LEN];
+
+- /* Open the path with O_PATH which will not trigger an actual
+- * open(). Don't report an errno to the caller to not leak
+- * information whether the path exists or not.
+- * When stracing setuid is stripped so this is not a concern
+- * either.
+- */
++ // Keep in mind CVE-2022-47952: It's crucial not to leak any
++ // information whether open() succeeded of failed.
++
+ netns_fd = open(args.pid, O_PATH | O_CLOEXEC);
+ if (netns_fd < 0) {
+- usernic_error("Failed to open \"%s\"\n", args.pid);
++ usernic_error("Failed while opening netns file for \"%s\"\n", args.pid);
+ _exit(EXIT_FAILURE);
+ }
+
+ if (!fhas_fs_type(netns_fd, NSFS_MAGIC)) {
+- usernic_error("Path \"%s\" does not refer to a network namespace path\n", args.pid);
++ usernic_error("Failed while opening netns file for \"%s\"\n", args.pid);
+ close(netns_fd);
+ _exit(EXIT_FAILURE);
+ }
+@@ -1112,7 +1109,7 @@ int main(int argc, char *argv[])
+ /* Now get an fd that we can use in setns() calls. */
+ ret = open(opath, O_RDONLY | O_CLOEXEC);
+ if (ret < 0) {
+- CMD_SYSERROR("Failed to open \"%s\"\n", args.pid);
++ CMD_SYSERROR("Failed while opening netns file for \"%s\"\n", args.pid);
+ close(netns_fd);
+ _exit(EXIT_FAILURE);
+ }
+--
+2.34.1
+
diff --git a/recipes-containers/lxc/files/templates-use-curl-instead-of-wget.patch b/recipes-containers/lxc/files/templates-use-curl-instead-of-wget.patch
index f06e5969..a3347236 100644
--- a/recipes-containers/lxc/files/templates-use-curl-instead-of-wget.patch
+++ b/recipes-containers/lxc/files/templates-use-curl-instead-of-wget.patch
@@ -1,22 +1,24 @@
-From 1db2db7783bd7ec2aa1da86e640019891634c659 Mon Sep 17 00:00:00 2001
-From: Joakim Roubert <joakimr@axis.com>
-Date: Fri, 16 Aug 2019 07:52:48 +0200
-Subject: [PATCH] Use curl instead of wget
+From 3e4cb0b738649f7750413cefbcfdb2115213ad0d Mon Sep 17 00:00:00 2001
+From: Bruce Ashfield <bruce.ashfield@gmail.com>
+Date: Sun, 14 Aug 2022 14:08:56 -0400
+Subject: [PATCH] download: Use curl instead of wget
When curl's MIT license is preferable to wget's GPLv3.
-Change-Id: I4684ae7569704514fdcc63e0655c556efcaf44f8
+Upstream-Status: Inappropriate [embedded specific]
+
Signed-off-by: Joakim Roubert <joakimr@axis.com>
Signed-off-by: Yanfei Xu <yanfei.xu@windriver.com>
+Signed-off-by: Bruce Ashfield <bruce.ashfield@gmail.com>
---
- templates/lxc-download.in | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
+ templates/lxc-download.in | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
-Index: git/templates/lxc-download.in
-===================================================================
---- git.orig/templates/lxc-download.in
-+++ git/templates/lxc-download.in
-@@ -59,9 +59,9 @@
+diff --git a/templates/lxc-download.in b/templates/lxc-download.in
+index a62ddf482..690307338 100755
+--- a/templates/lxc-download.in
++++ b/templates/lxc-download.in
+@@ -59,9 +59,9 @@ cleanup() {
fi
}
@@ -28,19 +30,16 @@ Index: git/templates/lxc-download.in
return 0
fi
done
-@@ -70,8 +70,9 @@
+@@ -70,7 +70,7 @@ wget_wrapper() {
}
download_file() {
- if ! wget_wrapper --user-agent="lxc/@PACKAGE_VERSION@ compat:${DOWNLOAD_COMPAT_LEVEL}" -T 30 -q "https://${DOWNLOAD_SERVER}/$1" -O "$2" >/dev/null 2>&1; then
-- if [ "$3" = "noexit" ]; then
-+ if ! curl_wrapper --user-agent="lxc/@PACKAGE_VERSION@ compat:${DOWNLOAD_COMPAT_LEVEL}" -m 30 -s "https://${DOWNLOAD_SERVER}/$1" -o "$2" >/dev/null 2>&1; then
-+ if ! curl_wrapper --user-agent="lxc/@PACKAGE_VERSION@ compat:${DOWNLOAD_COMPAT_LEVEL}" -m 30 -s "http://${DOWNLOAD_SERVER}/$1" -o "$2" >/dev/null 2>&1; then
-+ if [ "$3" = "noexit" ]; then
++ if ! curl_wrapper -L -f --user-agent "lxc/@PACKAGE_VERSION@ compat:${DOWNLOAD_COMPAT_LEVEL}" -m 30 -s "https://${DOWNLOAD_SERVER}/$1" -o "$2" >/dev/null 2>&1; then
+ if [ "$3" = "noexit" ]; then
return 1
else
- echo "ERROR: Failed to download https://${DOWNLOAD_SERVER}/$1" 1>&2
-@@ -176,7 +177,7 @@
+@@ -176,7 +176,7 @@ while :; do
done
# Check for required binaries
@@ -49,3 +48,6 @@ Index: git/templates/lxc-download.in
if ! command -V "${bin}" >/dev/null 2>&1; then
echo "ERROR: Missing required tool: ${bin}" 1>&2
exit 1
+--
+2.25.1
+
diff --git a/recipes-containers/lxc/lxc_git.bb b/recipes-containers/lxc/lxc_git.bb
index cecb5914..71dce7de 100644
--- a/recipes-containers/lxc/lxc_git.bb
+++ b/recipes-containers/lxc/lxc_git.bb
@@ -48,6 +48,7 @@ SRC_URI = "git://github.com/lxc/lxc.git;branch=stable-4.0;protocol=https \
file://tests-our-init-is-not-busybox.patch \
file://dnsmasq.conf \
file://lxc-net \
+ file://0001-Patching-an-incoming-CVE-CVE-2022-47952.patch \
"
SRCREV = "5ba5725cb4a210c25707beeca64fde5f561d1c71"
diff --git a/recipes-containers/nerdctl/nerdctl_git.bb b/recipes-containers/nerdctl/nerdctl_git.bb
index 5d4d827a..3d8140a2 100644
--- a/recipes-containers/nerdctl/nerdctl_git.bb
+++ b/recipes-containers/nerdctl/nerdctl_git.bb
@@ -13,7 +13,7 @@ DEPENDS = " \
SRCREV_FORMAT="nerdcli_cgroups"
SRCREV_nerdcli = "48f189a53a24c12838433f5bb5dd57f536816a8a"
-SRC_URI = "git://github.com/containerd/nerdctl.git;name=nerdcli;branch=master;protocol=https"
+SRC_URI = "git://github.com/containerd/nerdctl.git;name=nerdcli;branch=main;protocol=https"
include src_uri.inc
@@ -239,8 +239,8 @@ do_compile() {
}
do_install() {
- install -d "${D}${BIN_PREFIX}/bin"
- install -m 755 "${S}/src/import/_output/nerdctl" "${D}${BIN_PREFIX}/bin"
+ install -d "${D}${BIN_PREFIX}${base_bindir}"
+ install -m 755 "${S}/src/import/_output/nerdctl" "${D}${BIN_PREFIX}${base_bindir}"
}
INHIBIT_PACKAGE_STRIP = "1"
diff --git a/recipes-containers/podman/podman/0001-Rename-BUILDFLAGS-to-GOBUILDFLAGS.patch b/recipes-containers/podman/podman/0001-Rename-BUILDFLAGS-to-GOBUILDFLAGS.patch
new file mode 100644
index 00000000..13a736e4
--- /dev/null
+++ b/recipes-containers/podman/podman/0001-Rename-BUILDFLAGS-to-GOBUILDFLAGS.patch
@@ -0,0 +1,125 @@
+From 3e18f3a4db638a3df48f49aa0a539f8bb048afc9 Mon Sep 17 00:00:00 2001
+From: Andrei Gherzan <andrei.gherzan@huawei.com>
+Date: Tue, 5 Jul 2022 11:51:56 +0200
+Subject: [PATCH] Rename BUILDFLAGS to GOBUILDFLAGS
+
+Yocto uses GOBUILDFLAGS to pass the right build flags while the Makefile
+uses BUILDFLAGS. Align them accordingly.
+
+See go.bbclass for more information.
+
+Upstream-Status: Inappropriate [OE specific]
+Signed-off-by: Andrei Gherzan <andrei.gherzan@huawei.com>
+---
+ Makefile | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index cb230d8e9..538b28d41 100644
+--- a/Makefile
++++ b/Makefile
+@@ -69,7 +69,7 @@ PRE_COMMIT = $(shell command -v bin/venv/bin/pre-commit ~/.local/bin/pre-commit
+ # triggered.
+ SOURCES = $(shell find . -path './.*' -prune -o \( \( -name '*.go' -o -name '*.c' \) -a ! -name '*_test.go' \) -print)
+
+-BUILDFLAGS := -mod=vendor $(BUILDFLAGS)
++GOBUILDFLAGS := -mod=vendor $(GOBUILDFLAGS)
+
+ BUILDTAGS_CROSS ?= containers_image_openpgp exclude_graphdriver_btrfs exclude_graphdriver_devicemapper exclude_graphdriver_overlay
+ CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
+@@ -264,11 +264,11 @@ gofmt: ## Verify the source code gofmt
+
+ .PHONY: test/checkseccomp/checkseccomp
+ test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go)
+- $(GOCMD) build $(BUILDFLAGS) $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o $@ ./test/checkseccomp
++ $(GOCMD) build $(GOBUILDFLAGS) $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o $@ ./test/checkseccomp
+
+ .PHONY: test/testvol/testvol
+ test/testvol/testvol: .gopathok $(wildcard test/testvol/*.go)
+- $(GOCMD) build $(BUILDFLAGS) $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' -o $@ ./test/testvol
++ $(GOCMD) build $(GOBUILDFLAGS) $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' -o $@ ./test/testvol
+
+ .PHONY: volume-plugin-test-image
+ volume-plugin-test-img:
+@@ -276,7 +276,7 @@ volume-plugin-test-img:
+
+ .PHONY: test/goecho/goecho
+ test/goecho/goecho: .gopathok $(wildcard test/goecho/*.go)
+- $(GOCMD) build $(BUILDFLAGS) $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' -o $@ ./test/goecho
++ $(GOCMD) build $(GOBUILDFLAGS) $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' -o $@ ./test/goecho
+
+ test/version/version: .gopathok version/version.go
+ $(GO) build -o $@ ./test/version/
+@@ -318,7 +318,7 @@ ifeq (,$(findstring systemd,$(BUILDTAGS)))
+ distro for journald support."
+ endif
+ $(GOCMD) build \
+- $(BUILDFLAGS) \
++ $(GOBUILDFLAGS) \
+ $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' \
+ -tags "$(BUILDTAGS)" \
+ -o $@ ./cmd/podman
+@@ -329,14 +329,14 @@ $(SRCBINDIR):
+
+ $(SRCBINDIR)/podman$(BINSFX): $(SRCBINDIR) .gopathok $(SOURCES) go.mod go.sum
+ $(GOCMD) build \
+- $(BUILDFLAGS) \
++ $(GOBUILDFLAGS) \
+ $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' \
+ -tags "${REMOTETAGS}" \
+ -o $@ ./cmd/podman
+
+ $(SRCBINDIR)/podman-remote-static: $(SRCBINDIR) .gopathok $(SOURCES) go.mod go.sum
+ $(GOCMD) build \
+- $(BUILDFLAGS) \
++ $(GOBUILDFLAGS) \
+ $(GO_LDFLAGS) '$(LDFLAGS_PODMAN_STATIC)' \
+ -tags "${REMOTETAGS}" \
+ -o $@ ./cmd/podman
+@@ -371,7 +371,7 @@ podman-winpath: .gopathok $(SOURCES) go.mod go.sum
+ CGO_ENABLED=0 \
+ GOOS=windows \
+ $(GO) build \
+- $(BUILDFLAGS) \
++ $(GOBUILDFLAGS) \
+ -ldflags -H=windowsgui \
+ -o bin/windows/winpath.exe \
+ ./cmd/winpath
+@@ -390,14 +390,14 @@ podman-mac-helper: ## Build podman-mac-helper for macOS
+ GOOS=darwin \
+ GOARCH=$(GOARCH) \
+ $(GO) build \
+- $(BUILDFLAGS) \
++ $(GOBUILDFLAGS) \
+ -o bin/darwin/podman-mac-helper \
+ ./cmd/podman-mac-helper
+
+ bin/rootlessport: .gopathok $(SOURCES) go.mod go.sum
+ CGO_ENABLED=$(CGO_ENABLED) \
+ $(GO) build \
+- $(BUILDFLAGS) \
++ $(GOBUILDFLAGS) \
+ -o $@ ./cmd/rootlessport
+
+ .PHONY: rootlessport
+@@ -420,7 +420,7 @@ bin/podman.cross.%: .gopathok
+ GOARCH="$${TARGET##*.}"; \
+ CGO_ENABLED=0 \
+ $(GO) build \
+- $(BUILDFLAGS) \
++ $(GOBUILDFLAGS) \
+ $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' \
+ -tags '$(BUILDTAGS_CROSS)' \
+ -o "$@" ./cmd/podman
+@@ -864,7 +864,7 @@ install.tools: .install.goimports .install.gitvalidation .install.md2man .instal
+ .PHONY: .install.ginkgo
+ .install.ginkgo: .gopathok
+ if [ ! -x "$(GOBIN)/ginkgo" ]; then \
+- $(GO) install $(BUILDFLAGS) ./vendor/github.com/onsi/ginkgo/ginkgo ; \
++ $(GO) install $(GOBUILDFLAGS) ./vendor/github.com/onsi/ginkgo/ginkgo ; \
+ fi
+
+ .PHONY: .install.gitvalidation
+--
+2.25.1
+
diff --git a/recipes-containers/podman/podman/0002-Define-ActKillThread-equal-to-ActKill.patch b/recipes-containers/podman/podman/0002-Define-ActKillThread-equal-to-ActKill.patch
new file mode 100644
index 00000000..ba51d4ac
--- /dev/null
+++ b/recipes-containers/podman/podman/0002-Define-ActKillThread-equal-to-ActKill.patch
@@ -0,0 +1,90 @@
+From f2aa0359bcc776239bda8a4eb84957b97ef55c35 Mon Sep 17 00:00:00 2001
+From: Tonis Tiigi <tonistiigi@gmail.com>
+Date: Fri, 28 Jan 2022 14:44:56 -0800
+Subject: [PATCH] Define ActKillThread equal to ActKill
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+These constants are equal in libseccomp but Go definitions
+were defined separately. This resulted in dead code that
+never executed due to identical case statements in switch.
+Go can usually detect these error cases and refuses to build
+but for some reason this detection doesn’t work with cgo+gcc.
+Clang detects the equal constants correctly and therefore
+libseccomp-golang builds with clang broke after ActKillThread
+was added.
+
+In order to fix the clang build only removal of the
+switch case is needed. But I assumed that the setter/getter
+logic is supposed to work for ActKillThread as well
+and only way to ensure that is to set them equal like they
+are in C.
+
+Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
+Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
+Acked-by: Tom Hromatka <tom.hromatka@oracle.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Andrei Gherzan <andrei.gherzan@huawei.com>
+Upstream-Status: Backport [https://github.com/seccomp/libseccomp-golang/commit/c35397d0ea8f285a0be78693bb2fd37b06952453]
+---
+ seccomp.go | 8 ++++----
+ seccomp_internal.go | 4 ----
+ 2 files changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/seccomp.go b/seccomp.go
+index e9b92e2..32f6ab2 100644
+--- a/seccomp.go
++++ b/seccomp.go
+@@ -214,14 +214,14 @@ const (
+ // This action is only usable when libseccomp API level 3 or higher is
+ // supported.
+ ActLog ScmpAction = iota
+- // ActKillThread kills the thread that violated the rule. It is the same as ActKill.
+- // All other threads from the same thread group will continue to execute.
+- ActKillThread ScmpAction = iota
+ // ActKillProcess kills the process that violated the rule.
+ // All threads in the thread group are also terminated.
+ // This action is only usable when libseccomp API level 3 or higher is
+ // supported.
+ ActKillProcess ScmpAction = iota
++ // ActKillThread kills the thread that violated the rule. It is the same as ActKill.
++ // All other threads from the same thread group will continue to execute.
++ ActKillThread = ActKill
+ )
+
+ const (
+@@ -394,7 +394,7 @@ func (a ScmpCompareOp) String() string {
+ // String returns a string representation of a seccomp match action
+ func (a ScmpAction) String() string {
+ switch a & 0xFFFF {
+- case ActKill, ActKillThread:
++ case ActKillThread:
+ return "Action: Kill thread"
+ case ActKillProcess:
+ return "Action: Kill process"
+diff --git a/seccomp_internal.go b/seccomp_internal.go
+index 8dc7b29..8fc9914 100644
+--- a/seccomp_internal.go
++++ b/seccomp_internal.go
+@@ -612,8 +612,6 @@ func (a ScmpCompareOp) toNative() C.int {
+ func actionFromNative(a C.uint32_t) (ScmpAction, error) {
+ aTmp := a & 0xFFFF
+ switch a & 0xFFFF0000 {
+- case C.C_ACT_KILL:
+- return ActKill, nil
+ case C.C_ACT_KILL_PROCESS:
+ return ActKillProcess, nil
+ case C.C_ACT_KILL_THREAD:
+@@ -638,8 +636,6 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) {
+ // Only use with sanitized actions, no error handling
+ func (a ScmpAction) toNative() C.uint32_t {
+ switch a & 0xFFFF {
+- case ActKill:
+- return C.C_ACT_KILL
+ case ActKillProcess:
+ return C.C_ACT_KILL_PROCESS
+ case ActKillThread:
+--
+2.25.1
+
diff --git a/recipes-containers/podman/podman/50-podman-rootless.conf b/recipes-containers/podman/podman/50-podman-rootless.conf
new file mode 100644
index 00000000..aaede0e1
--- /dev/null
+++ b/recipes-containers/podman/podman/50-podman-rootless.conf
@@ -0,0 +1,6 @@
+# SPDX-FileCopyrightText: Huawei Inc.
+#
+# SPDX-License-Identifier: MIT
+
+# User namespaces are required for rootless containers.
+user.max_user_namespaces = 15000
diff --git a/recipes-containers/podman/podman/CVE-2022-27649.patch b/recipes-containers/podman/podman/CVE-2022-27649.patch
new file mode 100644
index 00000000..cb786ad5
--- /dev/null
+++ b/recipes-containers/podman/podman/CVE-2022-27649.patch
@@ -0,0 +1,106 @@
+From aafa80918a245edcbdaceb1191d749570f1872d0 Mon Sep 17 00:00:00 2001
+From: Giuseppe Scrivano <gscrivan@redhat.com>
+Date: Mon, 28 Feb 2022 09:48:52 +0100
+Subject: [PATCH] do not set the inheritable capabilities
+
+The kernel never sets the inheritable capabilities for a process, they
+are only set by userspace. Emulate the same behavior.
+
+CVE: CVE-2022-27649
+Upstream-Status: Backport [https://github.com/containers/podman/commit/aafa80918a245edcbdaceb1191d749570f1872d0]
+Signed-off-by: Andrei Gherzan <andrei.gherzan@huawei.com>
+---
+ libpod/oci_conmon_exec_linux.go | 7 +++++--
+ pkg/specgen/generate/security.go | 7 +++++--
+ test/e2e/run_test.go | 6 +++---
+ 3 files changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
+index aa970bbde28..65123b37e6a 100644
+--- a/libpod/oci_conmon_exec_linux.go
++++ b/libpod/oci_conmon_exec_linux.go
+@@ -758,11 +758,14 @@ func prepareProcessExec(c *Container, options *ExecOptions, env []string, sessio
+ } else {
+ pspec.Capabilities.Bounding = ctrSpec.Process.Capabilities.Bounding
+ }
++
++ // Always unset the inheritable capabilities similarly to what the Linux kernel does
++ // They are used only when using capabilities with uid != 0.
++ pspec.Capabilities.Inheritable = []string{}
++
+ if execUser.Uid == 0 {
+ pspec.Capabilities.Effective = pspec.Capabilities.Bounding
+- pspec.Capabilities.Inheritable = pspec.Capabilities.Bounding
+ pspec.Capabilities.Permitted = pspec.Capabilities.Bounding
+- pspec.Capabilities.Ambient = pspec.Capabilities.Bounding
+ } else {
+ if user == c.config.User {
+ pspec.Capabilities.Effective = ctrSpec.Process.Capabilities.Effective
+diff --git a/pkg/specgen/generate/security.go b/pkg/specgen/generate/security.go
+index 9c67099054f..988c2983267 100644
+--- a/pkg/specgen/generate/security.go
++++ b/pkg/specgen/generate/security.go
+@@ -146,6 +146,10 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
+
+ configSpec := g.Config
+ configSpec.Process.Capabilities.Ambient = []string{}
++
++ // Always unset the inheritable capabilities similarly to what the Linux kernel does
++ // They are used only when using capabilities with uid != 0.
++ configSpec.Process.Capabilities.Inheritable = []string{}
+ configSpec.Process.Capabilities.Bounding = caplist
+
+ user := strings.Split(s.User, ":")[0]
+@@ -153,7 +157,6 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
+ if (user == "" && s.UserNS.NSMode != specgen.KeepID) || user == "root" || user == "0" {
+ configSpec.Process.Capabilities.Effective = caplist
+ configSpec.Process.Capabilities.Permitted = caplist
+- configSpec.Process.Capabilities.Inheritable = caplist
+ } else {
+ mergedCaps, err := capabilities.MergeCapabilities(nil, s.CapAdd, nil)
+ if err != nil {
+@@ -175,12 +178,12 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
+ }
+ configSpec.Process.Capabilities.Effective = userCaps
+ configSpec.Process.Capabilities.Permitted = userCaps
+- configSpec.Process.Capabilities.Inheritable = userCaps
+
+ // Ambient capabilities were added to Linux 4.3. Set ambient
+ // capabilities only when the kernel supports them.
+ if supportAmbientCapabilities() {
+ configSpec.Process.Capabilities.Ambient = userCaps
++ configSpec.Process.Capabilities.Inheritable = userCaps
+ }
+ }
+
+diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
+index 91a2eddadf6..f4a6e573355 100644
+--- a/test/e2e/run_test.go
++++ b/test/e2e/run_test.go
+@@ -498,7 +498,7 @@ var _ = Describe("Podman run", func() {
+ session = podmanTest.Podman([]string{"run", "--rm", "--user", "root", ALPINE, "grep", "CapInh", "/proc/self/status"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+- Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb"))
++ Expect(session.OutputToString()).To(ContainSubstring("0000000000000000"))
+
+ session = podmanTest.Podman([]string{"run", "--rm", ALPINE, "grep", "CapBnd", "/proc/self/status"})
+ session.WaitWithDefaultTimeout()
+@@ -533,7 +533,7 @@ var _ = Describe("Podman run", func() {
+ session = podmanTest.Podman([]string{"run", "--user=0:0", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapInh", "/proc/self/status"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+- Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb"))
++ Expect(session.OutputToString()).To(ContainSubstring("0000000000000000"))
+
+ if os.Geteuid() > 0 {
+ if os.Getenv("SKIP_USERNS") != "" {
+@@ -550,7 +550,7 @@ var _ = Describe("Podman run", func() {
+ session = podmanTest.Podman([]string{"run", "--userns=keep-id", "--privileged", "--rm", ALPINE, "grep", "CapInh", "/proc/self/status"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+- Expect(session.OutputToString()).To(ContainSubstring("0000000000000000"))
++ Expect(session.OutputToString()).To(ContainSubstring("0000000000000002"))
+
+ session = podmanTest.Podman([]string{"run", "--userns=keep-id", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapInh", "/proc/self/status"})
+ session.WaitWithDefaultTimeout()
diff --git a/recipes-containers/podman/podman_git.bb b/recipes-containers/podman/podman_git.bb
index f805cb6d..09bf8270 100644
--- a/recipes-containers/podman/podman_git.bb
+++ b/recipes-containers/podman/podman_git.bb
@@ -20,6 +20,10 @@ DEPENDS = " \
SRCREV = "717edd7b844dcd66468f5d991991d87e9fc14c12"
SRC_URI = " \
git://github.com/containers/libpod.git;branch=v4.0;protocol=https \
+ file://0001-Rename-BUILDFLAGS-to-GOBUILDFLAGS.patch;patchdir=src/import \
+ file://0002-Define-ActKillThread-equal-to-ActKill.patch;patchdir=src/import/vendor/github.com/seccomp/libseccomp-golang \
+ file://CVE-2022-27649.patch;patchdir=src/import \
+ ${@bb.utils.contains('PACKAGECONFIG', 'rootless', 'file://50-podman-rootless.conf', '', d)} \
"
LICENSE = "Apache-2.0"
@@ -41,6 +45,9 @@ exclude_graphdriver_btrfs exclude_graphdriver_devicemapper"
# overide LDFLAGS to allow podman to build without: "flag provided but not # defined: -Wl,-O1
export LDFLAGS=""
+# https://github.com/llvm/llvm-project/issues/53999
+TOOLCHAIN = "gcc"
+
inherit go goarch
inherit systemd pkgconfig
@@ -92,6 +99,15 @@ do_install() {
if ${@bb.utils.contains('PACKAGECONFIG', 'docker', 'true', 'false', d)}; then
oe_runmake install.docker DESTDIR="${D}"
fi
+
+ # Silence docker emulation warnings.
+ mkdir -p ${D}/etc/containers
+ touch ${D}/etc/containers/nodocker
+
+ if ${@bb.utils.contains('PACKAGECONFIG', 'rootless', 'true', 'false', d)}; then
+ install -d "${D}${sysconfdir}/sysctl.d"
+ install -m 0644 "${WORKDIR}/50-podman-rootless.conf" "${D}${sysconfdir}/sysctl.d"
+ fi
}
FILES:${PN} += " \
@@ -107,6 +123,9 @@ SYSTEMD_SERVICE:${PN} = "podman.service podman.socket"
# that busybox is configured with nsenter
VIRTUAL-RUNTIME_base-utils-nsenter ?= "util-linux-nsenter"
-RDEPENDS:${PN} += "conmon virtual-runc iptables cni skopeo ${VIRTUAL-RUNTIME_base-utils-nsenter}"
+RDEPENDS:${PN} += "\
+ conmon virtual-runc iptables cni skopeo ${VIRTUAL-RUNTIME_base-utils-nsenter} \
+ ${@bb.utils.contains('PACKAGECONFIG', 'rootless', 'fuse-overlayfs slirp4netns', '', d)} \
+"
RRECOMMENDS:${PN} += "slirp4netns kernel-module-xt-masquerade kernel-module-xt-comment"
RCONFLICTS:${PN} = "${@bb.utils.contains('PACKAGECONFIG', 'docker', 'docker', '', d)}"
diff --git a/recipes-containers/runc/runc-docker_git.bb b/recipes-containers/runc/runc-docker_git.bb
index dc93a7c0..97373a72 100644
--- a/recipes-containers/runc/runc-docker_git.bb
+++ b/recipes-containers/runc/runc-docker_git.bb
@@ -2,13 +2,13 @@ include runc.inc
# Note: this rev is before the required protocol field, update when all components
# have been updated to match.
-SRCREV_runc-docker = "b9460f26b49efa086b99f32557219d0f24bd23ae"
+SRCREV_runc-docker = "974efd2dfca0abec041a3708a2b66bfac6bd2484"
SRC_URI = "git://github.com/opencontainers/runc;branch=release-1.1;name=runc-docker;protocol=https \
file://0001-runc-Add-console-socket-dev-null.patch \
file://0001-Makefile-respect-GOBUILDFLAGS-for-runc-and-remove-re.patch \
file://0001-runc-docker-SIGUSR1-daemonize.patch \
"
-RUNC_VERSION = "1.1.0"
+RUNC_VERSION = "1.1.4"
CVE_PRODUCT = "runc"
diff --git a/recipes-containers/runc/runc-opencontainers_git.bb b/recipes-containers/runc/runc-opencontainers_git.bb
index ab573107..59ddca9b 100644
--- a/recipes-containers/runc/runc-opencontainers_git.bb
+++ b/recipes-containers/runc/runc-opencontainers_git.bb
@@ -1,10 +1,10 @@
include runc.inc
-SRCREV = "b9460f26b49efa086b99f32557219d0f24bd23ae"
+SRCREV = "974efd2dfca0abec041a3708a2b66bfac6bd2484"
SRC_URI = " \
git://github.com/opencontainers/runc;branch=release-1.1;protocol=https \
file://0001-Makefile-respect-GOBUILDFLAGS-for-runc-and-remove-re.patch \
"
-RUNC_VERSION = "1.1.0"
+RUNC_VERSION = "1.1.4"
CVE_PRODUCT = "runc"
diff --git a/recipes-containers/singularity/singularity_git.bb b/recipes-containers/singularity/singularity_git.bb
index 321a9a61..e3903ecc 100644
--- a/recipes-containers/singularity/singularity_git.bb
+++ b/recipes-containers/singularity/singularity_git.bb
@@ -2,7 +2,7 @@
# Singularity build / config: read up on the dev-so test for more info)
INSANE_SKIP:${PN} += "dev-so"
-RDEPENDS:${PN} += "glibc python3 ca-certificates openssl bash e2fsprogs-mke2fs"
+RDEPENDS:${PN} += "python3 ca-certificates openssl bash e2fsprogs-mke2fs"
LICENSE = "BSD-3-Clause | Apache-2.0"
LIC_FILES_CHKSUM = "file://COPYRIGHT.md;md5=be78c34e483dd7d8439358b1e024b294 \
diff --git a/recipes-containers/skopeo/skopeo_git.bb b/recipes-containers/skopeo/skopeo_git.bb
index 35377a8d..e396e3af 100644
--- a/recipes-containers/skopeo/skopeo_git.bb
+++ b/recipes-containers/skopeo/skopeo_git.bb
@@ -22,8 +22,6 @@ RDEPENDS:${PN} = " \
SRC_URI = " \
git://github.com/containers/skopeo;branch=main;protocol=https \
file://0001-Makefile-use-pkg-config-instead-of-gpgme-config.patch \
- file://storage.conf \
- file://registries.conf \
"
SRCREV = "3e2defd6d37b742adde2aac6cb01f6c3c17da8e2"
@@ -35,6 +33,14 @@ S = "${WORKDIR}/git"
inherit goarch
inherit pkgconfig
+inherit container-host
+
+# This CVE was fixed in the container image go library skopeo is using.
+# See:
+# https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2019-10214
+# https://github.com/containers/image/issues/654
+CVE_CHECK_IGNORE += "CVE-2019-10214"
+
# This disables seccomp and apparmor, which are on by default in the
# go package.
EXTRA_OEMAKE="BUILDTAGS=''"
@@ -74,10 +80,6 @@ do_install() {
install -d ${D}/${sysconfdir}/containers
install ${S}/src/import/bin/skopeo ${D}/${sbindir}/
- install ${S}/src/import/default-policy.json ${D}/${sysconfdir}/containers/policy.json
-
- install ${WORKDIR}/storage.conf ${D}/${sysconfdir}/containers/storage.conf
- install ${WORKDIR}/registries.conf ${D}/${sysconfdir}/containers/registries.conf
}
do_install:append:class-native() {
diff --git a/recipes-core/packagegroups/packagegroup-container.bb b/recipes-core/packagegroups/packagegroup-container.bb
index 8d418e95..8309a086 100644
--- a/recipes-core/packagegroups/packagegroup-container.bb
+++ b/recipes-core/packagegroups/packagegroup-container.bb
@@ -9,7 +9,7 @@ PACKAGES = "\
packagegroup-lxc \
packagegroup-docker \
packagegroup-oci \
- ${@bb.utils.contains('DISTRO_FEATURES', 'seccomp', \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'seccomp ipv6', \
'packagegroup-podman', '', d)} \
packagegroup-containerd \
"
diff --git a/recipes-core/sysvinit/sysvinit-inittab_xen.inc b/recipes-core/sysvinit/sysvinit-inittab_xen.inc
index 7f92bd1f..724f47dc 100644
--- a/recipes-core/sysvinit/sysvinit-inittab_xen.inc
+++ b/recipes-core/sysvinit/sysvinit-inittab_xen.inc
@@ -1,4 +1,10 @@
+
+
do_install:append() {
- echo "" >> ${D}${sysconfdir}/inittab
- echo "X0:12345:respawn:${base_sbindir}/getty-wrapper 115200 hvc0" >> ${D}${sysconfdir}/inittab
+ # if SERIAL_CONSOLES contains hvc0, it is already added in inittab so do
+ # not add it twice
+ if echo "${SERIAL_CONSOLES}" | grep -vq "hvc0"; then
+ echo "" >> ${D}${sysconfdir}/inittab
+ echo "X0:12345:respawn:${base_sbindir}/getty-wrapper 115200 hvc0" >> ${D}${sysconfdir}/inittab
+ fi
}
diff --git a/recipes-devtools/go/go-context_git.bb b/recipes-devtools/go/go-context_git.bb
index 37f39b38..0959054b 100644
--- a/recipes-devtools/go/go-context_git.bb
+++ b/recipes-devtools/go/go-context_git.bb
@@ -7,7 +7,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=c50f6bd9c1e15ed0bad3bea18e3c1b7f"
SRCNAME = "context"
PKG_NAME = "github.com/gorilla/${SRCNAME}"
-SRC_URI = "git://${PKG_NAME}.git;branch=master;protocol=https"
+SRC_URI = "git://${PKG_NAME}.git;branch=main;protocol=https"
SRCREV = "14f550f51af52180c2eefed15e5fd18d63c0a64a"
diff --git a/recipes-devtools/go/go-mux_git.bb b/recipes-devtools/go/go-mux_git.bb
index 3f939f45..fb4502b6 100644
--- a/recipes-devtools/go/go-mux_git.bb
+++ b/recipes-devtools/go/go-mux_git.bb
@@ -7,7 +7,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=c50f6bd9c1e15ed0bad3bea18e3c1b7f"
SRCNAME = "mux"
PKG_NAME = "github.com/gorilla/${SRCNAME}"
-SRC_URI = "git://${PKG_NAME}.git;branch=master;protocol=https"
+SRC_URI = "git://${PKG_NAME}.git;branch=main;protocol=https"
SRCREV = "136d54f81f00414c45c3c68dd47e98cc97519c5e"
diff --git a/recipes-devtools/yq/yq_git.bb b/recipes-devtools/yq/yq_git.bb
index 1fccb120..0f73362c 100644
--- a/recipes-devtools/yq/yq_git.bb
+++ b/recipes-devtools/yq/yq_git.bb
@@ -13,17 +13,19 @@ SRCREV_cobra = "b97b5ead31f7d34f764ac8666e40c214bb8e06dc"
SRCREV_pflag = "6971c29c4a22981adeaee7f4b437c0cffe08c031"
SRCREV_logging = "b2cb9fa56473e98db8caba80237377e83fe44db5"
SRCREV_yaml = "eeeca48fe7764f320e4870d231902bf9c1be2c08"
+SRCREV_xerrors = "5ec99f83aff198f5fbd629d6c8d8eb38a04218ca"
SRCREV_FORMAT = "yq_color"
SRC_URI = "git://${GO_IMPORT};name=yq;branch=master;protocol=https \
- git://github.com/fatih/color;name=color;destsuffix=build/vendor/src/github.com/fatih/color;branch=master;protocol=https \
+ git://github.com/fatih/color;name=color;destsuffix=build/vendor/src/github.com/fatih/color;branch=main;protocol=https \
git://github.com/goccy/go-yaml;name=lexer;destsuffix=build/vendor/src/github.com/goccy/go-yaml/;branch=master;protocol=https \
git://github.com/kylelemons/godebug;name=debug;destsuffix=build/vendor/src/github.com/kylelemons/godebug/;branch=master;protocol=https \
git://github.com/pkg/errors;name=errors;destsuffix=build/vendor/src/github.com/pkg/errors/;branch=master;protocol=https \
- git://github.com/spf13/cobra;name=cobra;destsuffix=build/vendor/src/github.com/spf13/cobra;branch=master;protocol=https \
+ git://github.com/spf13/cobra;name=cobra;destsuffix=build/vendor/src/github.com/spf13/cobra;branch=main;protocol=https \
git://github.com/spf13/pflag;name=pflag;destsuffix=build/vendor/src/github.com/spf13/pflag;branch=master;protocol=https \
git://github.com/op/go-logging.git;name=logging;destsuffix=build/vendor/src/gopkg.in/op/go-logging.v1;branch=master;protocol=https \
git://github.com/go-yaml/yaml.git;name=yaml;branch=v3;destsuffix=build/vendor/src/gopkg.in/yaml.v3;protocol=https \
+ git://github.com/golang/xerrors;name=xerrors;protocol=https;nobranch=1;destsuffix=build/vendor/src/golang.org/x/xerrors \
"
PV = "1.13.1+git${SRCREV_yq}"
diff --git a/recipes-extended/ceph/ceph_15.2.15.bb b/recipes-extended/ceph/ceph_15.2.17.bb
index 17dbcf35..9fb2e722 100644
--- a/recipes-extended/ceph/ceph_15.2.15.bb
+++ b/recipes-extended/ceph/ceph_15.2.17.bb
@@ -16,7 +16,7 @@ SRC_URI = "http://download.ceph.com/tarballs/ceph-${PV}.tar.gz \
file://0001-SnappyCompressor.h-fix-snappy-compiler-error.patch \
"
-SRC_URI[sha256sum] = "5dccdaff2ebe18d435b32bfc06f8b5f474bf6ac0432a6a07d144b7c56700d0bf"
+SRC_URI[sha256sum] = "d8efe4996aeb01dd2f1cc939c5e434e5a7e2aeaf3f659c0510ffd550477a32e2"
DEPENDS = "boost bzip2 curl expat gperf-native \
keyutils libaio libibverbs lz4 \
diff --git a/recipes-extended/cloud-init/cloud-init_21.4.bb b/recipes-extended/cloud-init/cloud-init_21.4.bb
index 9c2dae79..5cb62272 100644
--- a/recipes-extended/cloud-init/cloud-init_21.4.bb
+++ b/recipes-extended/cloud-init/cloud-init_21.4.bb
@@ -19,6 +19,11 @@ DISTUTILS_INSTALL_ARGS:append = " ${@bb.utils.contains('DISTRO_FEATURES', 'syste
do_install:append() {
ln -s ${libdir}/${BPN}/uncloud-init ${D}${sysconfdir}/cloud/uncloud-init
ln -s ${libdir}/${BPN}/write-ssh-key-fingerprints ${D}${sysconfdir}/cloud/write-ssh-key-fingerprints
+ if ${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'true', 'false', d)}; then
+ install -m 755 -d ${D}${sysconfdir}/init.d/
+ install -m 755 ${S}/sysvinit/redhat/* ${D}${sysconfdir}/init.d/
+ fi
+
}
inherit pkgconfig
diff --git a/recipes-extended/fuse-overlayfs/fuse-overlayfs/0001-Fix-buffer-overflow-on-workdir-path.patch b/recipes-extended/fuse-overlayfs/fuse-overlayfs/0001-Fix-buffer-overflow-on-workdir-path.patch
new file mode 100644
index 00000000..129423d4
--- /dev/null
+++ b/recipes-extended/fuse-overlayfs/fuse-overlayfs/0001-Fix-buffer-overflow-on-workdir-path.patch
@@ -0,0 +1,32 @@
+From 7e5992d6121aed0cfcbfaf70472f28d87cff1426 Mon Sep 17 00:00:00 2001
+From: Andrei Gherzan <andrei.gherzan@huawei.com>
+Date: Mon, 11 Jul 2022 20:36:06 +0200
+Subject: [PATCH] Fix buffer overflow on workdir path
+
+We make sure that the path used for workdir is reallocated before
+appending. This was initially included in upstream as part of
+https://github.com/containers/fuse-overlayfs/commit/d5b725b6f18a437db66bfc1456d04c3bf658f66a.
+
+Signed-off-by: Andrei Gherzan <andrei.gherzan@huawei.com>
+Upstream-Status: Backport
+---
+ main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/main.c b/main.c
+index e5bdda1..118a6cb 100644
+--- a/main.c
++++ b/main.c
+@@ -5039,6 +5039,9 @@ main (int argc, char *argv[])
+ if (path == NULL)
+ goto err_out1;
+ mkdir (path, 0700);
++ path = realloc(path, strlen(path)+strlen("/work")+1);
++ if (!path)
++ error (EXIT_FAILURE, errno, "allocating workdir path");
+ strcat (path, "/work");
+ mkdir (path, 0700);
+ free (lo.workdir);
+--
+2.25.1
+
diff --git a/recipes-extended/fuse-overlayfs/fuse-overlayfs_0.6.4.bb b/recipes-extended/fuse-overlayfs/fuse-overlayfs_0.6.4.bb
index a02c1e60..4f793bd9 100644
--- a/recipes-extended/fuse-overlayfs/fuse-overlayfs_0.6.4.bb
+++ b/recipes-extended/fuse-overlayfs/fuse-overlayfs_0.6.4.bb
@@ -6,7 +6,10 @@ LICENSE = "GPL-3.0-or-later"
LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
SRCREV = "098d9ad79fdbb8538adde08628408aa32a8b4b17"
-SRC_URI = "git://github.com/containers/fuse-overlayfs.git;nobranch=1;protocol=https"
+SRC_URI = " \
+ git://github.com/containers/fuse-overlayfs.git;nobranch=1;protocol=https \
+ file://0001-Fix-buffer-overflow-on-workdir-path.patch \
+"
DEPENDS = "fuse3"
diff --git a/recipes-extended/images/xtf-image.bb b/recipes-extended/images/xtf-image.bb
index a78959df..f9ecea91 100644
--- a/recipes-extended/images/xtf-image.bb
+++ b/recipes-extended/images/xtf-image.bb
@@ -31,4 +31,4 @@ IMAGE_INSTALL:append = " xtf"
DEFAULT_TEST_SUITES:append = " xtf_minimal"
-QB_DEFAULT_FSTYPE_x86-64 = "wic"
+QB_DEFAULT_FSTYPE:x86-64 = "wic"
diff --git a/recipes-extended/irqbalance/irqbalance.inc b/recipes-extended/irqbalance/irqbalance.inc
index c69b74ec..038cbae3 100644
--- a/recipes-extended/irqbalance/irqbalance.inc
+++ b/recipes-extended/irqbalance/irqbalance.inc
@@ -42,6 +42,5 @@ do_install () {
chmod 755 ${D}${sysconfdir}/init.d/irqbalanced
install -d ${D}${systemd_unitdir}/system
- install -m 0644 ${WORKDIR}/irqbalanced.service ${D}${systemd_unitdir}/system
- sed -i -e 's,@SBINDIR@,${sbindir},g' ${D}${systemd_unitdir}/system/irqbalanced.service
+ install -m 0644 ${S}/misc/irqbalance.service ${D}${systemd_unitdir}/system/irqbalanced.service
}
diff --git a/recipes-extended/irqbalance/irqbalance/irqbalanced.service b/recipes-extended/irqbalance/irqbalance/irqbalanced.service
deleted file mode 100644
index 5b284faa..00000000
--- a/recipes-extended/irqbalance/irqbalance/irqbalanced.service
+++ /dev/null
@@ -1,9 +0,0 @@
-[Unit]
-Description=irqbalance daemon
-After=syslog.target
-
-[Service]
-ExecStart=@SBINDIR@/irqbalance --foreground
-
-[Install]
-WantedBy=multi-user.target
diff --git a/recipes-extended/irqbalance/irqbalance_git.bb b/recipes-extended/irqbalance/irqbalance_git.bb
index 48774889..a944c2f1 100644
--- a/recipes-extended/irqbalance/irqbalance_git.bb
+++ b/recipes-extended/irqbalance/irqbalance_git.bb
@@ -10,7 +10,6 @@ PV = "1.8.0+git${SRCPV}"
SRC_URI = "git://github.com/Irqbalance/irqbalance;branch=master;protocol=https \
file://add-initscript.patch \
file://irqbalance-Add-status-and-reload-commands.patch \
- file://irqbalanced.service \
"
S = "${WORKDIR}/git"
diff --git a/recipes-extended/libvirt/libvirt/CVE-2023-2700.patch b/recipes-extended/libvirt/libvirt/CVE-2023-2700.patch
new file mode 100644
index 00000000..b6907b04
--- /dev/null
+++ b/recipes-extended/libvirt/libvirt/CVE-2023-2700.patch
@@ -0,0 +1,54 @@
+From 6425a311b8ad19d6f9c0b315bf1d722551ea3585 Mon Sep 17 00:00:00 2001
+From: Tim Shearer <TShearer@adva.com>
+Date: Mon, 1 May 2023 13:15:48 +0000
+Subject: [PATCH] virpci: Resolve leak in virPCIVirtualFunctionList cleanup
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Repeatedly querying an SR-IOV PCI device's capabilities exposes a
+memory leak caused by a failure to free the virPCIVirtualFunction
+array within the parent struct's g_autoptr cleanup.
+
+Valgrind output after getting a single interface's XML description
+1000 times:
+
+==325982== 256,000 bytes in 1,000 blocks are definitely lost in loss record 2,634 of 2,635
+==325982== at 0x4C3C096: realloc (vg_replace_malloc.c:1437)
+==325982== by 0x59D952D: g_realloc (in /usr/lib64/libglib-2.0.so.0.5600.4)
+==325982== by 0x4EE1F52: virReallocN (viralloc.c:52)
+==325982== by 0x4EE1FB7: virExpandN (viralloc.c:78)
+==325982== by 0x4EE219A: virInsertElementInternal (viralloc.c:183)
+==325982== by 0x4EE23B2: virAppendElement (viralloc.c:288)
+==325982== by 0x4F65D85: virPCIGetVirtualFunctionsFull (virpci.c:2389)
+==325982== by 0x4F65753: virPCIGetVirtualFunctions (virpci.c:2256)
+==325982== by 0x505CB75: virNodeDeviceGetPCISRIOVCaps (node_device_conf.c:2969)
+==325982== by 0x505D181: virNodeDeviceGetPCIDynamicCaps (node_device_conf.c:3099)
+==325982== by 0x505BC4E: virNodeDeviceUpdateCaps (node_device_conf.c:2677)
+==325982== by 0x260FCBB2: nodeDeviceGetXMLDesc (node_device_driver.c:355)
+
+Signed-off-by: Tim Shearer <tshearer@adva.com>
+Reviewed-by: Ján Tomko <jtomko@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.com/libvirt/libvirt/-/commit/6425a311b8ad19d6f9c0b315bf1d722551ea3585]
+CVE: CVE-2023-2700
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/util/virpci.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/util/virpci.c b/src/util/virpci.c
+index d141fde..1516682 100644
+--- a/src/util/virpci.c
++++ b/src/util/virpci.c
+@@ -2254,6 +2254,7 @@ virPCIVirtualFunctionListFree(virPCIVirtualFunctionList *list)
+ g_free(list->functions[i].ifname);
+ }
+
++ g_free(list->functions);
+ g_free(list);
+ }
+
+--
+2.25.1
+
diff --git a/recipes-extended/libvirt/libvirt_8.1.0.bb b/recipes-extended/libvirt/libvirt_8.1.0.bb
index 89f82bf8..63cf4914 100644
--- a/recipes-extended/libvirt/libvirt_8.1.0.bb
+++ b/recipes-extended/libvirt/libvirt_8.1.0.bb
@@ -29,6 +29,7 @@ SRC_URI = "http://libvirt.org/sources/libvirt-${PV}.tar.xz;name=libvirt \
file://hook_support.py \
file://gnutls-helper.py \
file://0001-qemu-segmentation-fault-in-virtqemud-executing-qemuD.patch \
+ file://CVE-2023-2700.patch \
"
SRC_URI[libvirt.sha256sum] = "3c6c43becffeb34a3f397c616206aa69a893ff8bf5e8208393c84e8e75352934"
diff --git a/recipes-extended/upx/upx/0001-MyCom.h-fix-build-with-gcc-11.patch b/recipes-extended/upx/upx/0001-MyCom.h-fix-build-with-gcc-11.patch
deleted file mode 100644
index 8b07c77e..00000000
--- a/recipes-extended/upx/upx/0001-MyCom.h-fix-build-with-gcc-11.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From 8fe8cd22163fe11b791aac15b642d122ea98b9b5 Mon Sep 17 00:00:00 2001
-From: Martin Jansa <martin.jansa@lge.com>
-Date: Fri, 14 May 2021 02:26:13 -0700
-Subject: [PATCH] MyCom.h: fix build with gcc-11
-
-* fixes:
- ./../src/lzma-sdk/C/7zip/Compress/LZMA/LZMAEncoder.h: In member function 'virtual ULONG NCompress::NLZMA::CEncoder::Release()':
- ./../src/lzma-sdk/C/7zip/Compress/LZMA/../../../Common/MyCom.h:159:32: error: this 'if' clause does not guard... [-Werror=misleading-indentation]
- 159 | STDMETHOD_(ULONG, Release)() { if (--__m_RefCount != 0) \
- | ^~
-
-Upstream-Status: Submitted [https://github.com/upx/upx-lzma-sdk/pull/5]
-Signed-off-by: Martin Jansa <martin.jansa@lge.com>
----
- C/Common/MyCom.h | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-diff --git a/C/Common/MyCom.h b/C/Common/MyCom.h
-index b8dbf38..2e3c54a 100644
---- a/C/Common/MyCom.h
-+++ b/C/Common/MyCom.h
-@@ -156,8 +156,7 @@ public:
-
- #define MY_ADDREF_RELEASE \
- STDMETHOD_(ULONG, AddRef)() { return ++__m_RefCount; } \
--STDMETHOD_(ULONG, Release)() { if (--__m_RefCount != 0) \
-- return __m_RefCount; delete this; return 0; }
-+STDMETHOD_(ULONG, Release)() { if (--__m_RefCount != 0) return __m_RefCount; delete this; return 0; }
-
- #define MY_UNKNOWN_IMP_SPEC(i) \
- MY_QUERYINTERFACE_BEGIN \
diff --git a/recipes-extended/upx/upx_git.bb b/recipes-extended/upx/upx_git.bb
index 9f400b1b..ec5daac2 100644
--- a/recipes-extended/upx/upx_git.bb
+++ b/recipes-extended/upx/upx_git.bb
@@ -1,35 +1,16 @@
-HOMEPAGE = "http://upx.sourceforge.net"
SUMMARY = "Ultimate executable compressor."
-
-SRCREV = "4e1ae22a1a07be5135c68b25ff05058ae8ae48e1"
-SRC_URI = "gitsm://github.com/upx/upx;branch=devel;protocol=https \
- file://0001-MyCom.h-fix-build-with-gcc-11.patch;patchdir=src/lzma-sdk \
-"
-
+HOMEPAGE = "https://upx.github.io/"
LICENSE = "GPL-2.0-only"
LIC_FILES_CHKSUM = "file://LICENSE;md5=353753597aa110e0ded3508408c6374a"
+SRCREV_upx = "099c3d829e80488af7395a4242b318877e980da4"
+PV = "4.2.2+git${SRCPV}"
-DEPENDS = "zlib libucl xz"
+# Note: DO NOT use released tarball in favor of the git repository with submodules.
+# it makes maintenance easier for CVEs or other issues.
+SRC_URI = "gitsm://github.com/upx/upx;protocol=https;name=upx;branch=devel"
S = "${WORKDIR}/git"
-PV = "3.96+${SRCPV}"
-
-EXTRA_OEMAKE += " \
- UPX_UCLDIR=${STAGING_DIR_TARGET} \
- UPX_LZMADIR=${STAGING_DIR_TARGET} \
-"
-
-# FIXME: The build fails if security flags are enabled
-SECURITY_CFLAGS = ""
-
-do_compile() {
- oe_runmake -C src all
-}
-
-do_install:append() {
- install -d ${D}${bindir}
- install -m 755 ${B}/src/upx.out ${D}${bindir}/upx
-}
+inherit pkgconfig cmake
BBCLASSEXTEND = "native"
diff --git a/recipes-networking/cni/cni_git.bb b/recipes-networking/cni/cni_git.bb
index 63c39293..f84e8a43 100644
--- a/recipes-networking/cni/cni_git.bb
+++ b/recipes-networking/cni/cni_git.bb
@@ -29,6 +29,9 @@ PV = "v1.0.1+git${SRCREV_cni}"
inherit go
inherit goarch
+# https://github.com/llvm/llvm-project/issues/53999
+TOOLCHAIN = "gcc"
+
do_compile() {
mkdir -p ${S}/src/github.com/containernetworking
ln -sfr ${S}/src/import ${S}/src/github.com/containernetworking/cni
@@ -36,10 +39,10 @@ do_compile() {
export GO111MODULE=off
cd ${B}/src/github.com/containernetworking/cni/libcni
- ${GO} build
+ ${GO} build ${GOBUILDFLAGS}
cd ${B}/src/github.com/containernetworking/cni/cnitool
- ${GO} build
+ ${GO} build ${GOBUILDFLAGS}
cd ${B}/src/github.com/containernetworking/plugins
PLUGINS="$(ls -d plugins/meta/*; ls -d plugins/ipam/*; ls -d plugins/main/* | grep -v windows)"
@@ -47,10 +50,12 @@ do_compile() {
for p in $PLUGINS; do
plugin="$(basename "$p")"
echo "building: $p"
- ${GO} build -o ${B}/plugins/bin/$plugin github.com/containernetworking/plugins/$p
+ ${GO} build ${GOBUILDFLAGS} -o ${B}/plugins/bin/$plugin github.com/containernetworking/plugins/$p
done
}
+do_compile[cleandirs] = "${B}/plugins"
+
do_install() {
localbindir="${libexecdir}/cni/"
diff --git a/recipes-networking/openvswitch/openvswitch_git.bb b/recipes-networking/openvswitch/openvswitch_git.bb
index 2e125e78..08a34440 100644
--- a/recipes-networking/openvswitch/openvswitch_git.bb
+++ b/recipes-networking/openvswitch/openvswitch_git.bb
@@ -14,12 +14,12 @@ RDEPENDS:${PN}-ptest += "\
"
S = "${WORKDIR}/git"
-PV = "2.17.1+${SRCPV}"
-CVE_VERSION = "2.17.1"
+PV = "2.17.9+${SRCPV}"
+CVE_VERSION = "2.17.9"
FILESEXTRAPATHS:append := "${THISDIR}/${PN}-git:"
-SRCREV = "41bb202fb37f184b0a8820a029c62d03c118614e"
+SRCREV = "0bea06d9957e3966d94c48873cd9afefba1c2677"
SRC_URI += "git://github.com/openvswitch/ovs.git;protocol=https;branch=branch-2.17 \
file://openvswitch-add-ptest-71d553b995d0bd527d3ab1e9fbaf5a2ae34de2f3.patch \
file://run-ptest \
diff --git a/recipes-networking/slirp4netns/slirp4netns_git.bb b/recipes-networking/slirp4netns/slirp4netns_git.bb
index a63871c9..6526d39c 100644
--- a/recipes-networking/slirp4netns/slirp4netns_git.bb
+++ b/recipes-networking/slirp4netns/slirp4netns_git.bb
@@ -18,4 +18,6 @@ DEPENDS = "glib-2.0 libcap libseccomp libslirp"
S = "${WORKDIR}/git"
+RRECOMMENDS:${PN} += "kernel-module-tun"
+
inherit autotools pkgconfig